/*
 * sata_nv.c - NVIDIA nForce SATA
 *
 * Copyright 2004 NVIDIA Corp. All rights reserved.
 * Copyright 2004 Andrew Chew
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * No hardware documentation available outside of NVIDIA.
 * This driver programs the NVIDIA SATA controller in a similar
 * fashion as with other PCI IDE BMDMA controllers, with a few
 * NV-specific details such as register offsets, SATA phy location,
 * hotplug info, etc.
 *
 * CK804/MCP04 controllers support an alternate programming interface
 * similar to the ADMA specification (with some modifications).
 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 * sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME "sata_nv"
#define DRV_VERSION "3.5"

#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL

10ad05df 57enum {
0d5ff566
TH
58 NV_MMIO_BAR = 5,
59
10ad05df 60 NV_PORTS = 2,
14bdef98
EIB
61 NV_PIO_MASK = ATA_PIO4,
62 NV_MWDMA_MASK = ATA_MWDMA2,
63 NV_UDMA_MASK = ATA_UDMA6,
10ad05df
JG
64 NV_PORT0_SCR_REG_OFFSET = 0x00,
65 NV_PORT1_SCR_REG_OFFSET = 0x40,
1da177e4 66
27e4b274 67 /* INT_STATUS/ENABLE */
10ad05df 68 NV_INT_STATUS = 0x10,
10ad05df 69 NV_INT_ENABLE = 0x11,
27e4b274 70 NV_INT_STATUS_CK804 = 0x440,
10ad05df 71 NV_INT_ENABLE_CK804 = 0x441,
1da177e4 72
27e4b274
TH
73 /* INT_STATUS/ENABLE bits */
74 NV_INT_DEV = 0x01,
75 NV_INT_PM = 0x02,
76 NV_INT_ADDED = 0x04,
77 NV_INT_REMOVED = 0x08,
78
79 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
80
39f87582 81 NV_INT_ALL = 0x0f,
5a44efff
TH
82 NV_INT_MASK = NV_INT_DEV |
83 NV_INT_ADDED | NV_INT_REMOVED,
39f87582 84
27e4b274 85 /* INT_CONFIG */
10ad05df
JG
86 NV_INT_CONFIG = 0x12,
87 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
1da177e4 88
10ad05df
JG
89 // For PCI config register 20
90 NV_MCP_SATA_CFG_20 = 0x50,
91 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
fbbb262d
RH
92 NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
93 NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
94 NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
95 NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
96
97 NV_ADMA_MAX_CPBS = 32,
98 NV_ADMA_CPB_SZ = 128,
99 NV_ADMA_APRD_SZ = 16,
100 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
101 NV_ADMA_APRD_SZ,
102 NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
103 NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
104 NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
105 (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
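
	/*
	 * Sizing note: with the constants above, each command tag gets a
	 * 128-byte CPB holding 5 inline APRDs plus an external table of
	 * (1024 - 128) / 16 = 56 APRDs, i.e. NV_ADMA_SGTBL_TOTAL_LEN = 61
	 * scatter/gather entries per command.  The per-port DMA buffer is
	 * therefore 32 * (128 + 56 * 16) = 32 KiB (NV_ADMA_PORT_PRIV_DMA_SZ).
	 */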

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN = 0x400,
	NV_ADMA_GEN_CTL = 0x00,
	NV_ADMA_NOTIFIER_CLEAR = 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT = 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE = 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL = 0x40,
	NV_ADMA_CPB_COUNT = 0x42,
	NV_ADMA_NEXT_CPB_IDX = 0x43,
	NV_ADMA_STAT = 0x44,
	NV_ADMA_CPB_BASE_LOW = 0x48,
	NV_ADMA_CPB_BASE_HIGH = 0x4C,
	NV_ADMA_APPEND = 0x50,
	NV_ADMA_NOTIFIER = 0x68,
	NV_ADMA_NOTIFIER_ERROR = 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
	NV_ADMA_CTL_GO = (1 << 7),
	NV_ADMA_CTL_AIEN = (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE = (1 << 0),
	NV_CPB_RESP_ATA_ERR = (1 << 3),
	NV_CPB_RESP_CMD_ERR = (1 << 4),
	NV_CPB_RESP_CPB_ERR = (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID = (1 << 0),
	NV_CPB_CTL_QUEUE = (1 << 1),
	NV_CPB_CTL_APRD_VALID = (1 << 2),
	NV_CPB_CTL_IEN = (1 << 3),
	NV_CPB_CTL_FPDMA = (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE = (1 << 1),
	NV_APRD_END = (1 << 2),
	NV_APRD_CONT = (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT = (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
	NV_ADMA_STAT_HOTPLUG = (1 << 2),
	NV_ADMA_STAT_CPBERR = (1 << 4),
	NV_ADMA_STAT_SERROR = (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
	NV_ADMA_STAT_IDLE = (1 << 8),
	NV_ADMA_STAT_LEGACY = (1 << 9),
	NV_ADMA_STAT_STOPPED = (1 << 10),
	NV_ADMA_STAT_DONE = (1 << 12),
	NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
			   NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),

	/* MCP55 reg offset */
	NV_CTL_MCP55 = 0x400,
	NV_INT_STATUS_MCP55 = 0x440,
	NV_INT_ENABLE_MCP55 = 0x444,
	NV_NCQ_REG_MCP55 = 0x448,

	/* MCP55 */
	NV_INT_ALL_MCP55 = 0xffff,
	NV_INT_PORT_SHIFT_MCP55 = 16, /* each port occupies 16 bits */
	NV_INT_MASK_MCP55 = NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ ENABLE BITS */
	NV_CTL_PRI_SWNCQ = 0x02,
	NV_CTL_SEC_SWNCQ = 0x04,

	/* SW NCQ status bits */
	NV_SWNCQ_IRQ_DEV = (1 << 0),
	NV_SWNCQ_IRQ_PM = (1 << 1),
	NV_SWNCQ_IRQ_ADDED = (1 << 2),
	NV_SWNCQ_IRQ_REMOVED = (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT = (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS = (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS = (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP = (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG = NV_SWNCQ_IRQ_ADDED |
			       NV_SWNCQ_IRQ_REMOVED,

};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64 addr;
	__le32 len;
	u8 flags;
	u8 packet_len;
	__le16 reserved;
};

enum nv_adma_regbits {
	CMDEND = (1 << 15),	/* end of command list */
	WNB = (1 << 14),	/* wait-not-BSY */
	IGN = (1 << 13),	/* ignore this entry */
	CS1n = (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2 = (1 << (2 + 8)),
	DA1 = (1 << (1 + 8)),
	DA0 = (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8 resp_flags;		/* 0 */
	u8 reserved1;		/* 1 */
	u8 ctl_flags;		/* 2 */
	/* len is length of taskfile in 64 bit words */
	u8 len;			/* 3 */
	u8 tag;			/* 4 */
	u8 next_cpb_idx;	/* 5 */
	__le16 reserved2;	/* 6-7 */
	__le16 tf[12];		/* 8-31 */
	struct nv_adma_prd aprd[5];	/* 32-111 */
	__le64 next_aprd;	/* 112-119 */
	__le64 reserved3;	/* 120-127 */
};
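
/*
 * When a command needs more than the 5 APRDs embedded in its CPB, the
 * remaining entries live in that tag's slot of the per-port external APRD
 * table and next_aprd points at aprd_dma + tag * NV_ADMA_SGTBL_SZ; see
 * nv_adma_fill_sg().
 */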


struct nv_adma_port_priv {
	struct nv_adma_cpb *cpb;
	dma_addr_t cpb_dma;
	struct nv_adma_prd *aprd;
	dma_addr_t aprd_dma;
	void __iomem *ctl_block;
	void __iomem *gen_block;
	void __iomem *notifier_clear_block;
	u64 adma_dma_mask;
	u8 flags;
	int last_issue_ncq;
};

struct nv_host_priv {
	unsigned long type;
};

struct defer_queue {
	u32 defer_bits;
	unsigned int head;
	unsigned int tail;
	unsigned int tag[ATA_MAX_QUEUE];
};
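
/*
 * The SWNCQ path uses this as a small FIFO of deferred command tags:
 * defer_bits tracks which tags are currently queued while head/tail index
 * the circular tag[] array (see the defer_queue member of
 * nv_swncq_port_priv below).
 */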

enum ncq_saw_flag_list {
	ncq_saw_d2h = (1U << 0),
	ncq_saw_dmas = (1U << 1),
	ncq_saw_sdb = (1U << 2),
	ncq_saw_backout = (1U << 3),
};

struct nv_swncq_port_priv {
	struct ata_prd *prd;	/* our SG list */
	dma_addr_t prd_dma;	/* and its DMA mapping */
	void __iomem *sactive_block;
	void __iomem *irq_block;
	void __iomem *tag_block;
	u32 qc_active;

	unsigned int last_issue_tag;

	/* fifo circular queue to store deferral command */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis */
	u32 dhfis_bits;
	u32 dmafis_bits;
	u32 sdbfis_bits;

	unsigned int ncq_flags;
};


#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
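
/*
 * The ADMA general control register carries one interrupt-pending bit per
 * port: bit 19 for port 0 and bit 31 for port 1 (19 + 12 * port), which is
 * what the macro above tests.
 */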

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);

static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	MCP5x,
	SWNCQ,
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ }	/* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name = DRV_NAME,
	.id_table = nv_pci_tbl,
	.probe = nv_init_one,
#ifdef CONFIG_PM
	.suspend = ata_pci_device_suspend,
	.resume = nv_pci_device_resume,
#endif
	.remove = ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct scsi_host_template nv_adma_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue = NV_ADMA_MAX_CPBS,
	.sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary = NV_ADMA_DMA_BOUNDARY,
	.slave_configure = nv_adma_slave_config,
};

static struct scsi_host_template nv_swncq_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue = ATA_MAX_QUEUE,
	.sg_tablesize = LIBATA_MAX_PRD,
	.dma_boundary = ATA_DMA_BOUNDARY,
	.slave_configure = nv_swncq_slave_config,
};

/*
 * NV SATA controllers have various different problems with hardreset
 * protocol depending on the specific controller and device.
 *
 * GENERIC:
 *
 * bko11195 reports that link doesn't come online after hardreset on
 * generic nv's and there have been several other similar reports on
 * linux-ide.
 *
 * bko12351#c23 reports that warmplug on MCP61 doesn't work with
 * softreset.
 *
 * NF2/3:
 *
 * bko3352 reports nf2/3 controllers can't determine device signature
 * reliably after hardreset.  The following thread reports detection
 * failure on cold boot with the standard debouncing timing.
 *
 * http://thread.gmane.org/gmane.linux.ide/34098
 *
 * bko12176 reports that hardreset fails to bring up the link during
 * boot on nf2.
 *
 * CK804:
 *
 * For initial probing after boot and hot plugging, hardreset mostly
 * works fine on CK804 but curiously, reprobing on the initial port
 * by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
 * FIS in a somewhat nondeterministic way.
 *
 * SWNCQ:
 *
 * bko12351 reports that when SWNCQ is enabled, for hotplug to work,
 * hardreset should be used and hardreset can't report proper
 * signature, which suggests that mcp5x is closer to nf2 as far as
 * reset quirkiness is concerned.
 *
 * bko12703 reports that boot probing fails for Intel SSD with
 * hardreset.  Link fails to come online.  Softreset works fine.
 *
 * The failures are varied but the following patterns seem true for
 * all flavors.
 *
 * - Softreset during boot always works.
 *
 * - Hardreset during boot sometimes fails to bring up the link on
 *   certain combinations and device signature acquisition is
 *   unreliable.
 *
 * - Hardreset is often necessary after hotplug.
 *
 * So, preferring softreset for boot probing and error handling (as
 * hardreset might bring down the link) but using hardreset for
 * post-boot probing should work around the above issues in most
 * cases.  Define nv_hardreset() which only kicks in for post-boot
 * probing and use it for all variants.
 */
static struct ata_port_operations nv_generic_ops = {
	.inherits = &ata_bmdma_port_ops,
	.lost_interrupt = ATA_OP_NULL,
	.scr_read = nv_scr_read,
	.scr_write = nv_scr_write,
	.hardreset = nv_hardreset,
};

static struct ata_port_operations nv_nf2_ops = {
	.inherits = &nv_generic_ops,
	.freeze = nv_nf2_freeze,
	.thaw = nv_nf2_thaw,
};

static struct ata_port_operations nv_ck804_ops = {
	.inherits = &nv_generic_ops,
	.freeze = nv_ck804_freeze,
	.thaw = nv_ck804_thaw,
	.host_stop = nv_ck804_host_stop,
};

static struct ata_port_operations nv_adma_ops = {
	.inherits = &nv_ck804_ops,

	.check_atapi_dma = nv_adma_check_atapi_dma,
	.sff_tf_read = nv_adma_tf_read,
	.qc_defer = ata_std_qc_defer,
	.qc_prep = nv_adma_qc_prep,
	.qc_issue = nv_adma_qc_issue,
	.sff_irq_clear = nv_adma_irq_clear,

	.freeze = nv_adma_freeze,
	.thaw = nv_adma_thaw,
	.error_handler = nv_adma_error_handler,
	.post_internal_cmd = nv_adma_post_internal_cmd,

	.port_start = nv_adma_port_start,
	.port_stop = nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend = nv_adma_port_suspend,
	.port_resume = nv_adma_port_resume,
#endif
	.host_stop = nv_adma_host_stop,
};

static struct ata_port_operations nv_swncq_ops = {
	.inherits = &nv_generic_ops,

	.qc_defer = ata_std_qc_defer,
	.qc_prep = nv_swncq_qc_prep,
	.qc_issue = nv_swncq_qc_issue,

	.freeze = nv_mcp55_freeze,
	.thaw = nv_mcp55_thaw,
	.error_handler = nv_swncq_error_handler,

#ifdef CONFIG_PM
	.port_suspend = nv_swncq_port_suspend,
	.port_resume = nv_swncq_port_resume,
#endif
	.port_start = nv_swncq_port_start,
};

struct nv_pi_priv {
	irq_handler_t irq_handler;
	struct scsi_host_template *sht;
};

#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
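
/*
 * NV_PI_PRIV() takes a pointer to an anonymous compound literal, letting
 * each nv_port_info[] entry carry its interrupt handler and SHT in
 * ->private_data without defining a separately named struct per flavor.
 */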

static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask = NV_PIO_MASK,
		.mwdma_mask = NV_MWDMA_MASK,
		.udma_mask = NV_UDMA_MASK,
		.port_ops = &nv_generic_ops,
		.private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask = NV_PIO_MASK,
		.mwdma_mask = NV_MWDMA_MASK,
		.udma_mask = NV_UDMA_MASK,
		.port_ops = &nv_nf2_ops,
		.private_data = NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask = NV_PIO_MASK,
		.mwdma_mask = NV_MWDMA_MASK,
		.udma_mask = NV_UDMA_MASK,
		.port_ops = &nv_ck804_ops,
		.private_data = NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask = NV_PIO_MASK,
		.mwdma_mask = NV_MWDMA_MASK,
		.udma_mask = NV_UDMA_MASK,
		.port_ops = &nv_adma_ops,
		.private_data = NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* MCP5x */
	{
		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask = NV_PIO_MASK,
		.mwdma_mask = NV_MWDMA_MASK,
		.udma_mask = NV_UDMA_MASK,
		.port_ops = &nv_generic_ops,
		.private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* SWNCQ */
	{
		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			 ATA_FLAG_NCQ,
		.pio_mask = NV_PIO_MASK,
		.mwdma_mask = NV_MWDMA_MASK,
		.udma_mask = NV_UDMA_MASK,
		.port_ops = &nv_swncq_ops,
		.private_data = NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled;
static int swncq_enabled = 1;
static int msi_enabled;

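/*
 * ADMA-capable ports run in one of two modes: register mode, where the
 * standard SFF taskfile registers are used, and ADMA mode, where commands
 * are issued via CPBs.  The two helpers below switch between the modes by
 * toggling NV_ADMA_CTL_GO and polling NV_ADMA_STAT for the corresponding
 * IDLE/LEGACY status bits.
 */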
static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
		!(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct scsi_device *sdev0, *sdev1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	spin_lock_irqsave(ap->lock, flags);

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	port0 = ap->host->ports[0]->private_data;
	port1 = ap->host->ports[1]->private_data;
	sdev0 = ap->host->ports[0]->link.device[0].sdev;
	sdev1 = ap->host->ports[1]->link.device[0].sdev;
	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/** We have to set the DMA mask to 32-bit if either port is in
		    ATAPI mode, since they are on the same PCI device which is
		    used for DMA mapping. If we set the mask we also need to set
		    the bounce limit on both ports to ensure that the block
		    layer doesn't feed addresses that cause DMA mapping to
		    choke. If either SCSI device is not allocated yet, it's OK
		    since that port will discover its correct setting when it
		    does get allocated.
		    Note: Setting 32-bit mask should not fail. */
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       ATA_DMA_MASK);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       ATA_DMA_MASK);

		pci_set_dma_mask(pdev, ATA_DMA_MASK);
	} else {
		/** This shouldn't fail as it was set to this value before */
		pci_set_dma_mask(pdev, pp->adma_dma_mask);
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       pp->adma_dma_mask);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       pp->adma_dma_mask);
	}

	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)*ap->host->dev->dma_mask,
		segment_boundary, sg_tablesize);

	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_sff_tf_read(ap, tf);
}

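/*
 * Each entry written below is a 16-bit CPB taskfile word of the form
 * (taskfile register index << 8) | value, with WNB set on the first write,
 * CMDEND on the command register write, and IGN padding the array out to
 * 12 entries.
 */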
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}

static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (likely(flags & NV_CPB_RESP_DONE)) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			DPRINTK("Completing qc from tag %d\n", cpb_num);
			ata_qc_complete(qc);
		} else {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving. Raise host state machine violation on this
			   condition. */
			ata_port_printk(ap, KERN_ERR,
					"notifier for tag %d with no cmd?\n",
					cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			ata_port_freeze(ap);
			return 1;
		}
	}
	return 0;
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_sff_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_sff_host_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct nv_adma_port_priv *pp = ap->private_data;
		void __iomem *mmio = pp->ctl_block;
		u16 status;
		u32 gen_ctl;
		u32 notifier, notifier_error;

		notifier_clears[i] = 0;

		/* if ADMA is disabled, use standard ata interrupt handler */
		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			handled += nv_host_intr(ap, irq_stat);
			continue;
		}

		/* if in ATA register mode, check for standard interrupts */
		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			if (ata_tag_valid(ap->link.active_tag))
				/** NV_INT_DEV indication seems unreliable
				    at times at least in ADMA mode. Force it
				    on always when a command is active, to
				    prevent losing interrupts. */
				irq_stat |= NV_INT_DEV;
			handled += nv_host_intr(ap, irq_stat);
		}

		notifier = readl(mmio + NV_ADMA_NOTIFIER);
		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
		notifier_clears[i] = notifier | notifier_error;

		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
		    !notifier_error)
			/* Nothing to do */
			continue;

		status = readw(mmio + NV_ADMA_STAT);

		/*
		 * Clear status. Ensure the controller sees the
		 * clearing before we start looking at any of the CPB
		 * statuses, so that any CPB completions after this
		 * point in the handler will raise another interrupt.
		 */
		writew(status, mmio + NV_ADMA_STAT);
		readw(mmio + NV_ADMA_STAT); /* flush posted write */
		rmb();

		handled++; /* irq handled if we got here */

		/* freeze if hotplugged or controller error */
		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
				       NV_ADMA_STAT_HOTUNPLUG |
				       NV_ADMA_STAT_TIMEOUT |
				       NV_ADMA_STAT_SERROR))) {
			struct ata_eh_info *ehi = &ap->link.eh_info;

			ata_ehi_clear_desc(ehi);
			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
			if (status & NV_ADMA_STAT_TIMEOUT) {
				ehi->err_mask |= AC_ERR_SYSTEM;
				ata_ehi_push_desc(ehi, "timeout");
			} else if (status & NV_ADMA_STAT_HOTPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hotplug");
			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hot unplug");
			} else if (status & NV_ADMA_STAT_SERROR) {
				/* let EH analyze SError and figure out cause */
				ata_ehi_push_desc(ehi, "SError");
			} else
				ata_ehi_push_desc(ehi, "unknown");
			ata_port_freeze(ap);
			continue;
		}

		if (status & (NV_ADMA_STAT_DONE |
			      NV_ADMA_STAT_CPBERR |
			      NV_ADMA_STAT_CMD_COMPLETE)) {
			u32 check_commands = notifier_clears[i];
			int pos, error = 0;

			if (status & NV_ADMA_STAT_CPBERR) {
				/* check all active commands */
				if (ata_tag_valid(ap->link.active_tag))
					check_commands = 1 <<
						ap->link.active_tag;
				else
					check_commands = ap->link.sactive;
			}

			/* check CPBs for completed commands */
			while ((pos = ffs(check_commands)) && !error) {
				pos--;
				error = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
				check_commands &= ~(1 << pos);
			}
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_sff_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_sff_post_internal_cmd(qc);
}

static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	VPRINTK("ENTER\n");

	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	   pad buffers */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/* Now that the legacy PRD and padding buffer are allocated we can
	   safely raise the DMA mask to allocate the CPB/APRD table.
	   These are allowed to fail since we store the value that ends up
	   being used to set as the bounce limit in slave_config later if
	   needed. */
	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
#endif

static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr = mmio;
	ioport->data_addr = mmio + (ATA_REG_DATA * 4);
	ioport->error_addr =
	ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr =
	ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr =
	ioport->ctl_addr = mmio + 0x20;
}

static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	VPRINTK("ENTER\n");

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		aprd = (si < 5) ? &cpb->aprd[si] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}

static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands. */
	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (qc->tf.flags & ATA_TFLAG_POLLING))
		return 1;

	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}

static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_sff_qc_prep(qc);
		return;
	}

	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len = 3;
	cpb->tag = qc->tag;
	cpb->next_cpb_idx = 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_printk(qc->dev, KERN_ERR,
			"NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_sff_qc_issue(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
			handled += ata_sff_host_intr(ap, qc);
		} else {
			/*
			 * No request pending?  Clear interrupt status
			 * anyway, in case there's one pending.
			 */
			ap->ops->sff_check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		handled += nv_host_intr(host->ports[i], irq_stat);
		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

1533
7d12e780 1534static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
ada364e8 1535{
cca3974e 1536 struct ata_host *host = dev_instance;
ada364e8
TH
1537 u8 irq_stat;
1538 irqreturn_t ret;
1539
cca3974e 1540 spin_lock(&host->lock);
0d5ff566 1541 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
cca3974e
JG
1542 ret = nv_do_interrupt(host, irq_stat);
1543 spin_unlock(&host->lock);
ada364e8
TH
1544
1545 return ret;
1546}
1547
7d12e780 1548static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
ada364e8 1549{
cca3974e 1550 struct ata_host *host = dev_instance;
ada364e8
TH
1551 u8 irq_stat;
1552 irqreturn_t ret;
1553
cca3974e 1554 spin_lock(&host->lock);
0d5ff566 1555 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
cca3974e
JG
1556 ret = nv_do_interrupt(host, irq_stat);
1557 spin_unlock(&host->lock);
ada364e8
TH
1558
1559 return ret;
1560}
1561
82ef04fb 1562static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1da177e4 1563{
1da177e4 1564 if (sc_reg > SCR_CONTROL)
da3dbb17 1565 return -EINVAL;
1da177e4 1566
82ef04fb 1567 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
da3dbb17 1568 return 0;
1da177e4
LT
1569}
1570
82ef04fb 1571static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1da177e4 1572{
1da177e4 1573 if (sc_reg > SCR_CONTROL)
da3dbb17 1574 return -EINVAL;
1da177e4 1575
82ef04fb 1576 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
da3dbb17 1577 return 0;
1da177e4
LT
1578}
1579
static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;

	/* Do hardreset iff it's post-boot probing, please read the
	 * comment above port ops for details.
	 */
	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
	    !ata_dev_enabled(link->device))
		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
				    NULL, NULL);
	else {
		const unsigned long *timing = sata_ehc_deb_timing(ehc);
		int rc;

		if (!(ehc->i.flags & ATA_EHI_QUIET))
			ata_link_printk(link, KERN_INFO, "nv: skipping "
					"hardreset on occupied port\n");

		/* make sure the link is online */
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link (errno=%d)\n", rc);
	}

	/* device signature acquisition is unreliable */
	return -EAGAIN;
}

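/*
 * Each port owns a small field in the shared interrupt status/enable
 * registers: fields are 4 bits apart (NV_INT_PORT_SHIFT) on nf2/ck804 and
 * 16 bits apart on MCP55.  The freeze helpers below mask out the port's
 * enable bits; the thaw helpers ack pending status and unmask the port.
 */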
39f87582
TH
1612static void nv_nf2_freeze(struct ata_port *ap)
1613{
0d5ff566 1614 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
39f87582
TH
1615 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1616 u8 mask;
1617
1618 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1619 	mask &= ~(NV_INT_ALL << shift);
1620 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1621}
1622
1623static void nv_nf2_thaw(struct ata_port *ap)
1624{
1625 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1626 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1627 u8 mask;
1628
1629 	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1630
1631 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1632 	mask |= (NV_INT_MASK << shift);
1633 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1634}
1635
1636static void nv_ck804_freeze(struct ata_port *ap)
1637{
1638 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1639 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1640 u8 mask;
1641
1642 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1643 mask &= ~(NV_INT_ALL << shift);
1644 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1645}
1646
1647static void nv_ck804_thaw(struct ata_port *ap)
1648{
1649 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1650 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1651 u8 mask;
1652
1653 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1654
1655 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1656 mask |= (NV_INT_MASK << shift);
1657 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1658}
1659
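/* MCP55 uses 32-bit interrupt status/enable registers in the MMIO BAR
 * with a wider per-port field; unlike nf2/ck804, freeze/thaw here also
 * mask/unmask the SFF device interrupt via ata_sff_freeze()/ata_sff_thaw().
 */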
1660static void nv_mcp55_freeze(struct ata_port *ap)
1661{
1662 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1663 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1664 u32 mask;
1665
1666 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1667
1668 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1669 mask &= ~(NV_INT_ALL_MCP55 << shift);
1670 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1671 	ata_sff_freeze(ap);
1672}
1673
1674static void nv_mcp55_thaw(struct ata_port *ap)
1675{
1676 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1677 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1678 u32 mask;
1679
1680 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1681
1682 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1683 mask |= (NV_INT_MASK_MCP55 << shift);
1684 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1685 	ata_sff_thaw(ap);
1686}
1687
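/* ADMA error handling: dump the controller state while still in ADMA
 * mode, drop back to register mode, invalidate all CPBs, clear the CPB
 * fetch count and pulse the channel reset bit, then let the generic
 * SFF error handler take over.
 */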
1688static void nv_adma_error_handler(struct ata_port *ap)
1689{
1690 struct nv_adma_port_priv *pp = ap->private_data;
1691 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1692 		void __iomem *mmio = pp->ctl_block;
1693 int i;
1694 u16 tmp;
1695
1696 		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1697 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1698 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1699 u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1700 u32 status = readw(mmio + NV_ADMA_STAT);
1701 u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1702 u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1703
1704 ata_port_printk(ap, KERN_ERR,
1705 "EH in ADMA mode, notifier 0x%X "
1706 "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1707 "next cpb count 0x%X next cpb idx 0x%x\n",
1708 notifier, notifier_error, gen_ctl, status,
1709 cpb_count, next_cpb_idx);
1710
1711 			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1712 				struct nv_adma_cpb *cpb = &pp->cpb[i];
1713 				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1714 				    ap->link.sactive & (1 << i))
1715 ata_port_printk(ap, KERN_ERR,
1716 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1717 i, cpb->ctl_flags, cpb->resp_flags);
1718 }
1719 }
1720
1721 /* Push us back into port register mode for error handling. */
1722 nv_adma_register_mode(ap);
1723
1724 /* Mark all of the CPBs as invalid to prevent them from
1725 being executed */
1726 		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1727 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1728
1729 /* clear CPB fetch count */
1730 writew(0, mmio + NV_ADMA_CPB_COUNT);
1731
1732 /* Reset channel */
1733 tmp = readw(mmio + NV_ADMA_CTL);
1734 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1735 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1736 udelay(1);
1737 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1738 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1739 }
1740
1741 	ata_sff_error_handler(ap);
1742}
1743
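/* The defer queue is a small FIFO of tags (head/tail ring mirrored by
 * the defer_bits mask) holding NCQ commands that cannot be issued yet
 * because another command still owns the taskfile.
 */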
1744static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1745{
1746 struct nv_swncq_port_priv *pp = ap->private_data;
1747 struct defer_queue *dq = &pp->defer_queue;
1748
1749 /* queue is full */
1750 WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1751 dq->defer_bits |= (1 << qc->tag);
1752 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1753}
1754
1755static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1756{
1757 struct nv_swncq_port_priv *pp = ap->private_data;
1758 struct defer_queue *dq = &pp->defer_queue;
1759 unsigned int tag;
1760
1761 if (dq->head == dq->tail) /* null queue */
1762 return NULL;
1763
1764 tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1765 dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1766 WARN_ON(!(dq->defer_bits & (1 << tag)));
1767 dq->defer_bits &= ~(1 << tag);
1768
1769 return ata_qc_from_tag(ap, tag);
1770}
1771
1772static void nv_swncq_fis_reinit(struct ata_port *ap)
1773{
1774 struct nv_swncq_port_priv *pp = ap->private_data;
1775
1776 pp->dhfis_bits = 0;
1777 pp->dmafis_bits = 0;
1778 pp->sdbfis_bits = 0;
1779 pp->ncq_flags = 0;
1780}
1781
1782static void nv_swncq_pp_reinit(struct ata_port *ap)
1783{
1784 struct nv_swncq_port_priv *pp = ap->private_data;
1785 struct defer_queue *dq = &pp->defer_queue;
1786
1787 dq->head = 0;
1788 dq->tail = 0;
1789 dq->defer_bits = 0;
1790 pp->qc_active = 0;
1791 pp->last_issue_tag = ATA_TAG_POISON;
1792 nv_swncq_fis_reinit(ap);
1793}
1794
1795static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1796{
1797 struct nv_swncq_port_priv *pp = ap->private_data;
1798
1799 writew(fis, pp->irq_block);
1800}
1801
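/* ata_bmdma_stop() only dereferences qc->ap, so a dummy qc on the
 * stack is enough to stop the BMDMA engine when no real command is at
 * hand (e.g. during NCQ error handling).
 */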
1802static void __ata_bmdma_stop(struct ata_port *ap)
1803{
1804 struct ata_queued_cmd qc;
1805
1806 qc.ap = ap;
1807 ata_bmdma_stop(&qc);
1808}
1809
1810static void nv_swncq_ncq_stop(struct ata_port *ap)
1811{
1812 struct nv_swncq_port_priv *pp = ap->private_data;
1813 unsigned int i;
1814 u32 sactive;
1815 u32 done_mask;
1816
1817 ata_port_printk(ap, KERN_ERR,
1818 		"EH in SWNCQ mode, QC: qc_active 0x%X sactive 0x%X\n",
1819 ap->qc_active, ap->link.sactive);
1820 ata_port_printk(ap, KERN_ERR,
1821 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1822 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1823 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1824 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1825
1826 ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1827 		ap->ops->sff_check_status(ap),
1828 ioread8(ap->ioaddr.error_addr));
1829
1830 sactive = readl(pp->sactive_block);
1831 done_mask = pp->qc_active ^ sactive;
1832
1833 	ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
1834 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1835 u8 err = 0;
1836 if (pp->qc_active & (1 << i))
1837 err = 0;
1838 else if (done_mask & (1 << i))
1839 err = 1;
1840 else
1841 continue;
1842
1843 ata_port_printk(ap, KERN_ERR,
1844 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1845 (pp->dhfis_bits >> i) & 0x1,
1846 (pp->dmafis_bits >> i) & 0x1,
1847 (pp->sdbfis_bits >> i) & 0x1,
1848 (sactive >> i) & 0x1,
1849 				(err ? "error! tag doesn't exist" : " "));
1850 }
1851
1852 nv_swncq_pp_reinit(ap);
1853 	ap->ops->sff_irq_clear(ap);
1854 __ata_bmdma_stop(ap);
1855 nv_swncq_irq_clear(ap, 0xffff);
1856}
1857
1858static void nv_swncq_error_handler(struct ata_port *ap)
1859{
1860 struct ata_eh_context *ehc = &ap->link.eh_context;
1861
1862 if (ap->link.sactive) {
1863 nv_swncq_ncq_stop(ap);
1864 		ehc->i.action |= ATA_EH_RESET;
1865 }
1866
1867 	ata_sff_error_handler(ap);
1868}
1869
1870#ifdef CONFIG_PM
1871static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1872{
1873 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1874 u32 tmp;
1875
1876 /* clear irq */
1877 writel(~0, mmio + NV_INT_STATUS_MCP55);
1878
1879 /* disable irq */
1880 writel(0, mmio + NV_INT_ENABLE_MCP55);
1881
1882 /* disable swncq */
1883 tmp = readl(mmio + NV_CTL_MCP55);
1884 tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1885 writel(tmp, mmio + NV_CTL_MCP55);
1886
1887 return 0;
1888}
1889
1890static int nv_swncq_port_resume(struct ata_port *ap)
1891{
1892 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1893 u32 tmp;
1894
1895 /* clear irq */
1896 writel(~0, mmio + NV_INT_STATUS_MCP55);
1897
1898 /* enable irq */
1899 writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1900
1901 /* enable swncq */
1902 tmp = readl(mmio + NV_CTL_MCP55);
1903 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1904
1905 return 0;
1906}
1907#endif
1908
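/* Host-wide SWNCQ setup: clear the ECO 398 bit in PCI config space,
 * enable SWNCQ on both ports in NV_CTL_MCP55 and unmask the interrupt
 * sources (the 0x00fd00fd mask appears to cover both ports), then ack
 * any stale port interrupts.
 */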
1909static void nv_swncq_host_init(struct ata_host *host)
1910{
1911 u32 tmp;
1912 void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1913 struct pci_dev *pdev = to_pci_dev(host->dev);
1914 u8 regval;
1915
1916 /* disable ECO 398 */
1917 pci_read_config_byte(pdev, 0x7f, &regval);
1918 regval &= ~(1 << 7);
1919 pci_write_config_byte(pdev, 0x7f, regval);
1920
1921 /* enable swncq */
1922 tmp = readl(mmio + NV_CTL_MCP55);
1923 VPRINTK("HOST_CTL:0x%X\n", tmp);
1924 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1925
1926 /* enable irq intr */
1927 tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1928 VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1929 writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1930
1931 /* clear port irq */
1932 writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1933}
1934
1935static int nv_swncq_slave_config(struct scsi_device *sdev)
1936{
1937 struct ata_port *ap = ata_shost_to_port(sdev->host);
1938 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1939 struct ata_device *dev;
1940 int rc;
1941 u8 rev;
1942 u8 check_maxtor = 0;
1943 unsigned char model_num[ATA_ID_PROD_LEN + 1];
1944
1945 rc = ata_scsi_slave_config(sdev);
1946 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1947 /* Not a proper libata device, ignore */
1948 return rc;
1949
1950 dev = &ap->link.device[sdev->id];
1951 if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1952 return rc;
1953
1954 /* if MCP51 and Maxtor, then disable ncq */
1955 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1956 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1957 check_maxtor = 1;
1958
1959 /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1960 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1961 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1962 pci_read_config_byte(pdev, 0x8, &rev);
1963 if (rev <= 0xa2)
1964 check_maxtor = 1;
1965 }
1966
1967 if (!check_maxtor)
1968 return rc;
1969
1970 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1971
1972 if (strncmp(model_num, "Maxtor", 6) == 0) {
1973 		ata_scsi_change_queue_depth(sdev, 1, SCSI_QDEPTH_DEFAULT);
1974 ata_dev_printk(dev, KERN_NOTICE,
1975 "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1976 }
1977
1978 return rc;
1979}
1980
1981static int nv_swncq_port_start(struct ata_port *ap)
1982{
1983 struct device *dev = ap->host->dev;
1984 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1985 struct nv_swncq_port_priv *pp;
1986 int rc;
1987
1988 rc = ata_port_start(ap);
1989 if (rc)
1990 return rc;
1991
1992 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1993 if (!pp)
1994 return -ENOMEM;
1995
1996 pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1997 &pp->prd_dma, GFP_KERNEL);
1998 if (!pp->prd)
1999 return -ENOMEM;
2000 memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
2001
2002 ap->private_data = pp;
2003 pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
2004 pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
2005 pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
2006
2007 return 0;
2008}
2009
2010static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
2011{
2012 if (qc->tf.protocol != ATA_PROT_NCQ) {
2013 		ata_sff_qc_prep(qc);
2014 return;
2015 }
2016
2017 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2018 return;
2019
2020 nv_swncq_fill_sg(qc);
2021}
2022
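/* One PRD table is kept per tag: each scatterlist segment is split so
 * that no PRD entry crosses a 64KB boundary, and the last entry is
 * flagged with ATA_PRD_EOT.
 */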
2023static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
2024{
2025 struct ata_port *ap = qc->ap;
2026 struct scatterlist *sg;
2027 struct nv_swncq_port_priv *pp = ap->private_data;
2028 struct ata_prd *prd;
2029 	unsigned int si, idx;
2030
2031 prd = pp->prd + ATA_MAX_PRD * qc->tag;
2032
2033 idx = 0;
2034 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
2035 u32 addr, offset;
2036 u32 sg_len, len;
2037
2038 addr = (u32)sg_dma_address(sg);
2039 sg_len = sg_dma_len(sg);
2040
2041 while (sg_len) {
2042 offset = addr & 0xffff;
2043 len = sg_len;
2044 if ((offset + sg_len) > 0x10000)
2045 len = 0x10000 - offset;
2046
2047 prd[idx].addr = cpu_to_le32(addr);
2048 prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2049
2050 idx++;
2051 sg_len -= len;
2052 addr += len;
2053 }
2054 }
2055
2056 	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2057}
2058
2059static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2060 struct ata_queued_cmd *qc)
2061{
2062 struct nv_swncq_port_priv *pp = ap->private_data;
2063
2064 if (qc == NULL)
2065 return 0;
2066
2067 DPRINTK("Enter\n");
2068
2069 writel((1 << qc->tag), pp->sactive_block);
2070 pp->last_issue_tag = qc->tag;
2071 pp->dhfis_bits &= ~(1 << qc->tag);
2072 pp->dmafis_bits &= ~(1 << qc->tag);
2073 pp->qc_active |= (0x1 << qc->tag);
2074
2075 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2076 ap->ops->sff_exec_command(ap, &qc->tf);
2077
2078 DPRINTK("Issued tag %u\n", qc->tag);
2079
2080 return 0;
2081}
2082
2083static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2084{
2085 struct ata_port *ap = qc->ap;
2086 struct nv_swncq_port_priv *pp = ap->private_data;
2087
2088 if (qc->tf.protocol != ATA_PROT_NCQ)
2089 		return ata_sff_qc_issue(qc);
2090
2091 DPRINTK("Enter\n");
2092
2093 if (!pp->qc_active)
2094 nv_swncq_issue_atacmd(ap, qc);
2095 else
2096 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
2097
2098 return 0;
2099}
2100
2101static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2102{
2103 u32 serror;
2104 struct ata_eh_info *ehi = &ap->link.eh_info;
2105
2106 ata_ehi_clear_desc(ehi);
2107
2108 /* AHCI needs SError cleared; otherwise, it might lock up */
2109 sata_scr_read(&ap->link, SCR_ERROR, &serror);
2110 sata_scr_write(&ap->link, SCR_ERROR, serror);
2111
2112 	/* analyze @fis */
2113 if (fis & NV_SWNCQ_IRQ_ADDED)
2114 ata_ehi_push_desc(ehi, "hot plug");
2115 else if (fis & NV_SWNCQ_IRQ_REMOVED)
2116 ata_ehi_push_desc(ehi, "hot unplug");
2117
2118 ata_ehi_hotplugged(ehi);
2119
2120 /* okay, let's hand over to EH */
2121 ehi->serror |= serror;
2122
2123 ata_port_freeze(ap);
2124}
2125
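/* SDB FIS handling: tags set in qc_active but cleared in SActive have
 * completed (done_mask = qc_active ^ sactive), so complete them, then
 * either reissue a command that never got its D2H register FIS or
 * start the next deferred command.
 */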
2126static int nv_swncq_sdbfis(struct ata_port *ap)
2127{
2128 struct ata_queued_cmd *qc;
2129 struct nv_swncq_port_priv *pp = ap->private_data;
2130 struct ata_eh_info *ehi = &ap->link.eh_info;
2131 u32 sactive;
2132 int nr_done = 0;
2133 u32 done_mask;
2134 int i;
2135 u8 host_stat;
2136 u8 lack_dhfis = 0;
2137
2138 host_stat = ap->ops->bmdma_status(ap);
2139 if (unlikely(host_stat & ATA_DMA_ERR)) {
2140 		/* error when transferring data to/from memory */
2141 ata_ehi_clear_desc(ehi);
2142 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2143 ehi->err_mask |= AC_ERR_HOST_BUS;
2144 		ehi->action |= ATA_EH_RESET;
2145 return -EINVAL;
2146 }
2147
2148 	ap->ops->sff_irq_clear(ap);
2149 __ata_bmdma_stop(ap);
2150
2151 sactive = readl(pp->sactive_block);
2152 done_mask = pp->qc_active ^ sactive;
2153
2154 if (unlikely(done_mask & sactive)) {
2155 ata_ehi_clear_desc(ehi);
2156 ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition"
2157 "(%08x->%08x)", pp->qc_active, sactive);
2158 ehi->err_mask |= AC_ERR_HSM;
2159 		ehi->action |= ATA_EH_RESET;
2160 return -EINVAL;
2161 }
2162 for (i = 0; i < ATA_MAX_QUEUE; i++) {
2163 if (!(done_mask & (1 << i)))
2164 continue;
2165
2166 qc = ata_qc_from_tag(ap, i);
2167 if (qc) {
2168 ata_qc_complete(qc);
2169 pp->qc_active &= ~(1 << i);
2170 pp->dhfis_bits &= ~(1 << i);
2171 pp->dmafis_bits &= ~(1 << i);
2172 pp->sdbfis_bits |= (1 << i);
2173 nr_done++;
2174 }
2175 }
2176
2177 if (!ap->qc_active) {
2178 DPRINTK("over\n");
2179 nv_swncq_pp_reinit(ap);
2180 return nr_done;
2181 }
2182
2183 if (pp->qc_active & pp->dhfis_bits)
2184 return nr_done;
2185
2186 if ((pp->ncq_flags & ncq_saw_backout) ||
2187 (pp->qc_active ^ pp->dhfis_bits))
2188 		/* if the controller can't get a device to host register FIS,
2189 		 * the driver needs to reissue the new command.
2190 		 */
2191 lack_dhfis = 1;
2192
2193 DPRINTK("id 0x%x QC: qc_active 0x%x,"
2194 "SWNCQ:qc_active 0x%X defer_bits %X "
2195 "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2196 ap->print_id, ap->qc_active, pp->qc_active,
2197 pp->defer_queue.defer_bits, pp->dhfis_bits,
2198 pp->dmafis_bits, pp->last_issue_tag);
2199
2200 nv_swncq_fis_reinit(ap);
2201
2202 if (lack_dhfis) {
2203 qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2204 nv_swncq_issue_atacmd(ap, qc);
2205 return nr_done;
2206 }
2207
2208 if (pp->defer_queue.defer_bits) {
2209 /* send deferral queue command */
2210 qc = nv_swncq_qc_from_dq(ap);
2211 WARN_ON(qc == NULL);
2212 nv_swncq_issue_atacmd(ap, qc);
2213 }
2214
2215 return nr_done;
2216}
2217
2218static inline u32 nv_swncq_tag(struct ata_port *ap)
2219{
2220 struct nv_swncq_port_priv *pp = ap->private_data;
2221 u32 tag;
2222
2223 tag = readb(pp->tag_block) >> 2;
2224 return (tag & 0x1f);
2225}
2226
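/* A DMA Setup FIS names the tag the drive wants to transfer next: read
 * the tag from the tag register, point the BMDMA engine at that tag's
 * PRD table, set the direction and start the transfer.
 */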
2227static int nv_swncq_dmafis(struct ata_port *ap)
2228{
2229 struct ata_queued_cmd *qc;
2230 unsigned int rw;
2231 u8 dmactl;
2232 u32 tag;
2233 struct nv_swncq_port_priv *pp = ap->private_data;
2234
2235 __ata_bmdma_stop(ap);
2236 tag = nv_swncq_tag(ap);
2237
2238 DPRINTK("dma setup tag 0x%x\n", tag);
2239 qc = ata_qc_from_tag(ap, tag);
2240
2241 if (unlikely(!qc))
2242 return 0;
2243
2244 rw = qc->tf.flags & ATA_TFLAG_WRITE;
2245
2246 /* load PRD table addr. */
2247 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2248 ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2249
2250 /* specify data direction, triple-check start bit is clear */
2251 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2252 dmactl &= ~ATA_DMA_WR;
2253 if (!rw)
2254 dmactl |= ATA_DMA_WR;
2255
2256 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2257
2258 return 1;
2259}
2260
2261static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2262{
2263 struct nv_swncq_port_priv *pp = ap->private_data;
2264 struct ata_queued_cmd *qc;
2265 struct ata_eh_info *ehi = &ap->link.eh_info;
2266 u32 serror;
2267 u8 ata_stat;
2268 int rc = 0;
2269
2270 	ata_stat = ap->ops->sff_check_status(ap);
2271 nv_swncq_irq_clear(ap, fis);
2272 if (!fis)
2273 return;
2274
2275 if (ap->pflags & ATA_PFLAG_FROZEN)
2276 return;
2277
2278 if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2279 nv_swncq_hotplug(ap, fis);
2280 return;
2281 }
2282
2283 if (!pp->qc_active)
2284 return;
2285
2286 	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2287 		return;
2288 	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2289
2290 if (ata_stat & ATA_ERR) {
2291 ata_ehi_clear_desc(ehi);
2292 		ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
2293 ehi->err_mask |= AC_ERR_DEV;
2294 ehi->serror |= serror;
2295 		ehi->action |= ATA_EH_RESET;
2296 ata_port_freeze(ap);
2297 return;
2298 }
2299
2300 if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2301 		/* If the IRQ signals a backout, the driver must issue
2302 		 * the new command again some time later.
2303 		 */
2304 pp->ncq_flags |= ncq_saw_backout;
2305 }
2306
2307 if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2308 pp->ncq_flags |= ncq_saw_sdb;
2309 DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2310 "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2311 ap->print_id, pp->qc_active, pp->dhfis_bits,
2312 pp->dmafis_bits, readl(pp->sactive_block));
2313 rc = nv_swncq_sdbfis(ap);
2314 if (rc < 0)
2315 goto irq_error;
2316 }
2317
2318 if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2319 /* The interrupt indicates the new command
2320 * was transmitted correctly to the drive.
2321 */
2322 pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2323 pp->ncq_flags |= ncq_saw_d2h;
2324 if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2325 ata_ehi_push_desc(ehi, "illegal fis transaction");
2326 ehi->err_mask |= AC_ERR_HSM;
2327 			ehi->action |= ATA_EH_RESET;
2328 goto irq_error;
2329 }
2330
2331 if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2332 !(pp->ncq_flags & ncq_saw_dmas)) {
2333 			ata_stat = ap->ops->sff_check_status(ap);
2334 if (ata_stat & ATA_BUSY)
2335 goto irq_exit;
2336
2337 if (pp->defer_queue.defer_bits) {
2338 DPRINTK("send next command\n");
2339 qc = nv_swncq_qc_from_dq(ap);
2340 nv_swncq_issue_atacmd(ap, qc);
2341 }
2342 }
2343 }
2344
2345 if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2346 /* program the dma controller with appropriate PRD buffers
2347 * and start the DMA transfer for requested command.
2348 */
2349 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2350 pp->ncq_flags |= ncq_saw_dmas;
2351 rc = nv_swncq_dmafis(ap);
2352 }
2353
2354irq_exit:
2355 return;
2356irq_error:
2357 ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2358 ata_port_freeze(ap);
2359 return;
2360}
2361
2362static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2363{
2364 struct ata_host *host = dev_instance;
2365 unsigned int i;
2366 unsigned int handled = 0;
2367 unsigned long flags;
2368 u32 irq_stat;
2369
2370 spin_lock_irqsave(&host->lock, flags);
2371
2372 irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2373
2374 for (i = 0; i < host->n_ports; i++) {
2375 struct ata_port *ap = host->ports[i];
2376
2377 if (ap->link.sactive) {
2378 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2379 handled = 1;
2380 } else {
2381 			if (irq_stat)	/* preserve hotplug bits */
2382 nv_swncq_irq_clear(ap, 0xfff0);
2383
2384 			handled += nv_host_intr(ap, (u8)irq_stat);
2385 }
2386 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2387 }
2388
2389 spin_unlock_irqrestore(&host->lock, flags);
2390
2391 return IRQ_RETVAL(handled);
2392}
2393
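/* Probe: tell the SATA function apart from its IDE siblings by its six
 * BARs, pick the ADMA/SWNCQ/generic port_info based on chip type and
 * module parameters, map the MMIO BAR for SCR access and hand the host
 * to the SFF activation helper.
 */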
2394static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2395{
2396 	static int printed_version;
2397 	const struct ata_port_info *ppi[] = { NULL, NULL };
2398 	struct nv_pi_priv *ipriv;
2399 	struct ata_host *host;
2400 	struct nv_host_priv *hpriv;
2401 int rc;
2402 u32 bar;
2403 	void __iomem *base;
2404 	unsigned long type = ent->driver_data;
2405
2406 // Make sure this is a SATA controller by counting the number of bars
2407 // (NVIDIA SATA controllers will always have six bars). Otherwise,
2408 // it's an IDE controller and we ignore it.
2409 	for (bar = 0; bar < 6; bar++)
2410 if (pci_resource_start(pdev, bar) == 0)
2411 return -ENODEV;
2412
2413 	if (!printed_version++)
2414 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2415
2416 	rc = pcim_enable_device(pdev);
2417 	if (rc)
2418 		return rc;
2419
2420 	/* determine type and allocate host */
2421 	if (type == CK804 && adma_enabled) {
2422 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2423 type = ADMA;
2424 } else if (type == MCP5x && swncq_enabled) {
2425 dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
2426 type = SWNCQ;
2427 }
2428
2429 	ppi[0] = &nv_port_info[type];
2430 	ipriv = ppi[0]->private_data;
2431 	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2432 if (rc)
2433 return rc;
2434
2435 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2436 	if (!hpriv)
2437 		return -ENOMEM;
2438 hpriv->type = type;
2439 host->private_data = hpriv;
2440
2441 /* request and iomap NV_MMIO_BAR */
2442 rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2443 if (rc)
2444 return rc;
2445
2446 /* configure SCR access */
2447 base = host->iomap[NV_MMIO_BAR];
2448 host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2449 host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2450
2451 	/* enable SATA space for CK804 */
2452 	if (type >= CK804) {
2453 u8 regval;
2454
2455 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2456 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2457 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2458 }
2459
2460 	/* init ADMA */
2461 	if (type == ADMA) {
2462 		rc = nv_adma_host_init(host);
2463 		if (rc)
2464 			return rc;
2465 	} else if (type == SWNCQ)
2466 		nv_swncq_host_init(host);
2467
2468 if (msi_enabled) {
2469 dev_printk(KERN_NOTICE, &pdev->dev, "Using MSI\n");
2470 pci_enable_msi(pdev);
2471 }
2472
2473 	pci_set_master(pdev);
2474 	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2475}
2476
2477#ifdef CONFIG_PM
2478static int nv_pci_device_resume(struct pci_dev *pdev)
2479{
2480 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2481 struct nv_host_priv *hpriv = host->private_data;
2482 	int rc;
2483
2484 	rc = ata_pci_device_do_resume(pdev);
2485 	if (rc)
2486 		return rc;
2487
2488 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2489 		if (hpriv->type >= CK804) {
2490 u8 regval;
2491
2492 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2493 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2494 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2495 }
2496 		if (hpriv->type == ADMA) {
2497 u32 tmp32;
2498 struct nv_adma_port_priv *pp;
2499 /* enable/disable ADMA on the ports appropriately */
2500 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2501
2502 pp = host->ports[0]->private_data;
2503 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2504 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2505 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2506 else
2507 tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
2508 					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2509 			pp = host->ports[1]->private_data;
2510 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2511 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2512 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2513 else
2514 tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
2515 					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2516
2517 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2518 }
2519 }
2520
2521 ata_host_resume(host);
2522
2523 return 0;
2524}
2525#endif
2526
2527static void nv_ck804_host_stop(struct ata_host *host)
2528{
2529 	struct pci_dev *pdev = to_pci_dev(host->dev);
2530 u8 regval;
2531
2532 /* disable SATA space for CK804 */
2533 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2534 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2535 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2536}
2537
2538static void nv_adma_host_stop(struct ata_host *host)
2539{
2540 struct pci_dev *pdev = to_pci_dev(host->dev);
2541 u32 tmp32;
2542
2543 /* disable ADMA on the ports */
2544 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2545 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2546 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2547 NV_MCP_SATA_CFG_20_PORT1_EN |
2548 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2549
2550 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2551
2552 nv_ck804_host_stop(host);
2553}
2554
2555static int __init nv_init(void)
2556{
2557 	return pci_register_driver(&nv_pci_driver);
2558}
2559
2560static void __exit nv_exit(void)
2561{
2562 pci_unregister_driver(&nv_pci_driver);
2563}
2564
2565module_init(nv_init);
2566module_exit(nv_exit);
2567module_param_named(adma, adma_enabled, bool, 0444);
2568MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2569module_param_named(swncq, swncq_enabled, bool, 0444);
2570MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2571module_param_named(msi, msi_enabled, bool, 0444);
2572MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
2573