/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ.  Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.5"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL

enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= ATA_PIO4,
	NV_MWDMA_MASK			= ATA_MWDMA2,
	NV_UDMA_MASK			= ATA_UDMA6,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 reg offset */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	/* MCP55 */
	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ ENABLE BITS */
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SW NCQ status bits */
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,

};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};

enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;	/* 0 */
	u8			reserved1;	/* 1 */
	u8			ctl_flags;	/* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;		/* 3 */
	u8			tag;		/* 4 */
	u8			next_cpb_idx;	/* 5 */
	__le16			reserved2;	/* 6-7 */
	__le16			tf[12];		/* 8-31 */
	struct nv_adma_prd	aprd[5];	/* 32-111 */
	__le64			next_aprd;	/* 112-119 */
	__le64			reserved3;	/* 120-127 */
};


struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;
	u8			flags;
	int			last_issue_ncq;
};

struct nv_host_priv {
	unsigned long		type;
};

struct defer_queue {
	u32		defer_bits;
	unsigned int	head;
	unsigned int	tail;
	unsigned int	tag[ATA_MAX_QUEUE];
};

enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),
};

struct nv_swncq_port_priv {
	struct ata_bmdma_prd *prd;	 /* our SG list */
	dma_addr_t	prd_dma;	 /* and its DMA mapping */
	void __iomem	*sactive_block;
	void __iomem	*irq_block;
	void __iomem	*tag_block;
	u32		qc_active;

	unsigned int	last_issue_tag;

	/* FIFO circular queue to store deferred commands */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis */
	u32	dhfis_bits;
	u32	dmafis_bits;
	u32	sdbfis_bits;

	unsigned int	ncq_flags;
};

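/*
 * NV_ADMA_CHECK_INTR() below tests the per-port interrupt-pending bit in the
 * ADMA general control/status word: bit 19 for port 0, bit 31 for port 1
 * (i.e. 19 + 12 * port).
 */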
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);

static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	MCP5x,
	SWNCQ,
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct scsi_host_template nv_adma_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
};

static struct scsi_host_template nv_swncq_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
};

/*
 * NV SATA controllers have various different problems with hardreset
 * protocol depending on the specific controller and device.
 *
 * GENERIC:
 *
 * bko11195 reports that link doesn't come online after hardreset on
 * generic nv's and there have been several other similar reports on
 * linux-ide.
 *
 * bko12351#c23 reports that warmplug on MCP61 doesn't work with
 * softreset.
 *
 * NF2/3:
 *
 * bko3352 reports nf2/3 controllers can't determine device signature
 * reliably after hardreset.  The following thread reports detection
 * failure on cold boot with the standard debouncing timing.
 *
 * http://thread.gmane.org/gmane.linux.ide/34098
 *
 * bko12176 reports that hardreset fails to bring up the link during
 * boot on nf2.
 *
 * CK804:
 *
 * For initial probing after boot and hot plugging, hardreset mostly
 * works fine on CK804 but curiously, reprobing on the initial port
 * by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
 * FIS in a somewhat nondeterministic way.
 *
 * SWNCQ:
 *
 * bko12351 reports that when SWNCQ is enabled, for hotplug to work,
 * hardreset should be used and hardreset can't report proper
 * signature, which suggests that mcp5x is closer to nf2 as far as
 * reset quirkiness is concerned.
 *
 * bko12703 reports that boot probing fails for intel SSD with
 * hardreset.  Link fails to come online.  Softreset works fine.
 *
 * The failures are varied but the following patterns seem true for
 * all flavors.
 *
 * - Softreset during boot always works.
 *
 * - Hardreset during boot sometimes fails to bring up the link on
 *   certain combinations and device signature acquisition is
 *   unreliable.
 *
 * - Hardreset is often necessary after hotplug.
 *
 * So, preferring softreset for boot probing and error handling (as
 * hardreset might bring down the link) but using hardreset for
 * post-boot probing should work around the above issues in most
 * cases.  Define nv_hardreset() which only kicks in for post-boot
 * probing and use it for all variants.
 */
static struct ata_port_operations nv_generic_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.lost_interrupt		= ATA_OP_NULL,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.hardreset		= nv_hardreset,
};

static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
};

static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.host_stop		= nv_ck804_host_stop,
};

static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_ck804_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.sff_tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.sff_irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};

static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_generic_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};

struct nv_pi_priv {
	irq_handler_t			irq_handler;
	struct scsi_host_template	*sht;
};

#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }

static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* MCP5x */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* SWNCQ */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled;
static int swncq_enabled = 1;
static int msi_enabled;

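/*
 * Switch a port from ADMA mode back to legacy register mode: wait for the
 * engine to report idle, clear the GO bit, then wait for the LEGACY status
 * bit before marking the port as being in register mode.
 */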
static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct scsi_device *sdev0, *sdev1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	spin_lock_irqsave(ap->lock, flags);

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	port0 = ap->host->ports[0]->private_data;
	port1 = ap->host->ports[1]->private_data;
	sdev0 = ap->host->ports[0]->link.device[0].sdev;
	sdev1 = ap->host->ports[1]->link.device[0].sdev;
	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/** We have to set the DMA mask to 32-bit if either port is in
		    ATAPI mode, since they are on the same PCI device which is
		    used for DMA mapping.  If we set the mask we also need to set
		    the bounce limit on both ports to ensure that the block
		    layer doesn't feed addresses that cause DMA mapping to
		    choke.  If either SCSI device is not allocated yet, it's OK
		    since that port will discover its correct setting when it
		    does get allocated.
		    Note: Setting 32-bit mask should not fail. */
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       ATA_DMA_MASK);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       ATA_DMA_MASK);

		pci_set_dma_mask(pdev, ATA_DMA_MASK);
	} else {
		/** This shouldn't fail as it was set to this value before */
		pci_set_dma_mask(pdev, pp->adma_dma_mask);
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       pp->adma_dma_mask);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       pp->adma_dma_mask);
	}

	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)*ap->host->dev->dma_mask,
		segment_boundary, sg_tablesize);

	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_sff_tf_read(ap, tf);
}

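/*
 * Encode a taskfile into CPB form: one little-endian word per register write,
 * (register << 8) | value, with WNB set on the first entry, CMDEND on the
 * command entry, and IGN entries padding the list out to 12 words.
 */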
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}

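/*
 * Examine one CPB's response flags.  Returns 1 if the command completed,
 * 0 if it is still outstanding, and -1 on error (in which case the port has
 * already been aborted or frozen for EH).
 */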
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return -1;
	}

	if (likely(flags & NV_CPB_RESP_DONE))
		return 1;
	return 0;
}

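/*
 * Handle a legacy-mode interrupt for one port: freeze on hotplug events and
 * hand device interrupts to the BMDMA handler.  Used by the nf2/3 and CK804
 * paths and by the ADMA handler when a port is in register mode.
 */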
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_sff_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_bmdma_port_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct nv_adma_port_priv *pp = ap->private_data;
		void __iomem *mmio = pp->ctl_block;
		u16 status;
		u32 gen_ctl;
		u32 notifier, notifier_error;

		notifier_clears[i] = 0;

		/* if ADMA is disabled, use standard ata interrupt handler */
		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			handled += nv_host_intr(ap, irq_stat);
			continue;
		}

		/* if in ATA register mode, check for standard interrupts */
		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			if (ata_tag_valid(ap->link.active_tag))
				/** NV_INT_DEV indication seems unreliable
				    at times at least in ADMA mode. Force it
				    on always when a command is active, to
				    prevent losing interrupts. */
				irq_stat |= NV_INT_DEV;
			handled += nv_host_intr(ap, irq_stat);
		}

		notifier = readl(mmio + NV_ADMA_NOTIFIER);
		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
		notifier_clears[i] = notifier | notifier_error;

		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
		    !notifier_error)
			/* Nothing to do */
			continue;

		status = readw(mmio + NV_ADMA_STAT);

		/*
		 * Clear status. Ensure the controller sees the
		 * clearing before we start looking at any of the CPB
		 * statuses, so that any CPB completions after this
		 * point in the handler will raise another interrupt.
		 */
		writew(status, mmio + NV_ADMA_STAT);
		readw(mmio + NV_ADMA_STAT); /* flush posted write */
		rmb();

		handled++; /* irq handled if we got here */

		/* freeze if hotplugged or controller error */
		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
				       NV_ADMA_STAT_HOTUNPLUG |
				       NV_ADMA_STAT_TIMEOUT |
				       NV_ADMA_STAT_SERROR))) {
			struct ata_eh_info *ehi = &ap->link.eh_info;

			ata_ehi_clear_desc(ehi);
			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
			if (status & NV_ADMA_STAT_TIMEOUT) {
				ehi->err_mask |= AC_ERR_SYSTEM;
				ata_ehi_push_desc(ehi, "timeout");
			} else if (status & NV_ADMA_STAT_HOTPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hotplug");
			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hot unplug");
			} else if (status & NV_ADMA_STAT_SERROR) {
				/* let EH analyze SError and figure out cause */
				ata_ehi_push_desc(ehi, "SError");
			} else
				ata_ehi_push_desc(ehi, "unknown");
			ata_port_freeze(ap);
			continue;
		}

		if (status & (NV_ADMA_STAT_DONE |
			      NV_ADMA_STAT_CPBERR |
			      NV_ADMA_STAT_CMD_COMPLETE)) {
			u32 check_commands = notifier_clears[i];
			u32 done_mask = 0;
			int pos, rc;

			if (status & NV_ADMA_STAT_CPBERR) {
				/* check all active commands */
				if (ata_tag_valid(ap->link.active_tag))
					check_commands = 1 <<
						ap->link.active_tag;
				else
					check_commands = ap->link.sactive;
			}

			/* check CPBs for completed commands */
			while ((pos = ffs(check_commands))) {
				pos--;
				rc = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
				if (rc > 0)
					done_mask |= 1 << pos;
				else if (unlikely(rc < 0))
					check_commands = 0;
				check_commands &= ~(1 << pos);
			}
			ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_bmdma_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_bmdma_post_internal_cmd(qc);
}

static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	VPRINTK("ENTER\n");

	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	   pad buffers */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	/* we might fallback to bmdma, allocate bmdma resources */
	rc = ata_bmdma_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/* Now that the legacy PRD and padding buffer are allocated we can
	   safely raise the DMA mask to allocate the CPB/APRD table.
	   These are allowed to fail since we store the value that ends up
	   being used to set as the bounce limit in slave_config later if
	   needed. */
	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
#endif

static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}

static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}

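/*
 * Fill one APRD (scatter/gather) entry.  The first five entries of a command
 * live inside the CPB itself and later ones go to the external APRD table
 * linked via next_aprd, which is presumably why entry 4 is not marked
 * NV_APRD_CONT.
 */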
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	VPRINTK("ENTER\n");

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		aprd = (si < 5) ? &cpb->aprd[si] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}

static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands. */
	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	   (qc->tf.flags & ATA_TFLAG_POLLING))
		return 1;

	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}

static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_bmdma_qc_prep(qc);
		return;
	}

	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_printk(qc->dev, KERN_ERR,
			"NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_bmdma_qc_issue(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
1480
7d12e780 1481static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1da177e4 1482{
cca3974e 1483 struct ata_host *host = dev_instance;
1da177e4
LT
1484 unsigned int i;
1485 unsigned int handled = 0;
1486 unsigned long flags;
1487
cca3974e 1488 spin_lock_irqsave(&host->lock, flags);
1da177e4 1489
cca3974e 1490 for (i = 0; i < host->n_ports; i++) {
3e4ec344
TH
1491 struct ata_port *ap = host->ports[i];
1492 struct ata_queued_cmd *qc;
1da177e4 1493
3e4ec344
TH
1494 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1495 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
c3b28894 1496 handled += ata_bmdma_port_intr(ap, qc);
3e4ec344
TH
1497 } else {
1498 /*
1499 * No request pending? Clear interrupt status
1500 * anyway, in case there's one pending.
1501 */
1502 ap->ops->sff_check_status(ap);
1503 }
1da177e4
LT
1504 }
1505
cca3974e 1506 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
1507
1508 return IRQ_RETVAL(handled);
1509}
1510
cca3974e 1511static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
ada364e8
TH
1512{
1513 int i, handled = 0;
1514
cca3974e 1515 for (i = 0; i < host->n_ports; i++) {
3e4ec344 1516 handled += nv_host_intr(host->ports[i], irq_stat);
ada364e8
TH
1517 irq_stat >>= NV_INT_PORT_SHIFT;
1518 }
1519
1520 return IRQ_RETVAL(handled);
1521}
1522
7d12e780 1523static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
ada364e8 1524{
cca3974e 1525 struct ata_host *host = dev_instance;
ada364e8
TH
1526 u8 irq_stat;
1527 irqreturn_t ret;
1528
cca3974e 1529 spin_lock(&host->lock);
0d5ff566 1530 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
cca3974e
JG
1531 ret = nv_do_interrupt(host, irq_stat);
1532 spin_unlock(&host->lock);
ada364e8
TH
1533
1534 return ret;
1535}
1536
7d12e780 1537static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
ada364e8 1538{
cca3974e 1539 struct ata_host *host = dev_instance;
ada364e8
TH
1540 u8 irq_stat;
1541 irqreturn_t ret;
1542
cca3974e 1543 spin_lock(&host->lock);
0d5ff566 1544 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
cca3974e
JG
1545 ret = nv_do_interrupt(host, irq_stat);
1546 spin_unlock(&host->lock);
ada364e8
TH
1547
1548 return ret;
1549}
1550
82ef04fb 1551static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1da177e4 1552{
1da177e4 1553 if (sc_reg > SCR_CONTROL)
da3dbb17 1554 return -EINVAL;
1da177e4 1555
82ef04fb 1556 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
da3dbb17 1557 return 0;
1da177e4
LT
1558}
1559
82ef04fb 1560static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1da177e4 1561{
1da177e4 1562 if (sc_reg > SCR_CONTROL)
da3dbb17 1563 return -EINVAL;
1da177e4 1564
82ef04fb 1565 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
da3dbb17 1566 return 0;
1da177e4
LT
1567}
1568
7f4774b3
TH
1569static int nv_hardreset(struct ata_link *link, unsigned int *class,
1570 unsigned long deadline)
e8caa3c7 1571{
7f4774b3 1572 struct ata_eh_context *ehc = &link->eh_context;
e8caa3c7 1573
7f4774b3
TH
1574 /* Do hardreset iff it's post-boot probing, please read the
1575 * comment above port ops for details.
1576 */
1577 if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1578 !ata_dev_enabled(link->device))
1579 sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1580 NULL, NULL);
6489e326
TH
1581 else {
1582 const unsigned long *timing = sata_ehc_deb_timing(ehc);
1583 int rc;
1584
1585 if (!(ehc->i.flags & ATA_EHI_QUIET))
1586 ata_link_printk(link, KERN_INFO, "nv: skipping "
1587 "hardreset on occupied port\n");
1588
1589 /* make sure the link is online */
1590 rc = sata_link_resume(link, timing, deadline);
1591 /* whine about phy resume failure but proceed */
1592 if (rc && rc != -EOPNOTSUPP)
1593 ata_link_printk(link, KERN_WARNING, "failed to resume "
1594 "link (errno=%d)\n", rc);
1595 }
7f4774b3
TH
1596
1597 /* device signature acquisition is unreliable */
1598 return -EAGAIN;
e8caa3c7
TH
1599}
1600
39f87582
TH
1601static void nv_nf2_freeze(struct ata_port *ap)
1602{
0d5ff566 1603 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
39f87582
TH
1604 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1605 u8 mask;
1606
0d5ff566 1607 mask = ioread8(scr_addr + NV_INT_ENABLE);
39f87582 1608 mask &= ~(NV_INT_ALL << shift);
0d5ff566 1609 iowrite8(mask, scr_addr + NV_INT_ENABLE);
39f87582
TH
1610}
1611
1612static void nv_nf2_thaw(struct ata_port *ap)
1613{
0d5ff566 1614 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
39f87582
TH
1615 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1616 u8 mask;
1617
0d5ff566 1618 iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
39f87582 1619
0d5ff566 1620 mask = ioread8(scr_addr + NV_INT_ENABLE);
39f87582 1621 mask |= (NV_INT_MASK << shift);
0d5ff566 1622 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1623}
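/*
 * freeze/thaw on nForce2/3 and CK804 operate on per-port 4-bit fields in the
 * interrupt enable/status registers: freeze() clears the whole field for the
 * port, while thaw() first acks any stale status bits and then re-enables
 * NV_INT_MASK, i.e. everything except the power-management interrupt.
 */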
1624
1625static void nv_ck804_freeze(struct ata_port *ap)
1626{
0d5ff566 1627 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1628 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1629 u8 mask;
1630
1631 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1632 mask &= ~(NV_INT_ALL << shift);
1633 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1634}
1635
1636static void nv_ck804_thaw(struct ata_port *ap)
1637{
0d5ff566 1638 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1639 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1640 u8 mask;
1641
1642 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1643
1644 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1645 mask |= (NV_INT_MASK << shift);
1646 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1647}
1648
1649static void nv_mcp55_freeze(struct ata_port *ap)
1650{
1651 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1652 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1653 u32 mask;
1654
1655 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1656
1657 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1658 mask &= ~(NV_INT_ALL_MCP55 << shift);
1659 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1660}
1661
1662static void nv_mcp55_thaw(struct ata_port *ap)
1663{
1664 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1665 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1666 u32 mask;
1667
1668 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1669
1670 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1671 mask |= (NV_INT_MASK_MCP55 << shift);
1672 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1673}
1674
1675static void nv_adma_error_handler(struct ata_port *ap)
1676{
1677 struct nv_adma_port_priv *pp = ap->private_data;
b447916e 1678 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
cdf56bcf 1679 void __iomem *mmio = pp->ctl_block;
1680 int i;
1681 u16 tmp;
a84471fe 1682
b447916e 1683 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1684 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1685 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1686 u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1687 u32 status = readw(mmio + NV_ADMA_STAT);
1688 u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1689 u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
2cb27853 1690
1691 ata_port_printk(ap, KERN_ERR,
1692 "EH in ADMA mode, notifier 0x%X "
1693 "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1694 "next cpb count 0x%X next cpb idx 0x%x\n",
1695 notifier, notifier_error, gen_ctl, status,
1696 cpb_count, next_cpb_idx);
2cb27853 1697
b447916e 1698 for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
2cb27853 1699 struct nv_adma_cpb *cpb = &pp->cpb[i];
b447916e 1700 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
5796d1c4 1701 ap->link.sactive & (1 << i))
1702 ata_port_printk(ap, KERN_ERR,
1703 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1704 i, cpb->ctl_flags, cpb->resp_flags);
1705 }
1706 }
fbbb262d 1707
1708 /* Push us back into port register mode for error handling. */
1709 nv_adma_register_mode(ap);
1710
1711 /* Mark all of the CPBs as invalid to prevent them from
1712 being executed */
b447916e 1713 for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1714 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1715
1716 /* clear CPB fetch count */
1717 writew(0, mmio + NV_ADMA_CPB_COUNT);
1718
1719 /* Reset channel */
1720 tmp = readw(mmio + NV_ADMA_CTL);
1721 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
b447916e 1722 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1723 udelay(1);
1724 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
b447916e 1725 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1726 }
1727
fe06e5f9 1728 ata_bmdma_error_handler(ap);
1729}
1730
1731static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1732{
1733 struct nv_swncq_port_priv *pp = ap->private_data;
1734 struct defer_queue *dq = &pp->defer_queue;
1735
1736 /* queue is full */
1737 WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1738 dq->defer_bits |= (1 << qc->tag);
1739 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1740}
1741
1742static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1743{
1744 struct nv_swncq_port_priv *pp = ap->private_data;
1745 struct defer_queue *dq = &pp->defer_queue;
1746 unsigned int tag;
1747
1744 if (dq->head == dq->tail)	/* queue is empty */
1749 return NULL;
1750
1751 tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1752 dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1753 WARN_ON(!(dq->defer_bits & (1 << tag)));
1754 dq->defer_bits &= ~(1 << tag);
1755
1756 return ata_qc_from_tag(ap, tag);
1757}
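/*
 * The defer queue above is a small ring buffer: head and tail are
 * free-running counters and only "& (ATA_MAX_QUEUE - 1)" selects a slot, so
 * the depth is simply tail - head.  defer_bits mirrors which tags are
 * currently parked; e.g. deferring tag 5 sets bit 5 and stores 5 in
 * tag[tail++ & (ATA_MAX_QUEUE - 1)].
 */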
1758
1759static void nv_swncq_fis_reinit(struct ata_port *ap)
1760{
1761 struct nv_swncq_port_priv *pp = ap->private_data;
1762
1763 pp->dhfis_bits = 0;
1764 pp->dmafis_bits = 0;
1765 pp->sdbfis_bits = 0;
1766 pp->ncq_flags = 0;
1767}
1768
1769static void nv_swncq_pp_reinit(struct ata_port *ap)
1770{
1771 struct nv_swncq_port_priv *pp = ap->private_data;
1772 struct defer_queue *dq = &pp->defer_queue;
1773
1774 dq->head = 0;
1775 dq->tail = 0;
1776 dq->defer_bits = 0;
1777 pp->qc_active = 0;
1778 pp->last_issue_tag = ATA_TAG_POISON;
1779 nv_swncq_fis_reinit(ap);
1780}
1781
1782static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1783{
1784 struct nv_swncq_port_priv *pp = ap->private_data;
1785
1786 writew(fis, pp->irq_block);
1787}
1788
1789static void __ata_bmdma_stop(struct ata_port *ap)
1790{
1791 struct ata_queued_cmd qc;
1792
1793 qc.ap = ap;
1794 ata_bmdma_stop(&qc);
1795}
1796
1797static void nv_swncq_ncq_stop(struct ata_port *ap)
1798{
1799 struct nv_swncq_port_priv *pp = ap->private_data;
1800 unsigned int i;
1801 u32 sactive;
1802 u32 done_mask;
1803
1804 ata_port_printk(ap, KERN_ERR,
1805 "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
1806 ap->qc_active, ap->link.sactive);
1807 ata_port_printk(ap, KERN_ERR,
1808 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1809 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1810 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1811 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1812
1813 ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
5682ed33 1814 ap->ops->sff_check_status(ap),
1815 ioread8(ap->ioaddr.error_addr));
1816
1817 sactive = readl(pp->sactive_block);
1818 done_mask = pp->qc_active ^ sactive;
1819
1820 ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
1821 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1822 u8 err = 0;
1823 if (pp->qc_active & (1 << i))
1824 err = 0;
1825 else if (done_mask & (1 << i))
1826 err = 1;
1827 else
1828 continue;
1829
1830 ata_port_printk(ap, KERN_ERR,
1831 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1832 (pp->dhfis_bits >> i) & 0x1,
1833 (pp->dmafis_bits >> i) & 0x1,
1834 (pp->sdbfis_bits >> i) & 0x1,
1835 (sactive >> i) & 0x1,
1836 (err ? "error! tag doesn't exist" : " "));
1837 }
1838
1839 nv_swncq_pp_reinit(ap);
5682ed33 1840 ap->ops->sff_irq_clear(ap);
1841 __ata_bmdma_stop(ap);
1842 nv_swncq_irq_clear(ap, 0xffff);
1843}
1844
1845static void nv_swncq_error_handler(struct ata_port *ap)
1846{
1847 struct ata_eh_context *ehc = &ap->link.eh_context;
1848
1849 if (ap->link.sactive) {
1850 nv_swncq_ncq_stop(ap);
cf480626 1851 ehc->i.action |= ATA_EH_RESET;
1852 }
1853
fe06e5f9 1854 ata_bmdma_error_handler(ap);
1855}
1856
1857#ifdef CONFIG_PM
1858static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1859{
1860 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1861 u32 tmp;
1862
1863 /* clear irq */
1864 writel(~0, mmio + NV_INT_STATUS_MCP55);
1865
1866 /* disable irq */
1867 writel(0, mmio + NV_INT_ENABLE_MCP55);
1868
1869 /* disable swncq */
1870 tmp = readl(mmio + NV_CTL_MCP55);
1871 tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1872 writel(tmp, mmio + NV_CTL_MCP55);
1873
1874 return 0;
1875}
1876
1877static int nv_swncq_port_resume(struct ata_port *ap)
1878{
1879 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1880 u32 tmp;
1881
1882 /* clear irq */
1883 writel(~0, mmio + NV_INT_STATUS_MCP55);
1884
1885 /* enable irq */
1886 writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1887
1888 /* enable swncq */
1889 tmp = readl(mmio + NV_CTL_MCP55);
1890 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1891
1892 return 0;
1893}
1894#endif
1895
1896static void nv_swncq_host_init(struct ata_host *host)
1897{
1898 u32 tmp;
1899 void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1900 struct pci_dev *pdev = to_pci_dev(host->dev);
1901 u8 regval;
1902
1903 /* disable ECO 398 */
1904 pci_read_config_byte(pdev, 0x7f, &regval);
1905 regval &= ~(1 << 7);
1906 pci_write_config_byte(pdev, 0x7f, regval);
1907
1908 /* enable swncq */
1909 tmp = readl(mmio + NV_CTL_MCP55);
1910 VPRINTK("HOST_CTL:0x%X\n", tmp);
1911 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1912
1913 /* enable irq intr */
1914 tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1915 VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1916 writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1917
1918 /* clear port irq */
1919 writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1920}
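/*
 * The 0x00fd00fd interrupt enable value is not publicly documented; it
 * appears to enable the same set of SWNCQ interrupt sources for both ports
 * (low 16 bits for port 0, high 16 bits for port 1), which matches the
 * per-port NV_INT_PORT_SHIFT_MCP55 handling in nv_swncq_interrupt().
 */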
1921
1922static int nv_swncq_slave_config(struct scsi_device *sdev)
1923{
1924 struct ata_port *ap = ata_shost_to_port(sdev->host);
1925 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1926 struct ata_device *dev;
1927 int rc;
1928 u8 rev;
1929 u8 check_maxtor = 0;
1930 unsigned char model_num[ATA_ID_PROD_LEN + 1];
1931
1932 rc = ata_scsi_slave_config(sdev);
1933 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1934 /* Not a proper libata device, ignore */
1935 return rc;
1936
1937 dev = &ap->link.device[sdev->id];
1938 if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1939 return rc;
1940
1941 /* if MCP51 and Maxtor, then disable ncq */
1942 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1943 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1944 check_maxtor = 1;
1945
1946 /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1947 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1948 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1949 pci_read_config_byte(pdev, 0x8, &rev);
1950 if (rev <= 0xa2)
1951 check_maxtor = 1;
1952 }
1953
1954 if (!check_maxtor)
1955 return rc;
1956
1957 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1958
1959 if (strncmp(model_num, "Maxtor", 6) == 0) {
e881a172 1960 ata_scsi_change_queue_depth(sdev, 1, SCSI_QDEPTH_DEFAULT);
1961 ata_dev_printk(dev, KERN_NOTICE,
1962 "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1963 }
1964
1965 return rc;
1966}
1967
1968static int nv_swncq_port_start(struct ata_port *ap)
1969{
1970 struct device *dev = ap->host->dev;
1971 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1972 struct nv_swncq_port_priv *pp;
1973 int rc;
1974
1975 /* we might fall back to bmdma, so allocate bmdma resources */
1976 rc = ata_bmdma_port_start(ap);
1977 if (rc)
1978 return rc;
1979
1980 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1981 if (!pp)
1982 return -ENOMEM;
1983
1984 pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1985 &pp->prd_dma, GFP_KERNEL);
1986 if (!pp->prd)
1987 return -ENOMEM;
1988 memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1989
1990 ap->private_data = pp;
1991 pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1992 pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1993 pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1994
1995 return 0;
1996}
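/*
 * One coherent block holds ATA_MAX_QUEUE private PRD tables per port, so
 * every NCQ tag gets its own scatter/gather table: nv_swncq_fill_sg() writes
 * into pp->prd + ATA_MAX_PRD * qc->tag, and nv_swncq_dmafis() later points
 * the BMDMA engine at the table matching the tag the drive selected.
 */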
1997
1998static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1999{
2000 if (qc->tf.protocol != ATA_PROT_NCQ) {
f47451c4 2001 ata_bmdma_qc_prep(qc);
2002 return;
2003 }
2004
2005 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2006 return;
2007
2008 nv_swncq_fill_sg(qc);
2009}
2010
2011static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
2012{
2013 struct ata_port *ap = qc->ap;
2014 struct scatterlist *sg;
f140f0f1 2015 struct nv_swncq_port_priv *pp = ap->private_data;
f60d7011 2016 struct ata_bmdma_prd *prd;
ff2aeb1e 2017 unsigned int si, idx;
2018
2019 prd = pp->prd + ATA_MAX_PRD * qc->tag;
2020
2021 idx = 0;
ff2aeb1e 2022 for_each_sg(qc->sg, sg, qc->n_elem, si) {
2023 u32 addr, offset;
2024 u32 sg_len, len;
2025
2026 addr = (u32)sg_dma_address(sg);
2027 sg_len = sg_dma_len(sg);
2028
2029 while (sg_len) {
2030 offset = addr & 0xffff;
2031 len = sg_len;
2032 if ((offset + sg_len) > 0x10000)
2033 len = 0x10000 - offset;
2034
2035 prd[idx].addr = cpu_to_le32(addr);
2036 prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2037
2038 idx++;
2039 sg_len -= len;
2040 addr += len;
2041 }
2042 }
2043
ff2aeb1e 2044 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2045}
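/*
 * As with plain BMDMA, a single PRD entry must not cross a 64K boundary, so
 * the loop above splits each DMA segment at 0x10000-byte boundaries before
 * writing the entries.  For example, a 12K segment whose bus address ends in
 * 0xf000 becomes a 4K entry up to the boundary plus an 8K entry after it;
 * the final entry gets the ATA_PRD_EOT flag.
 */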
2046
2047static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2048 struct ata_queued_cmd *qc)
2049{
2050 struct nv_swncq_port_priv *pp = ap->private_data;
2051
2052 if (qc == NULL)
2053 return 0;
2054
2055 DPRINTK("Enter\n");
2056
2057 writel((1 << qc->tag), pp->sactive_block);
2058 pp->last_issue_tag = qc->tag;
2059 pp->dhfis_bits &= ~(1 << qc->tag);
2060 pp->dmafis_bits &= ~(1 << qc->tag);
2061 pp->qc_active |= (0x1 << qc->tag);
2062
2063 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2064 ap->ops->sff_exec_command(ap, &qc->tf);
2065
2066 DPRINTK("Issued tag %u\n", qc->tag);
2067
2068 return 0;
2069}
2070
2071static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2072{
2073 struct ata_port *ap = qc->ap;
2074 struct nv_swncq_port_priv *pp = ap->private_data;
2075
2076 if (qc->tf.protocol != ATA_PROT_NCQ)
360ff783 2077 return ata_bmdma_qc_issue(qc);
2078
2079 DPRINTK("Enter\n");
2080
2081 if (!pp->qc_active)
2082 nv_swncq_issue_atacmd(ap, qc);
2083 else
2084 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
2085
2086 return 0;
2087}
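/*
 * Issue policy: if nothing is outstanding the NCQ command is written to the
 * taskfile right away; otherwise it is parked in the defer queue and issued
 * later from the interrupt path, once the previously issued command has been
 * acknowledged by its Device-to-Host Register FIS.
 */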
2088
2089static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2090{
2091 u32 serror;
2092 struct ata_eh_info *ehi = &ap->link.eh_info;
2093
2094 ata_ehi_clear_desc(ehi);
2095
2096 /* AHCI needs SError cleared; otherwise, it might lock up */
2097 sata_scr_read(&ap->link, SCR_ERROR, &serror);
2098 sata_scr_write(&ap->link, SCR_ERROR, serror);
2099
2100 /* analyze the hotplug bits in @fis */
2101 if (fis & NV_SWNCQ_IRQ_ADDED)
2102 ata_ehi_push_desc(ehi, "hot plug");
2103 else if (fis & NV_SWNCQ_IRQ_REMOVED)
2104 ata_ehi_push_desc(ehi, "hot unplug");
2105
2106 ata_ehi_hotplugged(ehi);
2107
2108 /* okay, let's hand over to EH */
2109 ehi->serror |= serror;
2110
2111 ata_port_freeze(ap);
2112}
2113
2114static int nv_swncq_sdbfis(struct ata_port *ap)
2115{
2116 struct ata_queued_cmd *qc;
2117 struct nv_swncq_port_priv *pp = ap->private_data;
2118 struct ata_eh_info *ehi = &ap->link.eh_info;
2119 u32 sactive;
f140f0f1 2120 u32 done_mask;
2121 u8 host_stat;
2122 u8 lack_dhfis = 0;
2123
2124 host_stat = ap->ops->bmdma_status(ap);
2125 if (unlikely(host_stat & ATA_DMA_ERR)) {
2126 /* error when transferring data to/from memory */
2127 ata_ehi_clear_desc(ehi);
2128 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2129 ehi->err_mask |= AC_ERR_HOST_BUS;
cf480626 2130 ehi->action |= ATA_EH_RESET;
2131 return -EINVAL;
2132 }
2133
5682ed33 2134 ap->ops->sff_irq_clear(ap);
2135 __ata_bmdma_stop(ap);
2136
2137 sactive = readl(pp->sactive_block);
2138 done_mask = pp->qc_active ^ sactive;
2139
2140 pp->qc_active &= ~done_mask;
2141 pp->dhfis_bits &= ~done_mask;
2142 pp->dmafis_bits &= ~done_mask;
2143 pp->sdbfis_bits |= done_mask;
2144 ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
2145
2146 if (!ap->qc_active) {
2147 DPRINTK("over\n");
2148 nv_swncq_pp_reinit(ap);
752e386c 2149 return 0;
2150 }
2151
2152 if (pp->qc_active & pp->dhfis_bits)
752e386c 2153 return 0;
2154
2155 if ((pp->ncq_flags & ncq_saw_backout) ||
2156 (pp->qc_active ^ pp->dhfis_bits))
752e386c 2157 /* if the controller failed to receive a Device-to-Host Register FIS,
2158 * the driver needs to reissue the command.
2159 */
2160 lack_dhfis = 1;
2161
2162 DPRINTK("id 0x%x QC: qc_active 0x%x,"
2163 "SWNCQ:qc_active 0x%X defer_bits %X "
2164 "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2165 ap->print_id, ap->qc_active, pp->qc_active,
2166 pp->defer_queue.defer_bits, pp->dhfis_bits,
2167 pp->dmafis_bits, pp->last_issue_tag);
2168
2169 nv_swncq_fis_reinit(ap);
2170
2171 if (lack_dhfis) {
2172 qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2173 nv_swncq_issue_atacmd(ap, qc);
752e386c 2174 return 0;
2175 }
2176
2177 if (pp->defer_queue.defer_bits) {
2178 /* send deferral queue command */
2179 qc = nv_swncq_qc_from_dq(ap);
2180 WARN_ON(qc == NULL);
2181 nv_swncq_issue_atacmd(ap, qc);
2182 }
2183
752e386c 2184 return 0;
2185}
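/*
 * Completion detection above is driven by SActive: tags the drive has
 * cleared since issue show up in done_mask = pp->qc_active ^ sactive, and
 * that mask is what ata_qc_complete_multiple() retires.  Anything left over
 * is either still in flight or needs to be reissued (the lack_dhfis path).
 */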
2186
2187static inline u32 nv_swncq_tag(struct ata_port *ap)
2188{
2189 struct nv_swncq_port_priv *pp = ap->private_data;
2190 u32 tag;
2191
2192 tag = readb(pp->tag_block) >> 2;
2193 return (tag & 0x1f);
2194}
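/*
 * The tag register is undocumented; the tag carried by the incoming DMA
 * Setup FIS appears to be reported in bits 6:2, hence the shift by two and
 * the 0x1f mask, which keeps the value inside the 0-31 NCQ tag space.
 */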
2195
752e386c 2196static void nv_swncq_dmafis(struct ata_port *ap)
2197{
2198 struct ata_queued_cmd *qc;
2199 unsigned int rw;
2200 u8 dmactl;
2201 u32 tag;
2202 struct nv_swncq_port_priv *pp = ap->private_data;
2203
2204 __ata_bmdma_stop(ap);
2205 tag = nv_swncq_tag(ap);
2206
2207 DPRINTK("dma setup tag 0x%x\n", tag);
2208 qc = ata_qc_from_tag(ap, tag);
2209
2210 if (unlikely(!qc))
752e386c 2211 return;
2212
2213 rw = qc->tf.flags & ATA_TFLAG_WRITE;
2214
2215 /* load PRD table addr. */
2216 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2217 ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2218
2219 /* specify data direction, triple-check start bit is clear */
2220 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2221 dmactl &= ~ATA_DMA_WR;
2222 if (!rw)
2223 dmactl |= ATA_DMA_WR;
2224
2225 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2226}
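/*
 * A DMA Setup FIS only tells the host which queued tag the drive wants to
 * transfer next, so the BMDMA engine is reprogrammed here per tag: point it
 * at that command's private PRD table, set the direction bit from the
 * taskfile flags and set ATA_DMA_START.
 */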
2227
2228static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2229{
2230 struct nv_swncq_port_priv *pp = ap->private_data;
2231 struct ata_queued_cmd *qc;
2232 struct ata_eh_info *ehi = &ap->link.eh_info;
2233 u32 serror;
2234 u8 ata_stat;
f140f0f1 2235
5682ed33 2236 ata_stat = ap->ops->sff_check_status(ap);
2237 nv_swncq_irq_clear(ap, fis);
2238 if (!fis)
2239 return;
2240
2241 if (ap->pflags & ATA_PFLAG_FROZEN)
2242 return;
2243
2244 if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2245 nv_swncq_hotplug(ap, fis);
2246 return;
2247 }
2248
2249 if (!pp->qc_active)
2250 return;
2251
82ef04fb 2252 if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
f140f0f1 2253 return;
82ef04fb 2254 ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2255
2256 if (ata_stat & ATA_ERR) {
2257 ata_ehi_clear_desc(ehi);
2258 ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
2259 ehi->err_mask |= AC_ERR_DEV;
2260 ehi->serror |= serror;
cf480626 2261 ehi->action |= ATA_EH_RESET;
2262 ata_port_freeze(ap);
2263 return;
2264 }
2265
2266 if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2267 /* A backout interrupt means the device backed out of the last
2268 * command; the driver must issue it again some time later.
2269 */
2270 pp->ncq_flags |= ncq_saw_backout;
2271 }
2272
2273 if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2274 pp->ncq_flags |= ncq_saw_sdb;
2275 DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2276 "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2277 ap->print_id, pp->qc_active, pp->dhfis_bits,
2278 pp->dmafis_bits, readl(pp->sactive_block));
752e386c 2279 if (nv_swncq_sdbfis(ap) < 0)
2280 goto irq_error;
2281 }
2282
2283 if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2284 /* The interrupt indicates the new command
2285 * was transmitted correctly to the drive.
2286 */
2287 pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2288 pp->ncq_flags |= ncq_saw_d2h;
2289 if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2290 ata_ehi_push_desc(ehi, "illegal fis transaction");
2291 ehi->err_mask |= AC_ERR_HSM;
cf480626 2292 ehi->action |= ATA_EH_RESET;
2293 goto irq_error;
2294 }
2295
2296 if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2297 !(pp->ncq_flags & ncq_saw_dmas)) {
5682ed33 2298 ata_stat = ap->ops->sff_check_status(ap);
2299 if (ata_stat & ATA_BUSY)
2300 goto irq_exit;
2301
2302 if (pp->defer_queue.defer_bits) {
2303 DPRINTK("send next command\n");
2304 qc = nv_swncq_qc_from_dq(ap);
2305 nv_swncq_issue_atacmd(ap, qc);
2306 }
2307 }
2308 }
2309
2310 if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2311 /* program the dma controller with appropriate PRD buffers
2312 * and start the DMA transfer for requested command.
2313 */
2314 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2315 pp->ncq_flags |= ncq_saw_dmas;
752e386c 2316 nv_swncq_dmafis(ap);
2317 }
2318
2319irq_exit:
2320 return;
2321irq_error:
2322 ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2323 ata_port_freeze(ap);
2324 return;
2325}
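/*
 * Per-tag bookkeeping used by the handler above: dhfis_bits marks tags whose
 * Device-to-Host Register FIS has been seen (command accepted), dmafis_bits
 * marks tags for which a DMA Setup FIS arrived, and sdbfis_bits marks tags
 * completed via Set Device Bits FIS.  ncq_flags remembers which FIS types
 * showed up in the current pass so illegal orderings can be flagged.
 */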
2326
2327static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2328{
2329 struct ata_host *host = dev_instance;
2330 unsigned int i;
2331 unsigned int handled = 0;
2332 unsigned long flags;
2333 u32 irq_stat;
2334
2335 spin_lock_irqsave(&host->lock, flags);
2336
2337 irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2338
2339 for (i = 0; i < host->n_ports; i++) {
2340 struct ata_port *ap = host->ports[i];
2341
2342 if (ap->link.sactive) {
2343 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2344 handled = 1;
2345 } else {
2346 if (irq_stat)	/* preserve the hotplug bits */
2347 nv_swncq_irq_clear(ap, 0xfff0);
f140f0f1 2348
3e4ec344 2349 handled += nv_host_intr(ap, (u8)irq_stat);
2350 }
2351 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2352 }
2353
2354 spin_unlock_irqrestore(&host->lock, flags);
2355
2356 return IRQ_RETVAL(handled);
2357}
2358
5796d1c4 2359static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1da177e4 2360{
5796d1c4 2361 static int printed_version;
1626aeb8 2362 const struct ata_port_info *ppi[] = { NULL, NULL };
95947193 2363 struct nv_pi_priv *ipriv;
9a829ccf 2364 struct ata_host *host;
cdf56bcf 2365 struct nv_host_priv *hpriv;
2366 int rc;
2367 u32 bar;
0d5ff566 2368 void __iomem *base;
fbbb262d 2369 unsigned long type = ent->driver_data;
2370
2371 // Make sure this is a SATA controller by counting the number of bars
2372 // (NVIDIA SATA controllers will always have six bars). Otherwise,
2373 // it's an IDE controller and we ignore it.
5796d1c4 2374 for (bar = 0; bar < 6; bar++)
2375 if (pci_resource_start(pdev, bar) == 0)
2376 return -ENODEV;
2377
cdf56bcf 2378 if (!printed_version++)
a9524a76 2379 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1da177e4 2380
24dc5f33 2381 rc = pcim_enable_device(pdev);
1da177e4 2382 if (rc)
24dc5f33 2383 return rc;
1da177e4 2384
9a829ccf 2385 /* determine type and allocate host */
f140f0f1 2386 if (type == CK804 && adma_enabled) {
2387 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2388 type = ADMA;
2389 } else if (type == MCP5x && swncq_enabled) {
2390 dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
2391 type = SWNCQ;
2392 }
2393
1626aeb8 2394 ppi[0] = &nv_port_info[type];
95947193 2395 ipriv = ppi[0]->private_data;
1c5afdf7 2396 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2397 if (rc)
2398 return rc;
1da177e4 2399
24dc5f33 2400 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
cdf56bcf 2401 if (!hpriv)
24dc5f33 2402 return -ENOMEM;
2403 hpriv->type = type;
2404 host->private_data = hpriv;
cdf56bcf 2405
2406 /* request and iomap NV_MMIO_BAR */
2407 rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2408 if (rc)
2409 return rc;
1da177e4 2410
2411 /* configure SCR access */
2412 base = host->iomap[NV_MMIO_BAR];
2413 host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2414 host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
1da177e4 2415
ada364e8 2416 /* enable SATA space for CK804 */
fbbb262d 2417 if (type >= CK804) {
2418 u8 regval;
2419
2420 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2421 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2422 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2423 }
2424
9a829ccf 2425 /* init ADMA */
fbbb262d 2426 if (type == ADMA) {
9a829ccf 2427 rc = nv_adma_host_init(host);
fbbb262d 2428 if (rc)
24dc5f33 2429 return rc;
360737a9 2430 } else if (type == SWNCQ)
f140f0f1 2431 nv_swncq_host_init(host);
fbbb262d 2432
2433 if (msi_enabled) {
2434 dev_printk(KERN_NOTICE, &pdev->dev, "Using MSI\n");
2435 pci_enable_msi(pdev);
2436 }
2437
9a829ccf 2438 pci_set_master(pdev);
95cc2c70 2439 return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2440}
2441
438ac6d5 2442#ifdef CONFIG_PM
2443static int nv_pci_device_resume(struct pci_dev *pdev)
2444{
2445 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2446 struct nv_host_priv *hpriv = host->private_data;
ce053fa8 2447 int rc;
cdf56bcf 2448
ce053fa8 2449 rc = ata_pci_device_do_resume(pdev);
b447916e 2450 if (rc)
ce053fa8 2451 return rc;
2452
2453 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
b447916e 2454 if (hpriv->type >= CK804) {
2455 u8 regval;
2456
2457 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2458 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2459 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2460 }
b447916e 2461 if (hpriv->type == ADMA) {
2462 u32 tmp32;
2463 struct nv_adma_port_priv *pp;
2464 /* enable/disable ADMA on the ports appropriately */
2465 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2466
2467 pp = host->ports[0]->private_data;
b447916e 2468 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
cdf56bcf 2469 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
5796d1c4 2470 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2471 else
2472 tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
5796d1c4 2473 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
cdf56bcf 2474 pp = host->ports[1]->private_data;
b447916e 2475 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
cdf56bcf 2476 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
5796d1c4 2477 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2478 else
2479 tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
5796d1c4 2480 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2481
2482 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2483 }
2484 }
2485
2486 ata_host_resume(host);
2487
2488 return 0;
2489}
438ac6d5 2490#endif
cdf56bcf 2491
cca3974e 2492static void nv_ck804_host_stop(struct ata_host *host)
ada364e8 2493{
cca3974e 2494 struct pci_dev *pdev = to_pci_dev(host->dev);
2495 u8 regval;
2496
2497 /* disable SATA space for CK804 */
2498 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2499 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2500 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2501}
2502
2503static void nv_adma_host_stop(struct ata_host *host)
2504{
2505 struct pci_dev *pdev = to_pci_dev(host->dev);
2506 u32 tmp32;
2507
2508 /* disable ADMA on the ports */
2509 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2510 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2511 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2512 NV_MCP_SATA_CFG_20_PORT1_EN |
2513 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2514
2515 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2516
2517 nv_ck804_host_stop(host);
2518}
2519
2520static int __init nv_init(void)
2521{
b7887196 2522 return pci_register_driver(&nv_pci_driver);
2523}
2524
2525static void __exit nv_exit(void)
2526{
2527 pci_unregister_driver(&nv_pci_driver);
2528}
2529
2530module_init(nv_init);
2531module_exit(nv_exit);
fbbb262d 2532module_param_named(adma, adma_enabled, bool, 0444);
55f784c8 2533MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
f140f0f1 2534module_param_named(swncq, swncq_enabled, bool, 0444);
d21279f4 2535MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2536module_param_named(msi, msi_enabled, bool, 0444);
2537MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
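/*
 * Example usage (assuming the driver is built as a module): the defaults can
 * be overridden at load time, e.g.
 *
 *	modprobe sata_nv adma=0 swncq=1 msi=0
 *
 * or persistently via a modprobe configuration file, e.g.
 * "options sata_nv swncq=0".
 */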
f140f0f1 2538