/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.5"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL

enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= ATA_PIO4,
	NV_MWDMA_MASK			= ATA_MWDMA2,
	NV_UDMA_MASK			= ATA_UDMA6,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 reg offset */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	/* MCP55 */
	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ ENABLE BITS */
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SW NCQ status bits */
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,

};
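
/*
 * Sizing notes (derived from the constants above): each command tag owns
 * one 128-byte CPB plus a scatter/gather table of NV_ADMA_SGTBL_LEN =
 * (1024 - 128) / 16 = 56 APRDs, i.e. NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ =
 * 1024 bytes per tag, so NV_ADMA_PORT_PRIV_DMA_SZ comes to 32 * 1024 =
 * 32 KiB of coherent DMA memory per port.
 */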

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};

enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};


struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;
	u8			flags;
	int			last_issue_ncq;
};

struct nv_host_priv {
	unsigned long		type;
};

struct defer_queue {
	u32		defer_bits;
	unsigned int	head;
	unsigned int	tail;
	unsigned int	tag[ATA_MAX_QUEUE];
};

enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),
};

struct nv_swncq_port_priv {
	struct ata_bmdma_prd *prd;	 /* our SG list */
	dma_addr_t	prd_dma; /* and its DMA mapping */
	void __iomem	*sactive_block;
	void __iomem	*irq_block;
	void __iomem	*tag_block;
	u32		qc_active;

	unsigned int	last_issue_tag;

	/* FIFO circular queue to store deferred commands */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis */
	u32	dhfis_bits;
	u32	dmafis_bits;
	u32	sdbfis_bits;

	unsigned int	ncq_flags;
};


#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))

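/*
 * NV_ADMA_CHECK_INTR above tests the per-port interrupt-pending bit in
 * the ADMA general control/status word: the fields are spaced 12 bits
 * apart starting at bit 19, so port 0 is checked at bit 19 and port 1
 * at bit 31.
 */
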
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);

static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	MCP5x,
	SWNCQ,
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ }	/* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct scsi_host_template nv_adma_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
};

static struct scsi_host_template nv_swncq_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
};

/*
 * NV SATA controllers have various different problems with hardreset
 * protocol depending on the specific controller and device.
 *
 * GENERIC:
 *
 * bko11195 reports that link doesn't come online after hardreset on
 * generic nv's and there have been several other similar reports on
 * linux-ide.
 *
 * bko12351#c23 reports that warmplug on MCP61 doesn't work with
 * softreset.
 *
 * NF2/3:
 *
 * bko3352 reports nf2/3 controllers can't determine device signature
 * reliably after hardreset.  The following thread reports detection
 * failure on cold boot with the standard debouncing timing.
 *
 * http://thread.gmane.org/gmane.linux.ide/34098
 *
 * bko12176 reports that hardreset fails to bring up the link during
 * boot on nf2.
 *
 * CK804:
 *
 * For initial probing after boot and hot plugging, hardreset mostly
 * works fine on CK804 but curiously, reprobing on the initial port
 * by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
 * FIS in a somewhat nondeterministic way.
 *
 * SWNCQ:
 *
 * bko12351 reports that when SWNCQ is enabled, for hotplug to work,
 * hardreset should be used and hardreset can't report proper
 * signature, which suggests that mcp5x is closer to nf2 as far as
 * reset quirkiness is concerned.
 *
 * bko12703 reports that boot probing fails for Intel SSD with
 * hardreset.  Link fails to come online.  Softreset works fine.
 *
 * The failures are varied but the following patterns seem true for
 * all flavors.
 *
 * - Softreset during boot always works.
 *
 * - Hardreset during boot sometimes fails to bring up the link on
 *   certain combinations and device signature acquisition is
 *   unreliable.
 *
 * - Hardreset is often necessary after hotplug.
 *
 * So, preferring softreset for boot probing and error handling (as
 * hardreset might bring down the link) but using hardreset for
 * post-boot probing should work around the above issues in most
 * cases.  Define nv_hardreset() which only kicks in for post-boot
 * probing and use it for all variants.
 */
static struct ata_port_operations nv_generic_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.lost_interrupt		= ATA_OP_NULL,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.hardreset		= nv_hardreset,
};

static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
};

static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.host_stop		= nv_ck804_host_stop,
};

static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_ck804_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.sff_tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.sff_irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};

static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_generic_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};

struct nv_pi_priv {
	irq_handler_t			irq_handler;
	struct scsi_host_template	*sht;
};

#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }

static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* MCP5x */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* SWNCQ */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled;
static int swncq_enabled = 1;
static int msi_enabled;

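/*
 * The ADMA-capable ports run in one of two modes: legacy "register" mode,
 * driven through the standard ATA taskfile registers, and ADMA mode, where
 * commands are queued as CPBs.  The two helpers below switch a port between
 * the modes, polling NV_ADMA_STAT (up to 20 iterations of ndelay(50)) until
 * the engine confirms the transition, and warning on timeout.
 */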
static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	       !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct scsi_device *sdev0, *sdev1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	spin_lock_irqsave(ap->lock, flags);

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	port0 = ap->host->ports[0]->private_data;
	port1 = ap->host->ports[1]->private_data;
	sdev0 = ap->host->ports[0]->link.device[0].sdev;
	sdev1 = ap->host->ports[1]->link.device[0].sdev;
	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/* We have to set the DMA mask to 32-bit if either port is in
		   ATAPI mode, since they are on the same PCI device which is
		   used for DMA mapping.  If we set the mask we also need to set
		   the bounce limit on both ports to ensure that the block
		   layer doesn't feed addresses that cause DMA mapping to
		   choke.  If either SCSI device is not allocated yet, it's OK
		   since that port will discover its correct setting when it
		   does get allocated.
		   Note: Setting 32-bit mask should not fail. */
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       ATA_DMA_MASK);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       ATA_DMA_MASK);

		pci_set_dma_mask(pdev, ATA_DMA_MASK);
	} else {
		/* This shouldn't fail as it was set to this value before */
		pci_set_dma_mask(pdev, pp->adma_dma_mask);
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       pp->adma_dma_mask);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       pp->adma_dma_mask);
	}

	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)*ap->host->dev->dma_mask,
		segment_boundary, sg_tablesize);

	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_sff_tf_read(ap, tf);
}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}

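/*
 * Each 16-bit entry built by nv_adma_tf_to_cpb() encodes
 * (ATA register address << 8) | value, with control bits from enum
 * nv_adma_regbits OR'd in: WNB on the first entry makes the engine wait
 * for not-BSY, CMDEND marks the command register write as the final
 * entry, and unused slots are padded with IGN so the CPB always carries
 * 12 entries (24 bytes, matching cpb->len of 3 in 64-bit words).
 */
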
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (likely(flags & NV_CPB_RESP_DONE)) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			DPRINTK("Completing qc from tag %d\n", cpb_num);
			ata_qc_complete(qc);
		} else {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving. Raise host state machine violation on this
			   condition. */
			ata_port_printk(ap, KERN_ERR,
					"notifier for tag %d with no cmd?\n",
					cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			ata_port_freeze(ap);
			return 1;
		}
	}
	return 0;
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_sff_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_bmdma_port_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct nv_adma_port_priv *pp = ap->private_data;
		void __iomem *mmio = pp->ctl_block;
		u16 status;
		u32 gen_ctl;
		u32 notifier, notifier_error;

		notifier_clears[i] = 0;

		/* if ADMA is disabled, use standard ata interrupt handler */
		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			handled += nv_host_intr(ap, irq_stat);
			continue;
		}

		/* if in ATA register mode, check for standard interrupts */
		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			if (ata_tag_valid(ap->link.active_tag))
				/* NV_INT_DEV indication seems unreliable
				   at times at least in ADMA mode. Force it
				   on always when a command is active, to
				   prevent losing interrupts. */
				irq_stat |= NV_INT_DEV;
			handled += nv_host_intr(ap, irq_stat);
		}

		notifier = readl(mmio + NV_ADMA_NOTIFIER);
		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
		notifier_clears[i] = notifier | notifier_error;

		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
		    !notifier_error)
			/* Nothing to do */
			continue;

		status = readw(mmio + NV_ADMA_STAT);

		/*
		 * Clear status. Ensure the controller sees the
		 * clearing before we start looking at any of the CPB
		 * statuses, so that any CPB completions after this
		 * point in the handler will raise another interrupt.
		 */
		writew(status, mmio + NV_ADMA_STAT);
		readw(mmio + NV_ADMA_STAT); /* flush posted write */
		rmb();

		handled++; /* irq handled if we got here */

		/* freeze if hotplugged or controller error */
		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
				       NV_ADMA_STAT_HOTUNPLUG |
				       NV_ADMA_STAT_TIMEOUT |
				       NV_ADMA_STAT_SERROR))) {
			struct ata_eh_info *ehi = &ap->link.eh_info;

			ata_ehi_clear_desc(ehi);
			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
			if (status & NV_ADMA_STAT_TIMEOUT) {
				ehi->err_mask |= AC_ERR_SYSTEM;
				ata_ehi_push_desc(ehi, "timeout");
			} else if (status & NV_ADMA_STAT_HOTPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hotplug");
			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hot unplug");
			} else if (status & NV_ADMA_STAT_SERROR) {
				/* let EH analyze SError and figure out cause */
				ata_ehi_push_desc(ehi, "SError");
			} else
				ata_ehi_push_desc(ehi, "unknown");
			ata_port_freeze(ap);
			continue;
		}

		if (status & (NV_ADMA_STAT_DONE |
			      NV_ADMA_STAT_CPBERR |
			      NV_ADMA_STAT_CMD_COMPLETE)) {
			u32 check_commands = notifier_clears[i];
			int pos, rc;

			if (status & NV_ADMA_STAT_CPBERR) {
				/* check all active commands */
				if (ata_tag_valid(ap->link.active_tag))
					check_commands = 1 <<
						ap->link.active_tag;
				else
					check_commands = ap->link.sactive;
			}

			/* check CPBs for completed commands */
			while ((pos = ffs(check_commands))) {
				pos--;
				rc = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
				if (unlikely(rc))
					check_commands = 0;
				check_commands &= ~(1 << pos);
			}
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_bmdma_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_bmdma_post_internal_cmd(qc);
}

static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	VPRINTK("ENTER\n");

	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	   pad buffers */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	/* we might fall back to bmdma, allocate bmdma resources */
	rc = ata_bmdma_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/* Now that the legacy PRD and padding buffer are allocated we can
	   safely raise the DMA mask to allocate the CPB/APRD table.
	   These are allowed to fail since we store the value that ends up
	   being used to set as the bounce limit in slave_config later if
	   needed. */
	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
#endif

static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}

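/*
 * Note the layout implied above: in ADMA port register space the legacy
 * taskfile registers appear at 4-byte strides (hence the "* 4"), with the
 * alternate status/control register at fixed offset 0x20.
 */
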
static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}

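/*
 * On the flag logic above: the final segment gets NV_APRD_END and every
 * other segment gets NV_APRD_CONT, except in-CPB slot 4 (the fifth APRD),
 * which is left without CONT; any further segments are apparently reached
 * through the external table pointed to by cpb->next_aprd, set up by
 * nv_adma_fill_sg() below.
 */
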
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	VPRINTK("ENTER\n");

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		aprd = (si < 5) ? &cpb->aprd[si] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}

static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands. */
	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	   (qc->tf.flags & ATA_TFLAG_POLLING))
		return 1;

	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}

static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_bmdma_qc_prep(qc);
		return;
	}

	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}

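/*
 * The barrier dance in nv_adma_qc_prep() is deliberate: resp_flags is
 * parked at NV_CPB_RESP_DONE and ctl_flags cleared (with wmb()s) before
 * the CPB is rewritten, so the controller can never see a half-filled
 * CPB marked valid; only once every field is in place are ctl_flags and
 * then resp_flags written for real.
 */
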
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_printk(qc->dev, KERN_ERR,
			"NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_bmdma_qc_issue(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}

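/*
 * As the comment in nv_adma_qc_issue() notes, the append register takes
 * the command tag in its low 8 bits and (number of CPBs to append - 1)
 * in the high 8 bits; writew(qc->tag, ...) therefore appends exactly one
 * CPB per issue.
 */
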
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
			handled += ata_bmdma_port_intr(ap, qc);
		} else {
			/*
			 * No request pending?  Clear interrupt status
			 * anyway, in case there's one pending.
			 */
			ap->ops->sff_check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		handled += nv_host_intr(host->ports[i], irq_stat);
		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

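/*
 * nv_do_interrupt() leans on the layout noted in the enum block: the
 * shared status byte packs one 4-bit field per port (NV_INT_PORT_SHIFT),
 * so shifting irq_stat right by four bits between loop iterations hands
 * each port its own status nibble.
 */
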
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;

	/* Do hardreset iff it's post-boot probing, please read the
	 * comment above port ops for details.
	 */
	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
	    !ata_dev_enabled(link->device))
		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
				    NULL, NULL);
	else {
		const unsigned long *timing = sata_ehc_deb_timing(ehc);
		int rc;

		if (!(ehc->i.flags & ATA_EHI_QUIET))
			ata_link_printk(link, KERN_INFO, "nv: skipping "
					"hardreset on occupied port\n");

		/* make sure the link is online */
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link (errno=%d)\n", rc);
	}

	/* device signature acquisition is unreliable */
	return -EAGAIN;
}

static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_mcp55_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask &= ~(NV_INT_ALL_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
}

static void nv_mcp55_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask |= (NV_INT_MASK_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
}

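/*
 * ADMA error handling: dump controller and CPB state for diagnosis,
 * fall back to register mode, invalidate every CPB, and pulse the
 * channel reset bit before deferring to the standard BMDMA handler.
 */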
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
				    ap->link.sactive & (1 << i))
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from
		   being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_error_handler(ap);
}

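/*
 * The defer queue is a simple FIFO of tags: NCQ commands that arrive
 * while another command is still in flight are parked here and
 * reissued one at a time from interrupt context.
 */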
static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	/* queue is full */
	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
	dq->defer_bits |= (1 << qc->tag);
	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
}

static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;
	unsigned int tag;

	if (dq->head == dq->tail)	/* null queue */
		return NULL;

	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
	WARN_ON(!(dq->defer_bits & (1 << tag)));
	dq->defer_bits &= ~(1 << tag);

	return ata_qc_from_tag(ap, tag);
}

static void nv_swncq_fis_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	pp->dhfis_bits = 0;
	pp->dmafis_bits = 0;
	pp->sdbfis_bits = 0;
	pp->ncq_flags = 0;
}

static void nv_swncq_pp_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	dq->head = 0;
	dq->tail = 0;
	dq->defer_bits = 0;
	pp->qc_active = 0;
	pp->last_issue_tag = ATA_TAG_POISON;
	nv_swncq_fis_reinit(ap);
}

static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	writew(fis, pp->irq_block);
}

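/*
 * ata_bmdma_stop() only looks at qc->ap, so a dummy on-stack qc is
 * enough to stop the BMDMA engine when no real command is at hand.
 */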
static void __ata_bmdma_stop(struct ata_port *ap)
{
	struct ata_queued_cmd qc;

	qc.ap = ap;
	ata_bmdma_stop(&qc);
}

static void nv_swncq_ncq_stop(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	unsigned int i;
	u32 sactive;
	u32 done_mask;

	ata_port_printk(ap, KERN_ERR,
			"EH in SWNCQ mode, QC: qc_active 0x%X sactive 0x%X\n",
			ap->qc_active, ap->link.sactive);
	ata_port_printk(ap, KERN_ERR,
			"SWNCQ: qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n"
			"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
			pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
			pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);

	ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
			ap->ops->sff_check_status(ap),
			ioread8(ap->ioaddr.error_addr));

	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		u8 err = 0;
		if (pp->qc_active & (1 << i))
			err = 0;
		else if (done_mask & (1 << i))
			err = 1;
		else
			continue;

		ata_port_printk(ap, KERN_ERR,
				"tag 0x%x: %01x %01x %01x %01x %s\n", i,
				(pp->dhfis_bits >> i) & 0x1,
				(pp->dmafis_bits >> i) & 0x1,
				(pp->sdbfis_bits >> i) & 0x1,
				(sactive >> i) & 0x1,
				(err ? "error! tag doesn't exist" : " "));
	}

	nv_swncq_pp_reinit(ap);
	ap->ops->sff_irq_clear(ap);
	__ata_bmdma_stop(ap);
	nv_swncq_irq_clear(ap, 0xffff);
}

static void nv_swncq_error_handler(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->link.eh_context;

	if (ap->link.sactive) {
		nv_swncq_ncq_stop(ap);
		ehc->i.action |= ATA_EH_RESET;
	}

	ata_bmdma_error_handler(ap);
}

#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* disable irq */
	writel(0, mmio + NV_INT_ENABLE_MCP55);

	/* disable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
	writel(tmp, mmio + NV_CTL_MCP55);

	return 0;
}

static int nv_swncq_port_resume(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* enable irq */
	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	return 0;
}
#endif

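/*
 * Global SWNCQ setup. The meaning of config byte 0x7f ("ECO 398") and
 * of the 0x00fd00fd interrupt-enable mask is undocumented; with no
 * public hardware docs for these parts they are best treated as
 * opaque magic numbers.
 */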
static void nv_swncq_host_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable ECO 398 */
	pci_read_config_byte(pdev, 0x7f, &regval);
	regval &= ~(1 << 7);
	pci_write_config_byte(pdev, 0x7f, regval);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	VPRINTK("HOST_CTL:0x%X\n", tmp);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	/* enable irq intr */
	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* clear port irq */
	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}

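/*
 * Maxtor drives apparently misbehave with SWNCQ on MCP51, and on
 * MCP55 up to silicon rev a2, so the queue depth is forced to 1 for
 * that combination.
 */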
static int nv_swncq_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *dev;
	int rc;
	u8 rev;
	u8 check_maxtor = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	rc = ata_scsi_slave_config(sdev);
	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	dev = &ap->link.device[sdev->id];
	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
		return rc;

	/* if MCP51 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
	    pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
		check_maxtor = 1;

	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
	    pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
		pci_read_config_byte(pdev, 0x8, &rev);
		if (rev <= 0xa2)
			check_maxtor = 1;
	}

	if (!check_maxtor)
		return rc;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	if (strncmp(model_num, "Maxtor", 6) == 0) {
		ata_scsi_change_queue_depth(sdev, 1, SCSI_QDEPTH_DEFAULT);
		ata_dev_printk(dev, KERN_NOTICE,
			"Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
	}

	return rc;
}

static int nv_swncq_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct nv_swncq_port_priv *pp;
	int rc;

	/* we might fall back to bmdma, allocate bmdma resources */
	rc = ata_bmdma_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
				      &pp->prd_dma, GFP_KERNEL);
	if (!pp->prd)
		return -ENOMEM;
	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);

	ap->private_data = pp;
	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

	return 0;
}

static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
{
	if (qc->tf.protocol != ATA_PROT_NCQ) {
		ata_bmdma_qc_prep(qc);
		return;
	}

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	nv_swncq_fill_sg(qc);
}

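/*
 * Each tag owns one PRD table inside the per-port block allocated in
 * nv_swncq_port_start(); segments are split so that no PRD entry
 * crosses a 64K boundary, as BMDMA requires.
 */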
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_bmdma_prd *prd;
	unsigned int si, idx;

	prd = pp->prd + ATA_MAX_PRD * qc->tag;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32)sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
					  struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc == NULL)
		return 0;

	DPRINTK("Enter\n");

	writel((1 << qc->tag), pp->sactive_block);
	pp->last_issue_tag = qc->tag;
	pp->dhfis_bits &= ~(1 << qc->tag);
	pp->dmafis_bits &= ~(1 << qc->tag);
	pp->qc_active |= (0x1 << qc->tag);

	ap->ops->sff_tf_load(ap, &qc->tf);	/* load tf registers */
	ap->ops->sff_exec_command(ap, &qc->tf);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}

static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc->tf.protocol != ATA_PROT_NCQ)
		return ata_bmdma_qc_issue(qc);

	DPRINTK("Enter\n");

	if (!pp->qc_active)
		nv_swncq_issue_atacmd(ap, qc);
	else
		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */

	return 0;
}

static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
	u32 serror;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	sata_scr_read(&ap->link, SCR_ERROR, &serror);
	sata_scr_write(&ap->link, SCR_ERROR, serror);

	/* analyze @fis */
	if (fis & NV_SWNCQ_IRQ_ADDED)
		ata_ehi_push_desc(ehi, "hot plug");
	else if (fis & NV_SWNCQ_IRQ_REMOVED)
		ata_ehi_push_desc(ehi, "hot unplug");

	ata_ehi_hotplugged(ehi);

	/* okay, let's hand over to EH */
	ehi->serror |= serror;

	ata_port_freeze(ap);
}

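/*
 * Handle a Set Device Bits FIS: tags set in pp->qc_active but clear
 * in SActive have completed. Complete those qcs, then either reissue
 * a command that never produced its D2H Register FIS or pull the next
 * command off the defer queue.
 */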
static int nv_swncq_sdbfis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 sactive;
	u32 done_mask;
	int i;
	u8 host_stat;
	u8 lack_dhfis = 0;

	host_stat = ap->ops->bmdma_status(ap);
	if (unlikely(host_stat & ATA_DMA_ERR)) {
		/* error when transferring data to/from memory */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
		ehi->err_mask |= AC_ERR_HOST_BUS;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}

	ap->ops->sff_irq_clear(ap);
	__ata_bmdma_stop(ap);

	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	if (unlikely(done_mask & sactive)) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition "
				  "(%08x->%08x)", pp->qc_active, sactive);
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}
	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		if (!(done_mask & (1 << i)))
			continue;

		qc = ata_qc_from_tag(ap, i);
		if (qc) {
			ata_qc_complete(qc);
			pp->qc_active &= ~(1 << i);
			pp->dhfis_bits &= ~(1 << i);
			pp->dmafis_bits &= ~(1 << i);
			pp->sdbfis_bits |= (1 << i);
		}
	}

	if (!ap->qc_active) {
		DPRINTK("over\n");
		nv_swncq_pp_reinit(ap);
		return 0;
	}

	if (pp->qc_active & pp->dhfis_bits)
		return 0;

	if ((pp->ncq_flags & ncq_saw_backout) ||
	    (pp->qc_active ^ pp->dhfis_bits))
		/* if the controller didn't get a Device-to-Host Register
		 * FIS for a command, the driver needs to reissue it.
		 */
		lack_dhfis = 1;

	DPRINTK("id 0x%x QC: qc_active 0x%x, "
		"SWNCQ: qc_active 0x%X defer_bits %X "
		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
		ap->print_id, ap->qc_active, pp->qc_active,
		pp->defer_queue.defer_bits, pp->dhfis_bits,
		pp->dmafis_bits, pp->last_issue_tag);

	nv_swncq_fis_reinit(ap);

	if (lack_dhfis) {
		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
		nv_swncq_issue_atacmd(ap, qc);
		return 0;
	}

	if (pp->defer_queue.defer_bits) {
		/* send deferral queue command */
		qc = nv_swncq_qc_from_dq(ap);
		WARN_ON(qc == NULL);
		nv_swncq_issue_atacmd(ap, qc);
	}

	return 0;
}

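/*
 * The tag block presumably latches the tag of the most recent DMA
 * Setup FIS; bits [6:2] of the byte hold the 5-bit tag value.
 */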
static inline u32 nv_swncq_tag(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	u32 tag;

	tag = readb(pp->tag_block) >> 2;
	return (tag & 0x1f);
}

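/*
 * React to a DMA Setup FIS: point the BMDMA engine at the PRD table
 * belonging to the tag the drive selected and start the transfer.
 */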
static void nv_swncq_dmafis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int rw;
	u8 dmactl;
	u32 tag;
	struct nv_swncq_port_priv *pp = ap->private_data;

	__ata_bmdma_stop(ap);
	tag = nv_swncq_tag(ap);

	DPRINTK("dma setup tag 0x%x\n", tag);
	qc = ata_qc_from_tag(ap, tag);

	if (unlikely(!qc))
		return;

	rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* load PRD table addr. */
	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~ATA_DMA_WR;
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}

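/*
 * Per-port SWNCQ interrupt path: ack the FIS notification bits, screen
 * out hotplug and device errors, then handle SDB, D2H Register and
 * DMA Setup FIS notifications in turn.
 */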
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;

	ata_stat = ap->ops->sff_check_status(ap);
	nv_swncq_irq_clear(ap, fis);
	if (!fis)
		return;

	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	if (!pp->qc_active)
		return;

	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);

	if (ata_stat & ATA_ERR) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* On a backout notification, the driver must reissue
		 * the command some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		pp->ncq_flags |= ncq_saw_sdb;
		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			ap->print_id, pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		if (nv_swncq_sdbfis(ap) < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			goto irq_error;
		}

		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			ata_stat = ap->ops->sff_check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			if (pp->defer_queue.defer_bits) {
				DPRINTK("send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the dma controller with appropriate PRD buffers
		 * and start the DMA transfer for requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}

static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;
	u32 irq_stat;

	spin_lock_irqsave(&host->lock, flags);

	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->link.sactive) {
			nv_swncq_host_interrupt(ap, (u16)irq_stat);
			handled = 1;
		} else {
			if (irq_stat)	/* ack everything but the hotplug bits */
				nv_swncq_irq_clear(ap, 0xfff0);

			handled += nv_host_intr(ap, (u8)irq_stat);
		}
		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

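/*
 * Probe: plain IDE variants are filtered out by BAR count, the ADMA or
 * SWNCQ flavour is chosen from the module parameters, and the MMIO BAR
 * is mapped before the host is activated.
 */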
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	const struct ata_port_info *ppi[] = { NULL, NULL };
	struct nv_pi_priv *ipriv;
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars). Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host */
	if (type == CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	} else if (type == MCP5x && swncq_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
		type = SWNCQ;
	}

	ppi[0] = &nv_port_info[type];
	ipriv = ppi[0]->private_data;
	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;
	host->private_data = hpriv;

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	} else if (type == SWNCQ)
		nv_swncq_host_init(host);

	if (msi_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using MSI\n");
		pci_enable_msi(pdev);
	}

	pci_set_master(pdev);
	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
}

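/*
 * On resume from suspend the SATA-space enable and the per-port ADMA
 * enables apparently do not survive, so they are rewritten here before
 * the generic resume path restarts the ports.
 */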
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
#endif

static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}

static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}

static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
module_param_named(msi, msi_enabled, bool, 0444);
MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");