]> bbs.cooldavid.org Git - net-next-2.6.git/blame - arch/sh/drivers/pci/pcie-sh7786.c
sh: pci: Toggle configuration accesses on SH7786.
[net-next-2.6.git] / arch / sh / drivers / pci / pcie-sh7786.c
CommitLineData
5713e602
PM
1/*
2 * Low-Level PCI Express Support for the SH7786
3 *
7561f2dd 4 * Copyright (C) 2009 - 2010 Paul Mundt
5713e602
PM
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#include <linux/pci.h>
11#include <linux/init.h>
12#include <linux/kernel.h>
13#include <linux/io.h>
14#include <linux/delay.h>
5a0e3ad6 15#include <linux/slab.h>
5713e602
PM
16#include "pcie-sh7786.h"
17#include <asm/sizes.h>
18
/* Per-port state for each of the SH7786's on-chip PCIe controller ports. */
struct sh7786_pcie_port {
	struct pci_channel *hose;	/* generic SH PCI channel backing this port */
	unsigned int index;		/* port number, 0..nr_ports-1 */
	int endpoint;			/* non-zero when strapped for endpoint mode (MODE11) */
	int link;			/* NOTE(review): not referenced in this file —
					 * presumably link-up status; confirm before use. */
};
25
/* Array of nr_ports port descriptors, allocated in sh7786_pcie_init(). */
static struct sh7786_pcie_port *sh7786_pcie_ports;
/* Number of available ports, probed from the mode pins at init time. */
static unsigned int nr_ports;
28
/*
 * Hardware operations, indirected so that alternate silicon revisions
 * can plug in their own core/port initialization routines.
 */
static struct sh7786_pcie_hwops {
	int (*core_init)(void);		/* probe; returns number of ports */
	int (*port_init_hw)(struct sh7786_pcie_port *port); /* bring up one port */
} *sh7786_pcie_hwops;
/* Address windows for PCIe port 0 (I/O window plus three memory windows). */
static struct resource sh7786_pci0_resources[] = {
	{
		.name	= "PCIe0 IO",
		.start	= 0xfd000000,
		.end	= 0xfd000000 + SZ_8M - 1,
		.flags	= IORESOURCE_IO,
	}, {
		/* 32-bit-mode-only window; skipped in 29-bit mode by pcie_init(). */
		.name	= "PCIe0 MEM 0",
		.start	= 0xc0000000,
		.end	= 0xc0000000 + SZ_512M - 1,
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
	}, {
		.name	= "PCIe0 MEM 1",
		.start	= 0x10000000,
		.end	= 0x10000000 + SZ_64M - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "PCIe0 MEM 2",
		.start	= 0xfe100000,
		.end	= 0xfe100000 + SZ_1M - 1,
		.flags	= IORESOURCE_MEM,
	},
};
/* Address windows for PCIe port 1; same layout idea as port 0. */
static struct resource sh7786_pci1_resources[] = {
	{
		.name	= "PCIe1 IO",
		.start	= 0xfd800000,
		.end	= 0xfd800000 + SZ_8M - 1,
		.flags	= IORESOURCE_IO,
	}, {
		/* 32-bit-mode-only window; skipped in 29-bit mode by pcie_init(). */
		.name	= "PCIe1 MEM 0",
		.start	= 0xa0000000,
		.end	= 0xa0000000 + SZ_512M - 1,
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
	}, {
		.name	= "PCIe1 MEM 1",
		.start	= 0x30000000,
		.end	= 0x30000000 + SZ_256M - 1,
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
	}, {
		.name	= "PCIe1 MEM 2",
		.start	= 0xfe300000,
		.end	= 0xfe300000 + SZ_1M - 1,
		.flags	= IORESOURCE_MEM,
	},
};
/* Address windows for PCIe port 2 (present only when MODE12 is set). */
static struct resource sh7786_pci2_resources[] = {
	{
		.name	= "PCIe2 IO",
		.start	= 0xfc800000,
		.end	= 0xfc800000 + SZ_4M - 1,
		.flags	= IORESOURCE_IO,
	}, {
		/* 32-bit-mode-only window; skipped in 29-bit mode by pcie_init(). */
		.name	= "PCIe2 MEM 0",
		.start	= 0x80000000,
		.end	= 0x80000000 + SZ_512M - 1,
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
	}, {
		.name	= "PCIe2 MEM 1",
		.start	= 0x20000000,
		.end	= 0x20000000 + SZ_256M - 1,
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
	}, {
		.name	= "PCIe2 MEM 2",
		.start	= 0xfcd00000,
		.end	= 0xfcd00000 + SZ_1M - 1,
		.flags	= IORESOURCE_MEM,
	},
};
/* Config-space access ops for this controller, defined outside this file. */
extern struct pci_ops sh7786_pci_ops;
/*
 * Build a pci_channel initializer for controller 'idx', with the port's
 * register block at physical address 'start'.  Memory and I/O resources
 * are identity-mapped (zero offsets).
 */
#define DEFINE_CONTROLLER(start, idx)					\
{									\
	.pci_ops	= &sh7786_pci_ops,				\
	.resources	= sh7786_pci##idx##_resources,			\
	.nr_resources	= ARRAY_SIZE(sh7786_pci##idx##_resources),	\
	.reg_base	= start,					\
	.mem_offset	= 0,						\
	.io_offset	= 0,						\
}
/* One channel per PCIe port; register base addresses per the SH7786 memory map. */
static struct pci_channel sh7786_pci_channels[] = {
	DEFINE_CONTROLLER(0xfe000000, 0),
	DEFINE_CONTROLLER(0xfe200000, 1),
	DEFINE_CONTROLLER(0xfcc00000, 2),
};
124static int phy_wait_for_ack(struct pci_channel *chan)
125{
126 unsigned int timeout = 100;
127
128 while (timeout--) {
129 if (pci_read_reg(chan, SH4A_PCIEPHYADRR) & (1 << BITS_ACK))
130 return 0;
131
132 udelay(100);
133 }
134
135 return -ETIMEDOUT;
136}
137
138static int pci_wait_for_irq(struct pci_channel *chan, unsigned int mask)
139{
140 unsigned int timeout = 100;
141
142 while (timeout--) {
143 if ((pci_read_reg(chan, SH4A_PCIEINTR) & mask) == mask)
144 return 0;
145
146 udelay(100);
147 }
148
149 return -ETIMEDOUT;
150}
151
/*
 * Write 'data' to PHY register 'addr' on the lanes selected by the
 * 'lane' bitmask, via the indirect PHY access registers.
 *
 * The sequence is: load the data register, issue the command through
 * the address register, wait for ack, then clear both registers and
 * wait for the ack of the clear.
 *
 * NOTE(review): the phy_wait_for_ack() return values are ignored, so a
 * wedged PHY only costs time here rather than failing the write — the
 * subsequent PHYSR poll in phy_init() is what catches a dead PHY.
 */
static void phy_write_reg(struct pci_channel *chan, unsigned int addr,
			  unsigned int lane, unsigned int data)
{
	unsigned long phyaddr;

	/* Compose the command word: CMD bit + lane select + register address. */
	phyaddr = (1 << BITS_CMD) + ((lane & 0xf) << BITS_LANE) +
			((addr & 0xff) << BITS_ADR);

	/* Set write data */
	pci_write_reg(chan, data, SH4A_PCIEPHYDOUTR);
	pci_write_reg(chan, phyaddr, SH4A_PCIEPHYADRR);

	phy_wait_for_ack(chan);

	/* Clear command */
	pci_write_reg(chan, 0, SH4A_PCIEPHYDOUTR);
	pci_write_reg(chan, 0, SH4A_PCIEPHYADRR);

	phy_wait_for_ack(chan);
}
/*
 * Bring the PCIe PHY out of standby.
 *
 * Clocks the PHY access interface, loads the vendor-specified register
 * values (magic constants — presumably from the SH7786 datasheet; no
 * symbolic names exist for them), deasserts standby, and then polls the
 * PHY status register for readiness.
 *
 * Returns 0 on success, -ETIMEDOUT if the PHY never reports ready.
 */
static int phy_init(struct pci_channel *chan)
{
	unsigned long ctrl;
	unsigned int timeout = 100;

	/* Enable clock */
	ctrl = pci_read_reg(chan, SH4A_PCIEPHYCTLR);
	ctrl |= (1 << BITS_CKE);
	pci_write_reg(chan, ctrl, SH4A_PCIEPHYCTLR);

	/* Initialize the phy */
	phy_write_reg(chan, 0x60, 0xf, 0x004b008b);
	phy_write_reg(chan, 0x61, 0xf, 0x00007b41);
	phy_write_reg(chan, 0x64, 0xf, 0x00ff4f00);
	phy_write_reg(chan, 0x65, 0xf, 0x09070907);
	phy_write_reg(chan, 0x66, 0xf, 0x00000010);
	phy_write_reg(chan, 0x74, 0xf, 0x0007001c);
	phy_write_reg(chan, 0x79, 0xf, 0x01fc000d);
	phy_write_reg(chan, 0xb0, 0xf, 0x00000610);

	/* Deassert Standby */
	phy_write_reg(chan, 0x67, 0x1, 0x00000400);

	/* Disable clock */
	ctrl = pci_read_reg(chan, SH4A_PCIEPHYCTLR);
	ctrl &= ~(1 << BITS_CKE);
	pci_write_reg(chan, ctrl, SH4A_PCIEPHYCTLR);

	/* Wait for the PHY to signal ready (any non-zero status). */
	while (timeout--) {
		if (pci_read_reg(chan, SH4A_PCIEPHYSR))
			return 0;

		udelay(100);
	}

	return -ETIMEDOUT;
}
/*
 * Soft-reset a PCIe port: pulse the soft reset register, clear the
 * transfer control register, and clear TX virtual-channel 0 status.
 * The write order is the hardware-mandated reset sequence — do not
 * reorder.
 */
static void pcie_reset(struct sh7786_pcie_port *port)
{
	struct pci_channel *chan = port->hose;

	pci_write_reg(chan, 1, SH4A_PCIESRSTR);
	pci_write_reg(chan, 0, SH4A_PCIETCTLR);
	pci_write_reg(chan, 0, SH4A_PCIESRSTR);
	pci_write_reg(chan, 0, SH4A_PCIETXVC0SR);
}
5713e602
PM
/*
 * Full controller bring-up for one PCIe port.
 *
 * Resets the port, programs the shadow config space (header type,
 * express capabilities, slot number), sets transaction-layer timeouts
 * and MAC training parameters, maps local memory into the inbound LAR
 * windows, starts the controller, waits for link training to complete,
 * and finally programs one outbound address window per usable resource.
 *
 * The register write order follows the hardware init sequence and is
 * not safe to reorder.  Returns 0 on success, -ENODEV if the link never
 * comes up.
 */
static int pcie_init(struct sh7786_pcie_port *port)
{
	struct pci_channel *chan = port->hose;
	unsigned int data;
	phys_addr_t memphys;
	size_t memsize;
	int ret, i, win;

	/* Begin initialization */
	pcie_reset(port);

	/* Initialize as type1. */
	data = pci_read_reg(chan, SH4A_PCIEPCICONF3);
	data &= ~(0x7f << 16);
	data |= PCI_HEADER_TYPE_BRIDGE << 16;
	pci_write_reg(chan, data, SH4A_PCIEPCICONF3);

	/* Initialize default capabilities. */
	data = pci_read_reg(chan, SH4A_PCIEEXPCAP0);
	data &= ~(PCI_EXP_FLAGS_TYPE << 16);

	/* Device/port type depends on the MODE11 endpoint strap. */
	if (port->endpoint)
		data |= PCI_EXP_TYPE_ENDPOINT << 20;
	else
		data |= PCI_EXP_TYPE_ROOT_PORT << 20;

	data |= PCI_CAP_ID_EXP;
	pci_write_reg(chan, data, SH4A_PCIEEXPCAP0);

	/* Enable data link layer active state reporting */
	pci_write_reg(chan, PCI_EXP_LNKCAP_DLLLARC, SH4A_PCIEEXPCAP3);

	/* Enable extended sync and ASPM L0s support */
	data = pci_read_reg(chan, SH4A_PCIEEXPCAP4);
	data &= ~PCI_EXP_LNKCTL_ASPMC;
	data |= PCI_EXP_LNKCTL_ES | 1;	/* | 1 == ASPM L0s enable */
	pci_write_reg(chan, data, SH4A_PCIEEXPCAP4);

	/* Write out the physical slot number */
	data = pci_read_reg(chan, SH4A_PCIEEXPCAP5);
	data &= ~PCI_EXP_SLTCAP_PSN;
	data |= (port->index + 1) << 19;	/* slot numbers are 1-based */
	pci_write_reg(chan, data, SH4A_PCIEEXPCAP5);

	/* Set the completion timer timeout to the maximum 32ms. */
	data = pci_read_reg(chan, SH4A_PCIETLCTLR);
	data &= ~0x3f00;
	data |= 0x32 << 8;
	pci_write_reg(chan, data, SH4A_PCIETLCTLR);

	/*
	 * Set fast training sequences to the maximum 255,
	 * and enable MAC data scrambling.
	 */
	data = pci_read_reg(chan, SH4A_PCIEMACCTLR);
	data &= ~PCIEMACCTLR_SCR_DIS;
	data |= (0xff << 16);
	pci_write_reg(chan, data, SH4A_PCIEMACCTLR);

	memphys = __pa(memory_start);
	memsize = roundup_pow_of_two(memory_end - memory_start);

	/*
	 * If there's more than 512MB of memory, we need to roll over to
	 * LAR1/LAMR1.
	 */
	if (memsize > SZ_512M) {
		__raw_writel(memphys + SZ_512M, chan->reg_base + SH4A_PCIELAR1);
		/* Mask is (size - 256B); low bit enables the window. */
		__raw_writel(((memsize - SZ_512M) - SZ_256) | 1,
			     chan->reg_base + SH4A_PCIELAMR1);
		memsize = SZ_512M;
	} else {
		/*
		 * Otherwise just zero it out and disable it.
		 */
		__raw_writel(0, chan->reg_base + SH4A_PCIELAR1);
		__raw_writel(0, chan->reg_base + SH4A_PCIELAMR1);
	}

	/*
	 * LAR0/LAMR0 covers up to the first 512MB, which is enough to
	 * cover all of lowmem on most platforms.
	 */
	__raw_writel(memphys, chan->reg_base + SH4A_PCIELAR0);
	__raw_writel((memsize - SZ_256) | 1, chan->reg_base + SH4A_PCIELAMR0);

	/* Point BAR (CONF4/CONF5) at the start of local memory. */
	__raw_writel(memphys, chan->reg_base + SH4A_PCIEPCICONF4);
	__raw_writel(0, chan->reg_base + SH4A_PCIEPCICONF5);

	/* Finish initialization */
	data = pci_read_reg(chan, SH4A_PCIETCTLR);
	data |= 0x1;	/* start the controller */
	pci_write_reg(chan, data, SH4A_PCIETCTLR);

	/* Enable DL_Active Interrupt generation */
	data = pci_read_reg(chan, SH4A_PCIEDLINTENR);
	data |= PCIEDLINTENR_DLL_ACT_ENABLE;
	pci_write_reg(chan, data, SH4A_PCIEDLINTENR);

	/* Disable MAC data scrambling. */
	data = pci_read_reg(chan, SH4A_PCIEMACCTLR);
	data |= PCIEMACCTLR_SCR_DIS | (0xff << 16);
	pci_write_reg(chan, data, SH4A_PCIEMACCTLR);

	/* Wait for link training to complete; give up if it never does. */
	ret = pci_wait_for_irq(chan, MASK_INT_TX_CTRL);
	if (unlikely(ret != 0))
		return -ENODEV;

	/* Enable I/O, memory and bus mastering; advertise fast DEVSEL. */
	data = pci_read_reg(chan, SH4A_PCIEPCICONF1);
	data &= ~(PCI_STATUS_DEVSEL_MASK << 16);
	data |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
		(PCI_STATUS_CAP_LIST | PCI_STATUS_DEVSEL_FAST) << 16;
	pci_write_reg(chan, data, SH4A_PCIEPCICONF1);

	/* Virtual channel 0 credit/control setup — values per the datasheet. */
	pci_write_reg(chan, 0x80888000, SH4A_PCIETXVC0DCTLR);
	pci_write_reg(chan, 0x00222000, SH4A_PCIERXVC0DCTLR);

	/* Make sure all of the above has hit the hardware before reading back. */
	wmb();

	data = pci_read_reg(chan, SH4A_PCIEMACSR);
	printk(KERN_NOTICE "PCI: PCIe#%d link width %d\n",
	       port->index, (data >> 20) & 0x3f);

	/* Program one outbound translation window per usable resource. */
	for (i = win = 0; i < chan->nr_resources; i++) {
		struct resource *res = chan->resources + i;
		resource_size_t size;
		u32 enable_mask;

		/*
		 * We can't use the 32-bit mode windows in legacy 29-bit
		 * mode, so just skip them entirely.
		 */
		if ((res->flags & IORESOURCE_MEM_32BIT) && __in_29bit_mode())
			continue;

		/* Disable the window before reprogramming it. */
		pci_write_reg(chan, 0x00000000, SH4A_PCIEPTCTLR(win));

		size = resource_size(res);

		/*
		 * The PAMR mask is calculated in units of 256kB, which
		 * keeps things pretty simple.
		 */
		__raw_writel(((roundup_pow_of_two(size) / SZ_256K) - 1) << 18,
			     chan->reg_base + SH4A_PCIEPAMR(win));

		pci_write_reg(chan, res->start, SH4A_PCIEPARL(win));
		pci_write_reg(chan, 0x00000000, SH4A_PCIEPARH(win));

		/* Enable the window; flag I/O-space windows as such. */
		enable_mask = MASK_PARE;
		if (res->flags & IORESOURCE_IO)
			enable_mask |= MASK_SPC;

		pci_write_reg(chan, enable_mask, SH4A_PCIEPTCTLR(win));

		win++;
	}

	return 0;
}
381
/*
 * Map a device's slot/pin to its platform IRQ.  All devices share a
 * single vector here.  NOTE(review): hard-coded to 71 — presumably the
 * SH7786 PCIe interrupt; confirm against the interrupt map if per-port
 * vectors are ever wired up.
 */
int __init pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin)
{
	return 71;
}
386
387static int sh7786_pcie_core_init(void)
388{
389 /* Return the number of ports */
390 return test_mode_pin(MODE_PIN12) ? 3 : 2;
391}
392
393static int __devinit sh7786_pcie_init_hw(struct sh7786_pcie_port *port)
394{
395 int ret;
396
397 ret = phy_init(port->hose);
398 if (unlikely(ret < 0))
399 return ret;
400
401 /*
402 * Check if we are configured in endpoint or root complex mode,
403 * this is a fixed pin setting that applies to all PCIe ports.
404 */
405 port->endpoint = test_mode_pin(MODE_PIN11);
406
407 ret = pcie_init(port);
408 if (unlikely(ret < 0))
409 return ret;
410
bcf39352 411 return register_pci_controller(port->hose);
5713e602
PM
412}
413
/* Hardware ops for the 65nm parts — the only variant supported so far. */
static struct sh7786_pcie_hwops sh7786_65nm_pcie_hwops __initdata = {
	.core_init	= sh7786_pcie_core_init,
	.port_init_hw	= sh7786_pcie_init_hw,
};
418
419static int __init sh7786_pcie_init(void)
420{
421 int ret = 0, i;
422
3b554c33 423 printk(KERN_NOTICE "PCI: Starting initialization.\n");
5713e602
PM
424
425 sh7786_pcie_hwops = &sh7786_65nm_pcie_hwops;
426
427 nr_ports = sh7786_pcie_hwops->core_init();
428 BUG_ON(nr_ports > ARRAY_SIZE(sh7786_pci_channels));
429
430 if (unlikely(nr_ports == 0))
431 return -ENODEV;
432
433 sh7786_pcie_ports = kzalloc(nr_ports * sizeof(struct sh7786_pcie_port),
434 GFP_KERNEL);
435 if (unlikely(!sh7786_pcie_ports))
436 return -ENOMEM;
437
438 printk(KERN_NOTICE "PCI: probing %d ports.\n", nr_ports);
439
440 for (i = 0; i < nr_ports; i++) {
441 struct sh7786_pcie_port *port = sh7786_pcie_ports + i;
442
443 port->index = i;
444 port->hose = sh7786_pci_channels + i;
7561f2dd 445 port->hose->io_map_base = port->hose->resources[0].start;
5713e602
PM
446
447 ret |= sh7786_pcie_hwops->port_init_hw(port);
448 }
449
450 if (unlikely(ret))
451 return ret;
452
453 return 0;
454}
455arch_initcall(sh7786_pcie_init);