/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"

static void ipath_update_pio_bufs(struct ipath_devdata *);

const char *ipath_get_unit_name(int unit)
{
	static char iname[16];
	snprintf(iname, sizeof iname, "infinipath%u", unit);
	return iname;
}

#define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: "
#define PFX IPATH_DRV_NAME ": "

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the init code.
 */
const char ib_ipath_version[] = IPATH_IDSTR "\n";

static struct idr unit_table;
DEFINE_SPINLOCK(ipath_devs_lock);
LIST_HEAD(ipath_dev_list);

wait_queue_head_t ipath_state_wait;

unsigned ipath_debug = __IPATH_INFO;

module_param_named(debug, ipath_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "mask for debug prints");
EXPORT_SYMBOL_GPL(ipath_debug);

unsigned ipath_mtu4096 = 1; /* max 4KB IB mtu by default, if supported */
module_param_named(mtu4096, ipath_mtu4096, uint, S_IRUGO);
MODULE_PARM_DESC(mtu4096, "enable MTU of 4096 bytes, if supported");

static unsigned ipath_hol_timeout_ms = 13000;
module_param_named(hol_timeout_ms, ipath_hol_timeout_ms, uint, S_IRUGO);
MODULE_PARM_DESC(hol_timeout_ms,
	"duration of user app suspension after link failure");

unsigned ipath_linkrecovery = 1;
module_param_named(linkrecovery, ipath_linkrecovery, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(linkrecovery, "enable workaround for link recovery issue");
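
/*
 * Usage sketch (hypothetical values, not a tested recommendation): the
 * module parameters above are set at load time, e.g.
 *
 *	modprobe ib_ipath mtu4096=0 hol_timeout_ms=5000 linkrecovery=0
 *
 * debug and linkrecovery are declared S_IWUSR, so they can also be
 * changed at runtime through /sys/module/ib_ipath/parameters/.
 */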

MODULE_LICENSE("GPL");
MODULE_AUTHOR("QLogic <support@qlogic.com>");
MODULE_DESCRIPTION("QLogic InfiniPath driver");

const char *ipath_ibcstatus_str[] = {
	"Disabled",
	"LinkUp",
	"PollActive",
	"PollQuiet",
	"SleepDelay",
	"SleepQuiet",
	"LState6",		/* unused */
	"LState7",		/* unused */
	"CfgDebounce",
	"CfgRcvfCfg",
	"CfgWaitRmt",
	"CfgIdle",
	"RecovRetrain",
	"LState0xD",		/* unused */
	"RecovWaitRmt",
	"RecovIdle",
};

static void __devexit ipath_remove_one(struct pci_dev *);
static int __devinit ipath_init_one(struct pci_dev *,
				    const struct pci_device_id *);

/* Only needed for registration, nothing else needs this info */
#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10

/* Number of seconds before our card status check... */
#define STATUS_TIMEOUT 60

static const struct pci_device_id ipath_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);

static struct pci_driver ipath_driver = {
	.name = IPATH_DRV_NAME,
	.probe = ipath_init_one,
	.remove = __devexit_p(ipath_remove_one),
	.id_table = ipath_pci_tbl,
	.driver = {
		.groups = ipath_driver_attr_groups,
	},
};

static void ipath_check_status(struct work_struct *work)
{
	struct ipath_devdata *dd = container_of(work, struct ipath_devdata,
						status_work.work);

	/*
	 * If we don't have any interrupts, let the user know and
	 * don't bother checking again.
	 */
	if (dd->ipath_int_counter == 0)
		dev_err(&dd->pcidev->dev, "No interrupts detected.\n");
}

static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
			     u32 *bar0, u32 *bar1)
{
	int ret;

	ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, bar0);
	if (ret)
		ipath_dev_err(dd, "failed to read bar0 before enable: "
			      "error %d\n", -ret);

	ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, bar1);
	if (ret)
		ipath_dev_err(dd, "failed to read bar1 before enable: "
			      "error %d\n", -ret);

	ipath_dbg("Read bar0 %x bar1 %x\n", *bar0, *bar1);
}

static void ipath_free_devdata(struct pci_dev *pdev,
			       struct ipath_devdata *dd)
{
	unsigned long flags;

	pci_set_drvdata(pdev, NULL);

	if (dd->ipath_unit != -1) {
		spin_lock_irqsave(&ipath_devs_lock, flags);
		idr_remove(&unit_table, dd->ipath_unit);
		list_del(&dd->ipath_list);
		spin_unlock_irqrestore(&ipath_devs_lock, flags);
	}
	vfree(dd);
}

static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
{
	unsigned long flags;
	struct ipath_devdata *dd;
	int ret;

	if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
		dd = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dd = vmalloc(sizeof(*dd));
	if (!dd) {
		dd = ERR_PTR(-ENOMEM);
		goto bail;
	}
	memset(dd, 0, sizeof(*dd));
	dd->ipath_unit = -1;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	ret = idr_get_new(&unit_table, dd, &dd->ipath_unit);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate unit ID: error %d\n", -ret);
		ipath_free_devdata(pdev, dd);
		dd = ERR_PTR(ret);
		goto bail_unlock;
	}

	dd->pcidev = pdev;
	pci_set_drvdata(pdev, dd);

	INIT_DELAYED_WORK(&dd->status_work, ipath_check_status);

	list_add(&dd->ipath_list, &ipath_dev_list);

bail_unlock:
	spin_unlock_irqrestore(&ipath_devs_lock, flags);

bail:
	return dd;
}

static inline struct ipath_devdata *__ipath_lookup(int unit)
{
	return idr_find(&unit_table, unit);
}

struct ipath_devdata *ipath_lookup(int unit)
{
	struct ipath_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&ipath_devs_lock, flags);
	dd = __ipath_lookup(unit);
	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	return dd;
}

int ipath_count_units(int *npresentp, int *nupp, int *maxportsp)
{
	int nunits, npresent, nup;
	struct ipath_devdata *dd;
	unsigned long flags;
	int maxports;

	nunits = npresent = nup = maxports = 0;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
		nunits++;
		if ((dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase)
			npresent++;
		if (dd->ipath_lid &&
		    !(dd->ipath_flags & (IPATH_DISABLED | IPATH_LINKDOWN
					 | IPATH_LINKUNK)))
			nup++;
		if (dd->ipath_cfgports > maxports)
			maxports = dd->ipath_cfgports;
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	if (npresentp)
		*npresentp = npresent;
	if (nupp)
		*nupp = nup;
	if (maxportsp)
		*maxportsp = maxports;

	return nunits;
}

/*
 * These next two routines are placeholders in case we don't have per-arch
 * code for controlling write combining. If explicit control of write
 * combining is not available, performance will probably be awful.
 */

int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd)
{
	return -EOPNOTSUPP;
}

void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd)
{
}

/*
 * Perform a PIO buffer bandwidth write test, to verify proper system
 * configuration. Even when all the setup calls work, occasionally
 * BIOS or other issues can prevent write combining from working, or
 * can cause other bandwidth problems to the chip.
 *
 * This test simply writes the same buffer over and over again, and
 * measures close to the peak bandwidth to the chip (not testing
 * data bandwidth to the wire). On chips that use an address-based
 * trigger to send packets to the wire, this is easy. On chips that
 * use a count to trigger, we want to make sure that the packet doesn't
 * go out on the wire, or trigger flow control checks.
 */
static void ipath_verify_pioperf(struct ipath_devdata *dd)
{
	u32 pbnum, cnt, lcnt;
	u32 __iomem *piobuf;
	u32 *addr;
	u64 msecs, emsecs;

	piobuf = ipath_getpiobuf(dd, 0, &pbnum);
	if (!piobuf) {
		dev_info(&dd->pcidev->dev,
			"No PIObufs for checking perf, skipping\n");
		return;
	}

	/*
	 * Enough to give us a reasonable test, less than piobuf size, and
	 * likely multiple of store buffer length.
	 */
	cnt = 1024;

	addr = vmalloc(cnt);
	if (!addr) {
		dev_info(&dd->pcidev->dev,
			"Couldn't get memory for checking PIO perf,"
			" skipping\n");
		goto done;
	}

	preempt_disable();  /* we want reasonably accurate elapsed time */
	msecs = 1 + jiffies_to_msecs(jiffies);
	for (lcnt = 0; lcnt < 10000U; lcnt++) {
		/* wait until we cross msec boundary */
		if (jiffies_to_msecs(jiffies) >= msecs)
			break;
		udelay(1);
	}

	ipath_disable_armlaunch(dd);

	writeq(0, piobuf); /* length 0, no dwords actually sent */
	ipath_flush_wc();

	/*
	 * this is only roughly accurate, since even with preempt we
	 * still take interrupts that could take a while.  Running for
	 * >= 5 msec seems to get us "close enough" to accurate values
	 */
	msecs = jiffies_to_msecs(jiffies);
	for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
		__iowrite32_copy(piobuf + 64, addr, cnt >> 2);
		emsecs = jiffies_to_msecs(jiffies) - msecs;
	}

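	/*
	 * Worked arithmetic (a sketch based on the constants above): each
	 * iteration of the loop copies cnt = 1024 bytes, so lcnt KiB moved
	 * in emsecs milliseconds is roughly lcnt/emsecs MiB/sec; the check
	 * below, lcnt >= emsecs * 1024, therefore demands about 1 GiB/sec
	 * of bandwidth to the PIO buffers.
	 */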
	/* 1 GiB/sec, slightly over IB SDR line rate */
	if (lcnt < (emsecs * 1024U))
		ipath_dev_err(dd,
			"Performance problem: bandwidth to PIO buffers is "
			"only %u MiB/sec\n",
			lcnt / (u32) emsecs);
	else
		ipath_dbg("PIO buffer bandwidth %u MiB/sec is OK\n",
			lcnt / (u32) emsecs);

	preempt_enable();

	vfree(addr);

done:
	/* disarm piobuf, so it's available again */
	ipath_disarm_piobufs(dd, pbnum, 1);
	ipath_enable_armlaunch(dd);
}

static int __devinit ipath_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	int ret, len, j;
	struct ipath_devdata *dd;
	unsigned long long addr;
	u32 bar0 = 0, bar1 = 0;

	dd = ipath_alloc_devdata(pdev);
	if (IS_ERR(dd)) {
		ret = PTR_ERR(dd);
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate devdata: error %d\n", -ret);
		goto bail;
	}

	ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);

	ret = pci_enable_device(pdev);
	if (ret) {
		/* This can happen iff:
		 *
		 * We did a chip reset, and then failed to reprogram the
		 * BAR, or the chip reset due to an internal error.  We then
		 * unloaded the driver and reloaded it.
		 *
		 * Both reset cases set the BAR back to initial state.  For
		 * the latter case, the AER sticky error bit at offset 0x718
		 * should be set, but the Linux kernel doesn't yet know
		 * about that, it appears.  If the original BAR was retained
		 * in the kernel data structures, this may be OK.
		 */
		ipath_dev_err(dd, "enable unit %d failed: error %d\n",
			      dd->ipath_unit, -ret);
		goto bail_devdata;
	}
	addr = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);
	ipath_cdbg(VERBOSE, "regbase (0) %llx len %d pdev->irq %d, vend %x/%x "
		   "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
		   ent->device, ent->driver_data);

	read_bars(dd, pdev, &bar0, &bar1);

	if (!bar1 && !(bar0 & ~0xf)) {
		if (addr) {
			dev_info(&pdev->dev, "BAR is 0 (probable RESET), "
				 "rewriting as %llx\n", addr);
			ret = pci_write_config_dword(
				pdev, PCI_BASE_ADDRESS_0, addr);
			if (ret) {
				ipath_dev_err(dd, "rewrite of BAR0 "
					      "failed: err %d\n", -ret);
				goto bail_disable;
			}
			ret = pci_write_config_dword(
				pdev, PCI_BASE_ADDRESS_1, addr >> 32);
			if (ret) {
				ipath_dev_err(dd, "rewrite of BAR1 "
					      "failed: err %d\n", -ret);
				goto bail_disable;
			}
		} else {
			ipath_dev_err(dd, "BAR is 0 (probable RESET), "
				      "not usable until reboot\n");
			ret = -ENODEV;
			goto bail_disable;
		}
	}

	ret = pci_request_regions(pdev, IPATH_DRV_NAME);
	if (ret) {
		dev_info(&pdev->dev, "pci_request_regions unit %u fails: "
			 "err %d\n", dd->ipath_unit, -ret);
		goto bail_disable;
	}

	ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (ret) {
		/*
		 * if the 64 bit setup fails, try 32 bit.  Some systems
		 * do not setup 64 bit maps on systems with 2GB or less
		 * memory installed.
		 */
		ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (ret) {
			dev_info(&pdev->dev,
				 "Unable to set DMA mask for unit %u: %d\n",
				 dd->ipath_unit, ret);
			goto bail_regions;
		} else {
			ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
			ret = pci_set_consistent_dma_mask(pdev,
							  DMA_32BIT_MASK);
			if (ret)
				dev_info(&pdev->dev,
					 "Unable to set DMA consistent mask "
					 "for unit %u: %d\n",
					 dd->ipath_unit, ret);
		}
	} else {
		ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (ret)
			dev_info(&pdev->dev,
				 "Unable to set DMA consistent mask "
				 "for unit %u: %d\n",
				 dd->ipath_unit, ret);
	}

	pci_set_master(pdev);

	/*
	 * Save BARs to rewrite after device reset.  Save all 64 bits of
	 * BAR, just in case.
	 */
	dd->ipath_pcibar0 = addr;
	dd->ipath_pcibar1 = addr >> 32;
	dd->ipath_deviceid = ent->device;	/* save for later use */
	dd->ipath_vendorid = ent->vendor;

	/* setup the chip-specific functions, as early as possible. */
	switch (ent->device) {
	case PCI_DEVICE_ID_INFINIPATH_HT:
#ifdef CONFIG_HT_IRQ
		ipath_init_iba6110_funcs(dd);
		break;
#else
		ipath_dev_err(dd, "QLogic HT device 0x%x cannot work if "
			      "CONFIG_HT_IRQ is not enabled\n", ent->device);
		return -ENODEV;
#endif
	case PCI_DEVICE_ID_INFINIPATH_PE800:
#ifdef CONFIG_PCI_MSI
		ipath_init_iba6120_funcs(dd);
		break;
#else
		ipath_dev_err(dd, "QLogic PCIE device 0x%x cannot work if "
			      "CONFIG_PCI_MSI is not enabled\n", ent->device);
		return -ENODEV;
#endif
	default:
		ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
			      "failing\n", ent->device);
		return -ENODEV;
	}

	for (j = 0; j < 6; j++) {
		if (!pdev->resource[j].start)
			continue;
		ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n",
			   j, (unsigned long long)pdev->resource[j].start,
			   (unsigned long long)pdev->resource[j].end,
			   (unsigned long long)pci_resource_len(pdev, j));
	}

	if (!addr) {
		ipath_dev_err(dd, "No valid address in BAR 0!\n");
		ret = -ENODEV;
		goto bail_regions;
	}

	dd->ipath_pcirev = pdev->revision;

#if defined(__powerpc__)
	/* There isn't a generic way to specify writethrough mappings */
	dd->ipath_kregbase = __ioremap(addr, len,
				       (_PAGE_NO_CACHE|_PAGE_WRITETHRU));
#else
	dd->ipath_kregbase = ioremap_nocache(addr, len);
#endif

	if (!dd->ipath_kregbase) {
		ipath_dbg("Unable to map io addr %llx to kvirt, failing\n",
			  addr);
		ret = -ENOMEM;
		goto bail_iounmap;
	}
	dd->ipath_kregend = (u64 __iomem *)
		((void __iomem *)dd->ipath_kregbase + len);
	dd->ipath_physaddr = addr;	/* used for io_remap, etc. */
	/* for user mmap */
	ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n",
		   addr, dd->ipath_kregbase);

	/*
	 * clear ipath_flags here instead of in ipath_init_chip as it is set
	 * by ipath_setup_htconfig.
	 */
	dd->ipath_flags = 0;
	dd->ipath_lli_counter = 0;
	dd->ipath_lli_errors = 0;

	if (dd->ipath_f_bus(dd, pdev))
		ipath_dev_err(dd, "Failed to setup config space; "
			      "continuing anyway\n");

	/*
	 * set up our interrupt handler; IRQF_SHARED probably not needed,
	 * since MSI interrupts shouldn't be shared but won't hurt for now.
	 * check 0 irq after we return from chip-specific bus setup, since
	 * that can affect this due to setup
	 */
	if (!dd->ipath_irq)
		ipath_dev_err(dd, "irq is 0, BIOS error?  Interrupts won't "
			      "work\n");
	else {
		ret = request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
				  IPATH_DRV_NAME, dd);
		if (ret) {
			ipath_dev_err(dd, "Couldn't setup irq handler, "
				      "irq=%d: %d\n", dd->ipath_irq, ret);
			goto bail_iounmap;
		}
	}

	ret = ipath_init_chip(dd, 0);	/* do the chip-specific init */
	if (ret)
		goto bail_irqsetup;

	ret = ipath_enable_wc(dd);

	if (ret) {
		ipath_dev_err(dd, "Write combining not enabled "
			      "(err %d): performance may be poor\n",
			      -ret);
		ret = 0;
	}

	ipath_verify_pioperf(dd);

	ipath_device_create_group(&pdev->dev, dd);
	ipathfs_add_device(dd);
	ipath_user_add(dd);
	ipath_diag_add(dd);
	ipath_register_ib_device(dd);

	/* Check the card status in STATUS_TIMEOUT seconds. */
	schedule_delayed_work(&dd->status_work, HZ * STATUS_TIMEOUT);

	goto bail;

bail_irqsetup:
	if (pdev->irq)
		free_irq(pdev->irq, dd);

bail_iounmap:
	iounmap((volatile void __iomem *) dd->ipath_kregbase);

bail_regions:
	pci_release_regions(pdev);

bail_disable:
	pci_disable_device(pdev);

bail_devdata:
	ipath_free_devdata(pdev, dd);

bail:
	return ret;
}

static void __devexit cleanup_device(struct ipath_devdata *dd)
{
	int port;

	if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
		/* can't do anything more with chip; needs re-init */
		*dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
		if (dd->ipath_kregbase) {
			/*
			 * if we haven't already cleaned up before, these
			 * are set to NULL/0 to ensure any register
			 * reads/writes "fail" until re-init
			 */
			dd->ipath_kregbase = NULL;
			dd->ipath_uregbase = 0;
			dd->ipath_sregbase = 0;
			dd->ipath_cregbase = 0;
			dd->ipath_kregsize = 0;
		}
		ipath_disable_wc(dd);
	}

	if (dd->ipath_pioavailregs_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *) dd->ipath_pioavailregs_dma,
				  dd->ipath_pioavailregs_phys);
		dd->ipath_pioavailregs_dma = NULL;
	}
	if (dd->ipath_dummy_hdrq) {
		dma_free_coherent(&dd->pcidev->dev,
			dd->ipath_pd[0]->port_rcvhdrq_size,
			dd->ipath_dummy_hdrq, dd->ipath_dummy_hdrq_phys);
		dd->ipath_dummy_hdrq = NULL;
	}

	if (dd->ipath_pageshadow) {
		struct page **tmpp = dd->ipath_pageshadow;
		dma_addr_t *tmpd = dd->ipath_physshadow;
		int i, cnt = 0;

		ipath_cdbg(VERBOSE, "Unlocking any expTID pages still "
			   "locked\n");
		for (port = 0; port < dd->ipath_cfgports; port++) {
			int port_tidbase = port * dd->ipath_rcvtidcnt;
			int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
			for (i = port_tidbase; i < maxtid; i++) {
				if (!tmpp[i])
					continue;
				pci_unmap_page(dd->pcidev, tmpd[i],
					PAGE_SIZE, PCI_DMA_FROMDEVICE);
				ipath_release_user_pages(&tmpp[i], 1);
				tmpp[i] = NULL;
				cnt++;
			}
		}
		if (cnt) {
			ipath_stats.sps_pageunlocks += cnt;
			ipath_cdbg(VERBOSE, "There were still %u expTID "
				   "entries locked\n", cnt);
		}
		if (ipath_stats.sps_pagelocks ||
		    ipath_stats.sps_pageunlocks)
			ipath_cdbg(VERBOSE, "%llu pages locked, %llu "
				   "unlocked via ipath_m{un}lock\n",
				   (unsigned long long)
				   ipath_stats.sps_pagelocks,
				   (unsigned long long)
				   ipath_stats.sps_pageunlocks);

		ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
			   dd->ipath_pageshadow);
		tmpp = dd->ipath_pageshadow;
		dd->ipath_pageshadow = NULL;
		vfree(tmpp);

		dd->ipath_egrtidbase = NULL;
	}

	/*
	 * free any resources still in use (usually just kernel ports)
	 * at unload; we do for portcnt, not cfgports, because cfgports
	 * could have changed while we were loaded.
	 */
	for (port = 0; port < dd->ipath_portcnt; port++) {
		struct ipath_portdata *pd = dd->ipath_pd[port];
		dd->ipath_pd[port] = NULL;
		ipath_free_pddata(dd, pd);
	}
	kfree(dd->ipath_pd);
	/*
	 * debuggability, in case some cleanup path tries to use it
	 * after this
	 */
	dd->ipath_pd = NULL;
}

static void __devexit ipath_remove_one(struct pci_dev *pdev)
{
	struct ipath_devdata *dd = pci_get_drvdata(pdev);

	ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd);

	/*
	 * disable the IB link early, to be sure no new packets arrive, which
	 * complicates the shutdown process
	 */
	ipath_shutdown_device(dd);

	cancel_delayed_work(&dd->status_work);
	flush_scheduled_work();

	if (dd->verbs_dev)
		ipath_unregister_ib_device(dd->verbs_dev);

	ipath_diag_remove(dd);
	ipath_user_remove(dd);
	ipathfs_remove_device(dd);
	ipath_device_remove_group(&pdev->dev, dd);

	ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
		   "unit %u\n", dd, (u32) dd->ipath_unit);

	cleanup_device(dd);

	/*
	 * turn off rcv, send, and interrupts for all ports, all drivers
	 * should also hard reset the chip here?
	 * free up port 0 (kernel) rcvhdr, egr bufs, and eventually tid bufs
	 * for all versions of the driver, if they were allocated
	 */
	if (dd->ipath_irq) {
		ipath_cdbg(VERBOSE, "unit %u free irq %d\n",
			   dd->ipath_unit, dd->ipath_irq);
		dd->ipath_f_free_irq(dd);
	} else
		ipath_dbg("irq is 0, not doing free_irq "
			  "for unit %u\n", dd->ipath_unit);
	/*
	 * we check for NULL here, because it's outside
	 * the kregbase check, and we need to call it
	 * after the free_irq.  Thus it's possible that
	 * the function pointers were never initialized.
	 */
	if (dd->ipath_f_cleanup)
		/* clean up chip-specific stuff */
		dd->ipath_f_cleanup(dd);

	ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n", dd->ipath_kregbase);
	iounmap((volatile void __iomem *) dd->ipath_kregbase);
	pci_release_regions(pdev);
	ipath_cdbg(VERBOSE, "calling pci_disable_device\n");
	pci_disable_device(pdev);

	ipath_free_devdata(pdev, dd);
}

/* general driver use */
DEFINE_MUTEX(ipath_mutex);

static DEFINE_SPINLOCK(ipath_pioavail_lock);

/**
 * ipath_disarm_piobufs - cancel a range of PIO buffers
 * @dd: the infinipath device
 * @first: the first PIO buffer to cancel
 * @cnt: the number of PIO buffers to cancel
 *
 * cancel a range of PIO buffers, used when they might be armed, but
 * not triggered.  Used at init to ensure buffer state, and also on user
 * process close, in case it died while writing to a PIO buffer.
 * Also used after errors.
 */
void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
			  unsigned cnt)
{
	unsigned i, last = first + cnt;
	unsigned long flags;

	ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
	for (i = first; i < last; i++) {
		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		/*
		 * The disarm-related bits are write-only, so it
		 * is ok to OR them in with our copy of sendctrl
		 * while we hold the lock.
		 */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			dd->ipath_sendctrl | INFINIPATH_S_DISARM |
			(i << INFINIPATH_S_DISARMPIOBUF_SHIFT));
		/* can't disarm bufs back-to-back per iba7220 spec */
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
	}
	/* on some older chips, update may not happen after cancel */
	ipath_force_pio_avail_update(dd);
}

/**
 * ipath_wait_linkstate - wait for an IB link state change to occur
 * @dd: the infinipath device
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * wait up to msecs milliseconds for IB link state change to occur;
 * for now, take the easy polling route.  Currently used only by
 * ipath_set_linkstate.  Returns 0 if state reached, otherwise
 * -ETIMEDOUT.  state can have multiple states set, for any of several
 * transitions.
 */
int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
{
	dd->ipath_state_wanted = state;
	wait_event_interruptible_timeout(ipath_state_wait,
					 (dd->ipath_flags & state),
					 msecs_to_jiffies(msecs));
	dd->ipath_state_wanted = 0;

	if (!(dd->ipath_flags & state)) {
		u64 val;
		ipath_cdbg(VERBOSE, "Didn't reach linkstate %s within %u"
			   " ms\n",
			   /* test INIT ahead of DOWN, both can be set */
			   (state & IPATH_LINKINIT) ? "INIT" :
			   ((state & IPATH_LINKDOWN) ? "DOWN" :
			    ((state & IPATH_LINKARMED) ? "ARM" : "ACTIVE")),
			   msecs);
		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
		ipath_cdbg(VERBOSE, "ibcc=%llx ibcstatus=%llx (%s)\n",
			   (unsigned long long) ipath_read_kreg64(
				   dd, dd->ipath_kregs->kr_ibcctrl),
			   (unsigned long long) val,
			   ipath_ibcstatus_str[val & 0xf]);
	}
	return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
}

/*
 * Decode the error status into strings, deciding whether to always
 * print it or not, depending on "normal packet errors" vs everything
 * else.  Return 1 if "real" errors, otherwise 0 if only packet
 * errors, so caller can decide what to print with the string.
 */
int ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
{
	int iserr = 1;
	*buf = '\0';
	if (err & INFINIPATH_E_PKTERRS) {
		if (!(err & ~INFINIPATH_E_PKTERRS))
			iserr = 0; /* if only packet errors. */
		if (ipath_debug & __IPATH_ERRPKTDBG) {
			if (err & INFINIPATH_E_REBP)
				strlcat(buf, "EBP ", blen);
			if (err & INFINIPATH_E_RVCRC)
				strlcat(buf, "VCRC ", blen);
			if (err & INFINIPATH_E_RICRC) {
				strlcat(buf, "CRC ", blen);
				/* clear for check below, so only once */
				err &= INFINIPATH_E_RICRC;
			}
			if (err & INFINIPATH_E_RSHORTPKTLEN)
				strlcat(buf, "rshortpktlen ", blen);
			if (err & INFINIPATH_E_SDROPPEDDATAPKT)
				strlcat(buf, "sdroppeddatapkt ", blen);
			if (err & INFINIPATH_E_SPKTLEN)
				strlcat(buf, "spktlen ", blen);
		}
		if ((err & INFINIPATH_E_RICRC) &&
		    !(err & (INFINIPATH_E_RVCRC | INFINIPATH_E_REBP)))
			strlcat(buf, "CRC ", blen);
		if (!iserr)
			goto done;
	}
	if (err & INFINIPATH_E_RHDRLEN)
		strlcat(buf, "rhdrlen ", blen);
	if (err & INFINIPATH_E_RBADTID)
		strlcat(buf, "rbadtid ", blen);
	if (err & INFINIPATH_E_RBADVERSION)
		strlcat(buf, "rbadversion ", blen);
	if (err & INFINIPATH_E_RHDR)
		strlcat(buf, "rhdr ", blen);
	if (err & INFINIPATH_E_RLONGPKTLEN)
		strlcat(buf, "rlongpktlen ", blen);
	if (err & INFINIPATH_E_RMAXPKTLEN)
		strlcat(buf, "rmaxpktlen ", blen);
	if (err & INFINIPATH_E_RMINPKTLEN)
		strlcat(buf, "rminpktlen ", blen);
	if (err & INFINIPATH_E_SMINPKTLEN)
		strlcat(buf, "sminpktlen ", blen);
	if (err & INFINIPATH_E_RFORMATERR)
		strlcat(buf, "rformaterr ", blen);
	if (err & INFINIPATH_E_RUNSUPVL)
		strlcat(buf, "runsupvl ", blen);
	if (err & INFINIPATH_E_RUNEXPCHAR)
		strlcat(buf, "runexpchar ", blen);
	if (err & INFINIPATH_E_RIBFLOW)
		strlcat(buf, "ribflow ", blen);
	if (err & INFINIPATH_E_SUNDERRUN)
		strlcat(buf, "sunderrun ", blen);
	if (err & INFINIPATH_E_SPIOARMLAUNCH)
		strlcat(buf, "spioarmlaunch ", blen);
	if (err & INFINIPATH_E_SUNEXPERRPKTNUM)
		strlcat(buf, "sunexperrpktnum ", blen);
	if (err & INFINIPATH_E_SDROPPEDSMPPKT)
		strlcat(buf, "sdroppedsmppkt ", blen);
	if (err & INFINIPATH_E_SMAXPKTLEN)
		strlcat(buf, "smaxpktlen ", blen);
	if (err & INFINIPATH_E_SUNSUPVL)
		strlcat(buf, "sunsupVL ", blen);
	if (err & INFINIPATH_E_INVALIDADDR)
		strlcat(buf, "invalidaddr ", blen);
	if (err & INFINIPATH_E_RRCVEGRFULL)
		strlcat(buf, "rcvegrfull ", blen);
	if (err & INFINIPATH_E_RRCVHDRFULL)
		strlcat(buf, "rcvhdrfull ", blen);
	if (err & INFINIPATH_E_IBSTATUSCHANGED)
		strlcat(buf, "ibcstatuschg ", blen);
	if (err & INFINIPATH_E_RIBLOSTLINK)
		strlcat(buf, "riblostlink ", blen);
	if (err & INFINIPATH_E_HARDWARE)
		strlcat(buf, "hardware ", blen);
	if (err & INFINIPATH_E_RESET)
		strlcat(buf, "reset ", blen);
done:
	return iserr;
}
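
/*
 * Usage sketch (hypothetical caller, not code from this file): callers
 * typically decode into a stack buffer and use the return value to pick
 * the print severity, e.g.
 *
 *	char msg[128];
 *	if (ipath_decode_err(msg, sizeof msg, errs))
 *		ipath_dev_err(dd, "errors: %s\n", msg);
 *	else
 *		ipath_cdbg(PKT, "packet errs: %s\n", msg);
 */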

/**
 * get_rhf_errstring - decode RHF errors
 * @err: the err number
 * @msg: the output buffer
 * @len: the length of the output buffer
 *
 * only used one place now, may want more later
 */
static void get_rhf_errstring(u32 err, char *msg, size_t len)
{
	/* start empty, so the string stays empty if there are no errors */
	*msg = '\0';

	if (err & INFINIPATH_RHF_H_ICRCERR)
		strlcat(msg, "icrcerr ", len);
	if (err & INFINIPATH_RHF_H_VCRCERR)
		strlcat(msg, "vcrcerr ", len);
	if (err & INFINIPATH_RHF_H_PARITYERR)
		strlcat(msg, "parityerr ", len);
	if (err & INFINIPATH_RHF_H_LENERR)
		strlcat(msg, "lenerr ", len);
	if (err & INFINIPATH_RHF_H_MTUERR)
		strlcat(msg, "mtuerr ", len);
	if (err & INFINIPATH_RHF_H_IHDRERR)
		/* infinipath hdr checksum error */
		strlcat(msg, "ipathhdrerr ", len);
	if (err & INFINIPATH_RHF_H_TIDERR)
		strlcat(msg, "tiderr ", len);
	if (err & INFINIPATH_RHF_H_MKERR)
		/* bad port, offset, etc. */
		strlcat(msg, "invalid ipathhdr ", len);
	if (err & INFINIPATH_RHF_H_IBERR)
		strlcat(msg, "iberr ", len);
	if (err & INFINIPATH_RHF_L_SWA)
		strlcat(msg, "swA ", len);
	if (err & INFINIPATH_RHF_L_SWB)
		strlcat(msg, "swB ", len);
}

/**
 * ipath_get_egrbuf - get an eager buffer
 * @dd: the infinipath device
 * @bufnum: the eager buffer to get
 *
 * must only be called if ipath_pd[port] is known to be allocated
 */
static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum)
{
	return dd->ipath_port0_skbinfo ?
		(void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL;
}

/**
 * ipath_alloc_skb - allocate an skb and buffer with possible constraints
 * @dd: the infinipath device
 * @gfp_mask: the sk_buff GFP mask
 */
struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
				gfp_t gfp_mask)
{
	struct sk_buff *skb;
	u32 len;

	/*
	 * Only fully supported way to handle this is to allocate lots of
	 * extra, align as needed, and then do skb_reserve().  That wastes
	 * a lot of memory...  I'll have to hack this into infinipath_copy
	 * also.
	 */

	/*
	 * We need 2 extra bytes for ipath_ether data sent in the
	 * key header.  In order to keep everything dword aligned,
	 * we'll reserve 4 bytes.
	 */
	len = dd->ipath_ibmaxlen + 4;

	if (dd->ipath_flags & IPATH_4BYTE_TID) {
		/* We need a 2KB multiple alignment, and there is no way
		 * to do it except to allocate extra and then skb_reserve
		 * enough to bring it up to the right alignment.
		 */
		len += 2047;
	}

	skb = __dev_alloc_skb(len, gfp_mask);
	if (!skb) {
		ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
			      len);
		goto bail;
	}

	skb_reserve(skb, 4);

	if (dd->ipath_flags & IPATH_4BYTE_TID) {
		u32 una = (unsigned long)skb->data & 2047;
		if (una)
			skb_reserve(skb, 2048 - una);
	}
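
	/*
	 * Alignment sketch (follows from the code above): len included
	 * 2047 bytes of slack, so after the 4-byte reserve there is always
	 * room to advance skb->data to the next 2KB boundary; una is the
	 * offset within the current 2KB chunk, and reserving (2048 - una)
	 * more bytes lands the data pointer on the 2KB multiple that the
	 * 4-byte TID format requires.
	 */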

bail:
	return skb;
}

static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
			     u32 eflags,
			     u32 l,
			     u32 etail,
			     __le32 *rhf_addr,
			     struct ipath_message_header *hdr)
{
	char emsg[128];

	get_rhf_errstring(eflags, emsg, sizeof emsg);
	ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
		   "tlen=%x opcode=%x egridx=%x: %s\n",
		   eflags, l,
		   ipath_hdrget_rcv_type(rhf_addr),
		   ipath_hdrget_length_in_bytes(rhf_addr),
		   be32_to_cpu(hdr->bth[0]) >> 24,
		   etail, emsg);

	/* Count local link integrity errors. */
	if (eflags & (INFINIPATH_RHF_H_ICRCERR | INFINIPATH_RHF_H_VCRCERR)) {
		u8 n = (dd->ipath_ibcctrl >>
			INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
			INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;

		if (++dd->ipath_lli_counter > n) {
			dd->ipath_lli_counter = 0;
			dd->ipath_lli_errors++;
		}
	}
}

/*
 * ipath_kreceive - receive a packet
 * @pd: the infinipath port
 *
 * called from interrupt handler for errors or receive interrupt
 */
void ipath_kreceive(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	__le32 *rhf_addr;
	void *ebuf;
	const u32 rsize = dd->ipath_rcvhdrentsize;	/* words */
	const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize;	/* words */
	u32 etail = -1, l, hdrqtail;
	struct ipath_message_header *hdr;
	u32 eflags, i, etype, tlen, pkttot = 0, updegr = 0, reloop = 0;
	static u64 totcalls;	/* stats, may eventually remove */
	int last;

	l = pd->port_head;
	rhf_addr = (__le32 *) pd->port_rcvhdrq + l + dd->ipath_rhf_offset;
	if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
		u32 seq = ipath_hdrget_seq(rhf_addr);

		if (seq != pd->port_seq_cnt)
			goto bail;
		hdrqtail = 0;
	} else {
		hdrqtail = ipath_get_rcvhdrtail(pd);
		if (l == hdrqtail)
			goto bail;
		smp_rmb();
	}

reloop:
	for (last = 0, i = 1; !last; i++) {
		hdr = dd->ipath_f_get_msgheader(dd, rhf_addr);
		eflags = ipath_hdrget_err_flags(rhf_addr);
		etype = ipath_hdrget_rcv_type(rhf_addr);
		/* total length */
		tlen = ipath_hdrget_length_in_bytes(rhf_addr);
		ebuf = NULL;
		if ((dd->ipath_flags & IPATH_NODMA_RTAIL) ?
		    ipath_hdrget_use_egr_buf(rhf_addr) :
		    (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
			/*
			 * It turns out that the chip uses an eager buffer
			 * for all non-expected packets, whether it "needs"
			 * one or not.  So always get the index, but don't
			 * set ebuf (so we try to copy data) unless the
			 * length requires it.
			 */
			etail = ipath_hdrget_index(rhf_addr);
			updegr = 1;
			if (tlen > sizeof(*hdr) ||
			    etype == RCVHQ_RCV_TYPE_NON_KD)
				ebuf = ipath_get_egrbuf(dd, etail);
		}

		/*
		 * both tiderr and ipathhdrerr are set for all plain IB
		 * packets; only ipathhdrerr should be set.
		 */

		if (etype != RCVHQ_RCV_TYPE_NON_KD &&
		    etype != RCVHQ_RCV_TYPE_ERROR &&
		    ipath_hdrget_ipath_ver(hdr->iph.ver_port_tid_offset) !=
		    IPS_PROTO_VERSION)
			ipath_cdbg(PKT, "Bad InfiniPath protocol version "
				   "%x\n", etype);

		if (unlikely(eflags))
			ipath_rcv_hdrerr(dd, eflags, l, etail, rhf_addr, hdr);
		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
			ipath_ib_rcv(dd->verbs_dev, (u32 *)hdr, ebuf, tlen);
			if (dd->ipath_lli_counter)
				dd->ipath_lli_counter--;
		} else if (etype == RCVHQ_RCV_TYPE_EAGER) {
			u8 opcode = be32_to_cpu(hdr->bth[0]) >> 24;
			u32 qp = be32_to_cpu(hdr->bth[1]) & 0xffffff;
			ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
				   "qp=%x), len %x; ignored\n",
				   etype, opcode, qp, tlen);
		} else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
			ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
				  be32_to_cpu(hdr->bth[0]) >> 24);
		else {
			/*
			 * error packet, type of error unknown.
			 * Probably type 3, but we don't know, so don't
			 * even try to print the opcode, etc.
			 * Usually caused by a "bad packet", that has no
			 * BTH, when the LRH says it should.
			 */
			ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf"
				   " %x, len %x hdrq+%x rhf: %Lx\n",
				   etail, tlen, l,
				   le64_to_cpu(*(__le64 *) rhf_addr));
			if (ipath_debug & __IPATH_ERRPKTDBG) {
				u32 j, *d, dw = rsize - 2;
				if (rsize > (tlen >> 2))
					dw = tlen >> 2;
				d = (u32 *)hdr;
				printk(KERN_DEBUG "EPkt rcvhdr(%x dw):\n",
					dw);
				for (j = 0; j < dw; j++)
					printk(KERN_DEBUG "%8x%s", d[j],
						(j % 8) == 7 ? "\n" : " ");
				printk(KERN_DEBUG ".\n");
			}
		}
		l += rsize;
		if (l >= maxcnt)
			l = 0;
		rhf_addr = (__le32 *) pd->port_rcvhdrq +
			l + dd->ipath_rhf_offset;
		if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
			u32 seq = ipath_hdrget_seq(rhf_addr);

			if (++pd->port_seq_cnt > 13)
				pd->port_seq_cnt = 1;
			if (seq != pd->port_seq_cnt)
				last = 1;
		} else if (l == hdrqtail)
			last = 1;
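
		/*
		 * Explanatory note (restating the logic above): with
		 * IPATH_NODMA_RTAIL there is no DMA'ed tail register;
		 * each RHF instead carries a sequence number that cycles
		 * through 1..13, and the loop ends when the next entry's
		 * sequence no longer matches the expected count.
		 */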
		/*
		 * update head regs on last packet, and every 16 packets.
		 * Reduce bus traffic, while still trying to prevent
		 * rcvhdrq overflows, for when the queue is nearly full
		 */
		if (last || !(i & 0xf)) {
			u64 lval = l;

			/* request IBA6120 and 7220 interrupt only on last */
			if (last)
				lval |= dd->ipath_rhdrhead_intr_off;
			ipath_write_ureg(dd, ur_rcvhdrhead, lval,
				pd->port_port);
			if (updegr) {
				ipath_write_ureg(dd, ur_rcvegrindexhead,
						 etail, pd->port_port);
				updegr = 0;
			}
		}
	}

	if (!dd->ipath_rhdrhead_intr_off && !reloop &&
	    !(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
		/* IBA6110 workaround; we can have a race clearing chip
		 * interrupt with another interrupt about to be delivered,
		 * and can clear it before it is delivered on the GPIO
		 * workaround.  By doing the extra check here for the
		 * in-memory tail register updating while we were doing
		 * earlier packets, we "almost" guarantee we have covered
		 * that case.
		 */
		u32 hqtail = ipath_get_rcvhdrtail(pd);
		if (hqtail != hdrqtail) {
			hdrqtail = hqtail;
			reloop = 1; /* loop 1 extra time at most */
			goto reloop;
		}
	}

	pkttot += i;

	pd->port_head = l;

	if (pkttot > ipath_stats.sps_maxpkts_call)
		ipath_stats.sps_maxpkts_call = pkttot;
	ipath_stats.sps_port0pkts += pkttot;
	ipath_stats.sps_avgpkts_call =
		ipath_stats.sps_port0pkts / ++totcalls;

bail:;
}

/**
 * ipath_update_pio_bufs - update shadow copy of the PIO availability map
 * @dd: the infinipath device
 *
 * called whenever our local copy indicates we have run out of send buffers
 * NOTE: This can be called from interrupt context by some code
 * and from non-interrupt context by ipath_getpiobuf().
 */

static void ipath_update_pio_bufs(struct ipath_devdata *dd)
{
	unsigned long flags;
	int i;
	const unsigned piobregs = (unsigned)dd->ipath_pioavregs;

	/* If the generation (check) bits have changed, then we update the
	 * busy bit for the corresponding PIO buffer.  This algorithm will
	 * modify positions to the value they already have in some cases
	 * (i.e., no change), but it's faster than changing only the bits
	 * that have changed.
	 *
	 * We would like to do this atomically, to avoid spinlocks in the
	 * critical send path, but that's not really possible, given the
	 * type of changes, and that this routine could be called on
	 * multiple cpu's simultaneously, so we lock in this routine only,
	 * to avoid conflicting updates; all we change is the shadow, and
	 * it's a single 64 bit memory location, so by definition the update
	 * is atomic in terms of what other cpu's can see in testing the
	 * bits.  The spin_lock overhead isn't too bad, since it only
	 * happens when all buffers are in use, so only cpu overhead, not
	 * latency or bandwidth is affected.
	 */
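	/*
	 * Sketch of the update rule below (a reading of the bit layout in
	 * this file, not from chip documentation): pchg selects kernel-owned
	 * buffers whose generation bit in the DMA'ed copy agrees with our
	 * shadow, i.e. the chip has caught up with our last generation flip;
	 * for exactly those buffers the shadow's busy bits are replaced with
	 * the chip's current busy bits, and nothing else changes.
	 */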
	if (!dd->ipath_pioavailregs_dma) {
		ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
		return;
	}
	if (ipath_debug & __IPATH_VERBDBG) {
		/* only if packet debug and verbose */
		volatile __le64 *dma = dd->ipath_pioavailregs_dma;
		unsigned long *shadow = dd->ipath_pioavailshadow;

		ipath_cdbg(PKT, "Refill avail, dma0=%llx shad0=%lx, "
			   "d1=%llx s1=%lx, d2=%llx s2=%lx, d3=%llx "
			   "s3=%lx\n",
			   (unsigned long long) le64_to_cpu(dma[0]),
			   shadow[0],
			   (unsigned long long) le64_to_cpu(dma[1]),
			   shadow[1],
			   (unsigned long long) le64_to_cpu(dma[2]),
			   shadow[2],
			   (unsigned long long) le64_to_cpu(dma[3]),
			   shadow[3]);
		if (piobregs > 4)
			ipath_cdbg(
				PKT, "2nd group, dma4=%llx shad4=%lx, "
				"d5=%llx s5=%lx, d6=%llx s6=%lx, "
				"d7=%llx s7=%lx\n",
				(unsigned long long) le64_to_cpu(dma[4]),
				shadow[4],
				(unsigned long long) le64_to_cpu(dma[5]),
				shadow[5],
				(unsigned long long) le64_to_cpu(dma[6]),
				shadow[6],
				(unsigned long long) le64_to_cpu(dma[7]),
				shadow[7]);
	}
	spin_lock_irqsave(&ipath_pioavail_lock, flags);
	for (i = 0; i < piobregs; i++) {
		u64 pchbusy, pchg, piov, pnew;
		/*
		 * Chip Errata: bug 6641; even and odd qwords>3 are swapped
		 */
		if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]);
		else
			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
		pchg = dd->ipath_pioavailkernel[i] &
			~(dd->ipath_pioavailshadow[i] ^ piov);
		pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
		if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
			pnew = dd->ipath_pioavailshadow[i] & ~pchbusy;
			pnew |= piov & pchbusy;
			dd->ipath_pioavailshadow[i] = pnew;
		}
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
}

/**
 * ipath_setrcvhdrsize - set the receive header size
 * @dd: the infinipath device
 * @rhdrsize: the receive header size
 *
 * called from user init code, and also layered driver init
 */
int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
{
	int ret = 0;

	if (dd->ipath_flags & IPATH_RCVHDRSZ_SET) {
		if (dd->ipath_rcvhdrsize != rhdrsize) {
			dev_info(&dd->pcidev->dev,
				 "Error: can't set protocol header "
				 "size %u, already %u\n",
				 rhdrsize, dd->ipath_rcvhdrsize);
			ret = -EAGAIN;
		} else
			ipath_cdbg(VERBOSE, "Reuse same protocol header "
				   "size %u\n", dd->ipath_rcvhdrsize);
	} else if (rhdrsize > (dd->ipath_rcvhdrentsize -
			       (sizeof(u64) / sizeof(u32)))) {
		ipath_dbg("Error: can't set protocol header size %u "
			  "(> max %u)\n", rhdrsize,
			  dd->ipath_rcvhdrentsize -
			  (u32) (sizeof(u64) / sizeof(u32)));
		ret = -EOVERFLOW;
	} else {
		dd->ipath_flags |= IPATH_RCVHDRSZ_SET;
		dd->ipath_rcvhdrsize = rhdrsize;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
				 dd->ipath_rcvhdrsize);
		ipath_cdbg(VERBOSE, "Set protocol header size to %u\n",
			   dd->ipath_rcvhdrsize);
	}
	return ret;
}

/*
 * debugging code and stats updates if no pio buffers available.
 */
static noinline void no_pio_bufs(struct ipath_devdata *dd)
{
	unsigned long *shadow = dd->ipath_pioavailshadow;
	__le64 *dma = (__le64 *)dd->ipath_pioavailregs_dma;

	dd->ipath_upd_pio_shadow = 1;

	/*
	 * not atomic, but if we lose a stat count in a while, that's OK
	 */
	ipath_stats.sps_nopiobufs++;
	if (!(++dd->ipath_consec_nopiobuf % 100000)) {
		ipath_dbg("%u pio sends with no bufavail; dmacopy: "
			"%llx %llx %llx %llx; shadow: %lx %lx %lx %lx\n",
			dd->ipath_consec_nopiobuf,
			(unsigned long long) le64_to_cpu(dma[0]),
			(unsigned long long) le64_to_cpu(dma[1]),
			(unsigned long long) le64_to_cpu(dma[2]),
			(unsigned long long) le64_to_cpu(dma[3]),
			shadow[0], shadow[1], shadow[2], shadow[3]);
		/*
		 * at 2 bits per buffer, each byte of shadow covers 4
		 * buffers, so the 4 registers printed above cover
		 * sizeof(shadow[0]) * 4 * 4 buffers (128 with 64-bit
		 * longs); print the second group only when there are more
		 * buffers than that
		 */
		if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
		    (sizeof(shadow[0]) * 4 * 4))
			ipath_dbg("2nd group: dmacopy: %llx %llx "
				"%llx %llx; shadow: %lx %lx %lx %lx\n",
				(unsigned long long)le64_to_cpu(dma[4]),
				(unsigned long long)le64_to_cpu(dma[5]),
				(unsigned long long)le64_to_cpu(dma[6]),
				(unsigned long long)le64_to_cpu(dma[7]),
				shadow[4], shadow[5], shadow[6],
				shadow[7]);
	}
}

/*
 * common code for normal driver pio buffer allocation, and reserved
 * allocation.
 *
 * do appropriate marking as busy, etc.
 * returns a pointer to the chosen buffer, or NULL if none was available;
 * the buffer number is returned through pbufnum.
 */
static u32 __iomem *ipath_getpiobuf_range(struct ipath_devdata *dd,
	u32 *pbufnum, u32 first, u32 last, u32 firsti)
{
	int i, j, updated = 0;
	unsigned piobcnt;
	unsigned long flags;
	unsigned long *shadow = dd->ipath_pioavailshadow;
	u32 __iomem *buf;

	piobcnt = last - first;
	if (dd->ipath_upd_pio_shadow) {
		/*
		 * Minor optimization.  If we had no buffers on last call,
		 * start out by doing the update; continue and do scan even
		 * if no buffers were updated, to be paranoid
		 */
		ipath_update_pio_bufs(dd);
		updated++;
		i = first;
	} else
		i = firsti;
rescan:
	/*
	 * while test_and_set_bit() is atomic, we do that and then the
	 * change_bit(), and the pair is not.  See if this is the cause
	 * of the remaining armlaunch errors.
	 */
	spin_lock_irqsave(&ipath_pioavail_lock, flags);
	for (j = 0; j < piobcnt; j++, i++) {
		if (i >= last)
			i = first;
		if (__test_and_set_bit((2 * i) + 1, shadow))
			continue;
		/* flip generation bit */
		__change_bit(2 * i, shadow);
		break;
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);

	if (j == piobcnt) {
		if (!updated) {
			/*
			 * first time through; shadow exhausted, but may be
			 * buffers available, try an update and then rescan.
			 */
			ipath_update_pio_bufs(dd);
			updated++;
			i = first;
			goto rescan;
		} else if (updated == 1 && piobcnt <=
			((dd->ipath_sendctrl
			>> INFINIPATH_S_UPDTHRESH_SHIFT) &
			INFINIPATH_S_UPDTHRESH_MASK)) {
			/*
			 * for chips supporting and using the update
			 * threshold we need to force an update of the
			 * in-memory copy if the count is less than the
			 * threshold, then check one more time.
			 */
			ipath_force_pio_avail_update(dd);
			ipath_update_pio_bufs(dd);
			updated++;
			i = first;
			goto rescan;
		}

		no_pio_bufs(dd);
		buf = NULL;
	} else {
		if (i < dd->ipath_piobcnt2k)
			buf = (u32 __iomem *) (dd->ipath_pio2kbase +
					       i * dd->ipath_palign);
		else
			buf = (u32 __iomem *)
				(dd->ipath_pio4kbase +
				 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
		if (pbufnum)
			*pbufnum = i;
	}

	return buf;
}

/**
 * ipath_getpiobuf - find an available pio buffer
 * @dd: the infinipath device
 * @plen: the size of the PIO buffer needed in 32-bit words
 * @pbufnum: the buffer number is placed here
 */
u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 plen, u32 *pbufnum)
{
	u32 __iomem *buf;
	u32 pnum, nbufs;
	u32 first, lasti;

	if (plen + 1 >= IPATH_SMALLBUF_DWORDS) {
		first = dd->ipath_piobcnt2k;
		lasti = dd->ipath_lastpioindexl;
	} else {
		first = 0;
		lasti = dd->ipath_lastpioindex;
	}
	nbufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
	buf = ipath_getpiobuf_range(dd, &pnum, first, nbufs, lasti);

	if (buf) {
		/*
		 * Set next starting place.  It's just an optimization,
		 * it doesn't matter who wins on this, so no locking
		 */
		if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
			dd->ipath_lastpioindexl = pnum + 1;
		else
			dd->ipath_lastpioindex = pnum + 1;
		if (dd->ipath_upd_pio_shadow)
			dd->ipath_upd_pio_shadow = 0;
		if (dd->ipath_consec_nopiobuf)
			dd->ipath_consec_nopiobuf = 0;
		ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
			   pnum, (pnum < dd->ipath_piobcnt2k) ? 2 : 4, buf);
		if (pbufnum)
			*pbufnum = pnum;
	}
	return buf;
}

/**
 * ipath_chg_pioavailkernel - change which send buffers are available for kernel
 * @dd: the infinipath device
 * @start: the starting send buffer number
 * @len: the number of send buffers
 * @avail: true if the buffers are available for kernel use, false otherwise
 */
void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
			      unsigned len, int avail)
{
	unsigned long flags;
	unsigned end;

	/* There are two bits per send buffer (busy and generation) */
	start *= 2;
	len *= 2;
	end = start + len;

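	/*
	 * Layout sketch (the shadow convention used throughout this file):
	 * buffer i owns shadow bits 2*i (generation) and 2*i + 1 (busy),
	 * so converting a buffer range into a bit range just doubles start
	 * and len; e.g. buffers 3..4 map to bits 6..9.
	 */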
1606 /* Set or clear the generation bits. */
1607 spin_lock_irqsave(&ipath_pioavail_lock, flags);
1608 while (start < end) {
1609 if (avail) {
1610 __clear_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
1611 dd->ipath_pioavailshadow);
1612 __set_bit(start, dd->ipath_pioavailkernel);
1613 } else {
1614 __set_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
1615 dd->ipath_pioavailshadow);
1616 __clear_bit(start, dd->ipath_pioavailkernel);
1617 }
1618 start += 2;
1619 }
1620 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1621}

/**
 * ipath_create_rcvhdrq - create a receive header queue
 * @dd: the infinipath device
 * @pd: the port data
 *
 * The queue must be contiguous memory (from an i/o perspective) and must
 * be DMA'able (which means that for some systems it will go through an
 * IOMMU, or be forced into a low address range).
 */
int ipath_create_rcvhdrq(struct ipath_devdata *dd,
			 struct ipath_portdata *pd)
{
	int ret = 0;

	if (!pd->port_rcvhdrq) {
		dma_addr_t phys_hdrqtail;
		gfp_t gfp_flags = GFP_USER | __GFP_COMP;
		int amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
				sizeof(u32), PAGE_SIZE);

		pd->port_rcvhdrq = dma_alloc_coherent(
			&dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
			gfp_flags);

		if (!pd->port_rcvhdrq) {
			ipath_dev_err(dd, "attempt to allocate %d bytes "
				      "for port %u rcvhdrq failed\n",
				      amt, pd->port_port);
			ret = -ENOMEM;
			goto bail;
		}

		if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
			pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
				GFP_KERNEL);
			if (!pd->port_rcvhdrtail_kvaddr) {
				ipath_dev_err(dd, "attempt to allocate 1 page "
					      "for port %u rcvhdrqtailaddr "
					      "failed\n", pd->port_port);
				ret = -ENOMEM;
				dma_free_coherent(&dd->pcidev->dev, amt,
						  pd->port_rcvhdrq,
						  pd->port_rcvhdrq_phys);
				pd->port_rcvhdrq = NULL;
				goto bail;
			}
			pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
			ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx "
				   "physical\n", pd->port_port,
				   (unsigned long long) phys_hdrqtail);
		}

		pd->port_rcvhdrq_size = amt;

		ipath_cdbg(VERBOSE, "%d pages at %p (phys %lx) size=%lu "
			   "for port %u rcvhdr Q\n",
			   amt >> PAGE_SHIFT, pd->port_rcvhdrq,
			   (unsigned long) pd->port_rcvhdrq_phys,
			   (unsigned long) pd->port_rcvhdrq_size,
			   pd->port_port);
	} else
		ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
			   "hdrtailaddr@%p %llx physical\n",
			   pd->port_port, pd->port_rcvhdrq,
			   (unsigned long long) pd->port_rcvhdrq_phys,
			   pd->port_rcvhdrtail_kvaddr, (unsigned long long)
			   pd->port_rcvhdrqtailaddr_phys);

	/* clear for security and sanity on each use */
	memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
	if (pd->port_rcvhdrtail_kvaddr)
		memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);

	/*
	 * tell the chip each time we init it, even if we are re-using
	 * previous memory (we zero the register at process close)
	 */
	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
			      pd->port_port, pd->port_rcvhdrqtailaddr_phys);
	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
			      pd->port_port, pd->port_rcvhdrq_phys);

bail:
	return ret;
}
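
/*
 * Worked example (editorial, values hypothetical): with
 * ipath_rcvhdrcnt = 64 and ipath_rcvhdrentsize = 16, the queue needs
 * 64 * 16 * sizeof(u32) = 4096 bytes, so on a system with 4KB pages
 * the allocation above rounds to exactly one page:
 *
 *	int amt = ALIGN(64 * 16 * sizeof(u32), PAGE_SIZE);	// 4096
 */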

/*
 * Flush all sends that might be in the ready-to-send state, as well as
 * any that are in the process of being sent. Used whenever we need to be
 * sure the send side is idle. Cleans up all buffer state by canceling
 * all pio buffers, and issuing an abort, which cleans up anything in the
 * launch fifo. The cancel is superfluous on some chip versions, but
 * it's safer to always do it.
 * PIOAvail bits are updated by the chip as if a normal send had happened.
 */
void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
{
	ipath_dbg("Cancelling all in-progress send buffers\n");

	/* skip armlaunch errs for a while */
	dd->ipath_lastcancel = jiffies + HZ / 2;

	/*
	 * The abort bit is auto-clearing. We read scratch to be sure
	 * that cancels and the abort have taken effect in the chip.
	 */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 INFINIPATH_S_ABORT);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	ipath_disarm_piobufs(dd, 0,
		(unsigned)(dd->ipath_piobcnt2k + dd->ipath_piobcnt4k));
	if (restore_sendctrl) /* else done by caller later */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
				 dd->ipath_sendctrl);

	/* and again, be sure all have hit the chip */
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
}
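
/*
 * Editorial note: the pattern above (write a control register, then
 * read kr_scratch) is the driver's recurring idiom for making sure a
 * posted register write has actually reached the chip before the code
 * proceeds; a minimal sketch of the idiom:
 *
 *	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, val);
 *	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); // flush write
 */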

/*
 * Force an update of the in-memory copy of the pioavail registers, when
 * needed for any of a variety of reasons. We read the scratch register
 * to make it highly likely that the update will have happened by the
 * time we return. If updates are already off (as in cancel_sends above),
 * this routine is a nop, on the assumption that the caller will "do the
 * right thing".
 */
void ipath_force_pio_avail_update(struct ipath_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	if (dd->ipath_sendctrl & INFINIPATH_S_PIOBUFAVAILUPD) {
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			dd->ipath_sendctrl);
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	}
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
}

static void ipath_set_ib_lstate(struct ipath_devdata *dd, int linkcmd,
				int linitcmd)
{
	u64 mod_wd;
	static const char *what[4] = {
		[0] = "NOP",
		[INFINIPATH_IBCC_LINKCMD_DOWN] = "DOWN",
		[INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
		[INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
	};

	if (linitcmd == INFINIPATH_IBCC_LINKINITCMD_DISABLE) {
		/*
		 * If we are told to disable, note that so link-recovery
		 * code does not attempt to bring us back up.
		 */
		preempt_disable();
		dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
		preempt_enable();
	} else if (linitcmd) {
		/*
		 * Any other linkinitcmd will lead to LINKDOWN and then
		 * to INIT (if all is well), so clear flag to let
		 * link-recovery code attempt to bring us back up.
		 */
		preempt_disable();
		dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
		preempt_enable();
	}

	mod_wd = (linkcmd << dd->ibcc_lc_shift) |
		(linitcmd << INFINIPATH_IBCC_LINKINITCMD_SHIFT);
	ipath_cdbg(VERBOSE,
		"Moving unit %u to %s (initcmd=0x%x), current ltstate is %s\n",
		dd->ipath_unit, what[linkcmd], linitcmd,
		ipath_ibcstatus_str[ipath_ib_linktrstate(dd,
			ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus))]);

	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
			 dd->ipath_ibcctrl | mod_wd);
	/* read from chip so write is flushed */
	(void) ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
}
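
/*
 * Editorial sketch: mod_wd packs a link command and a link-init command
 * into one IBC control write. For example, a DOWN command combined with
 * a POLL init command (the IPATH_IB_LINKDOWN case below) would be built
 * as:
 *
 *	u64 mod_wd = (INFINIPATH_IBCC_LINKCMD_DOWN << dd->ibcc_lc_shift) |
 *		(INFINIPATH_IBCC_LINKINITCMD_POLL <<
 *		 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
 */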

int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
{
	u32 lstate;
	int ret;

	switch (newstate) {
	case IPATH_IB_LINKDOWN_ONLY:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN, 0);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
				    INFINIPATH_IBCC_LINKINITCMD_POLL);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_SLEEP:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
				    INFINIPATH_IBCC_LINKINITCMD_SLEEP);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_DISABLE:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
				    INFINIPATH_IBCC_LINKINITCMD_DISABLE);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKARM:
		if (dd->ipath_flags & IPATH_LINKARMED) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags &
		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED, 0);

		/*
		 * Since the port can transition to ACTIVE by receiving
		 * a non VL 15 packet, wait for either state.
		 */
		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINKACTIVE:
		if (dd->ipath_flags & IPATH_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE, 0);
		lstate = IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINK_LOOPBACK:
		dev_info(&dd->pcidev->dev, "Enabling IB local loopback\n");
		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);

		/* turn heartbeat off, as it causes loopback to fail */
		dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
				       IPATH_IB_HRTBT_OFF);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINK_EXTERNAL:
		dev_info(&dd->pcidev->dev,
			 "Disabling IB local loopback (normal)\n");
		dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
				       IPATH_IB_HRTBT_ON);
		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		/* don't wait */
		ret = 0;
		goto bail;

	/*
	 * Heartbeat can be explicitly enabled by the user via the
	 * "hrtbt_enable" file, and if disabled, trying to enable it here
	 * will have no effect. Implicit changes (heartbeat off when
	 * loopback on, and vice versa) are included to ease testing.
	 */
	case IPATH_IB_LINK_HRTBT:
		ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
					     IPATH_IB_HRTBT_ON);
		goto bail;

	case IPATH_IB_LINK_NO_HRTBT:
		ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
					     IPATH_IB_HRTBT_OFF);
		goto bail;

	default:
		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
		ret = -EINVAL;
		goto bail;
	}
	ret = ipath_wait_linkstate(dd, lstate, 2000);

bail:
	return ret;
}
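
/*
 * Usage sketch (editorial, not driver code): a management path bringing
 * the link up would typically arm it and then make it active, checking
 * each return since either step can fail or time out:
 *
 *	if (!ipath_set_linkstate(dd, IPATH_IB_LINKARM))
 *		ret = ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
 */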

/**
 * ipath_set_mtu - set the MTU
 * @dd: the infinipath device
 * @arg: the new MTU
 *
 * We can handle "any" incoming size; the issue here is whether we
 * need to restrict our outgoing size. For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link initialize (IPATH_IBSTATE_INIT) state...
 */
int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
{
	u32 piosize;
	int changed = 0;
	int ret;

	/*
	 * mtu is the IB data payload max. It's the largest power of 2 less
	 * than piosize (or even larger, since it only really controls the
	 * largest we can receive; we can send the max of the mtu and
	 * piosize). We check that it's one of the valid IB sizes.
	 */
	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    (arg != 4096 || !ipath_mtu4096)) {
		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
		ret = -EINVAL;
		goto bail;
	}
	if (dd->ipath_ibmtu == arg) {
		ret = 0; /* same as current */
		goto bail;
	}

	piosize = dd->ipath_ibmaxlen;
	dd->ipath_ibmtu = arg;

	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != dd->ipath_init_ibmaxlen) {
			if (arg > piosize && arg <= dd->ipath_init_ibmaxlen)
				piosize = dd->ipath_init_ibmaxlen;
			dd->ipath_ibmaxlen = piosize;
			changed = 1;
		}
	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
		piosize = arg + IPATH_PIO_MAXIBHDR;
		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
			   arg);
		dd->ipath_ibmaxlen = piosize;
		changed = 1;
	}

	if (changed) {
		u64 ibc = dd->ipath_ibcctrl, ibdw;
		/*
		 * update our housekeeping variables, and set IBC max
		 * size, same as init code; max IBC is max we allow in
		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
		 */
		dd->ipath_ibmaxlen = piosize - 2 * sizeof(u32);
		ibdw = (dd->ipath_ibmaxlen >> 2) + 1;
		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
			 dd->ibcc_mpl_shift);
		ibc |= ibdw << dd->ibcc_mpl_shift;
		dd->ipath_ibcctrl = ibc;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		dd->ipath_f_tidtemplate(dd);
	}

	ret = 0;

bail:
	return ret;
}
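
/*
 * Worked example (editorial, hypothetical values): if piosize ends up
 * as arg + IPATH_PIO_MAXIBHDR for an mtu of 2048, then ipath_ibmaxlen
 * becomes piosize - 8 (dropping the qword PBC), and the IBC max-packet
 * field is that length in dwords plus one for the ICRC:
 *
 *	dd->ipath_ibmaxlen = piosize - 2 * sizeof(u32);
 *	ibdw = (dd->ipath_ibmaxlen >> 2) + 1;
 */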

int ipath_set_lid(struct ipath_devdata *dd, u32 lid, u8 lmc)
{
	dd->ipath_lid = lid;
	dd->ipath_lmc = lmc;

	dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LIDLMC, lid |
		(~((1U << lmc) - 1)) << 16);

	dev_info(&dd->pcidev->dev, "We got a lid: 0x%x\n", lid);

	return 0;
}
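
/*
 * Worked example (editorial): the word passed to IPATH_IB_CFG_LIDLMC
 * above appears to carry the LID in the low 16 bits and an LMC-derived
 * mask in the upper bits (the low lmc bits of the LID are "don't care"
 * for filtering). For lid = 0x10 and lmc = 2:
 *
 *	~((1U << 2) - 1)          == 0xfffffffc
 *	0x10 | (0xfffffffc << 16) == 0xfffc0010
 */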


/**
 * ipath_write_kreg_port - write a device's per-port 64-bit kernel register
 * @dd: the infinipath device
 * @regno: the register number to write
 * @port: the port containing the register
 * @value: the value to write
 *
 * Registers that vary with the chip implementation constants (port)
 * use this routine.
 */
void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
			   unsigned port, u64 value)
{
	u16 where;

	if (port < dd->ipath_portcnt &&
	    (regno == dd->ipath_kregs->kr_rcvhdraddr ||
	     regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
		where = regno + port;
	else
		where = -1;

	ipath_write_kreg(dd, where, value);
}

/*
 * The following routines deal with the "obviously simple" task of
 * overriding the state of the LEDs, which normally indicate link
 * physical and logical status. The complications arise in dealing
 * with different hardware mappings and the board-dependent routine
 * being called from interrupts. And then there's the requirement to
 * _flash_ them.
 */
#define LED_OVER_FREQ_SHIFT 8
#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
/* Below is "non-zero" to force override, but both actual LEDs are off */
#define LED_OVER_BOTH_OFF (8)

static void ipath_run_led_override(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
	int timeoff;
	int pidx;
	u64 lstate, ltstate, val;

	if (!(dd->ipath_flags & IPATH_INITTED))
		return;

	pidx = dd->ipath_led_override_phase++ & 1;
	dd->ipath_led_override = dd->ipath_led_override_vals[pidx];
	timeoff = dd->ipath_led_override_timeoff;

	/*
	 * below potentially restores the LED values per current status,
	 * and should also possibly set up the traffic-blink register,
	 * but leave that to per-chip functions.
	 */
	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
	ltstate = (val >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
		dd->ibcs_lts_mask;
	lstate = (val >> dd->ibcs_ls_shift) & INFINIPATH_IBCS_LINKSTATE_MASK;

	dd->ipath_f_setextled(dd, lstate, ltstate);
	mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff);
}

void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
{
	int timeoff, freq;

	if (!(dd->ipath_flags & IPATH_INITTED))
		return;

	/* First check if we are blinking. If not, use 1 Hz polling */
	timeoff = HZ;
	freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;

	if (freq) {
		/* For blink, set each phase from one nybble of val */
		dd->ipath_led_override_vals[0] = val & 0xF;
		dd->ipath_led_override_vals[1] = (val >> 4) & 0xF;
		timeoff = (HZ << 4)/freq;
	} else {
		/* Non-blink: set both phases the same. */
		dd->ipath_led_override_vals[0] = val & 0xF;
		dd->ipath_led_override_vals[1] = val & 0xF;
	}
	dd->ipath_led_override_timeoff = timeoff;

	/*
	 * If the timer has not already been started, do so. Use a "quick"
	 * timeout so the function will be called soon, to look at our request.
	 */
	if (atomic_inc_return(&dd->ipath_led_override_timer_active) == 1) {
		/* Need to start timer */
		init_timer(&dd->ipath_led_override_timer);
		dd->ipath_led_override_timer.function =
			ipath_run_led_override;
		dd->ipath_led_override_timer.data = (unsigned long) dd;
		dd->ipath_led_override_timer.expires = jiffies + 1;
		add_timer(&dd->ipath_led_override_timer);
	} else
		atomic_dec(&dd->ipath_led_override_timer_active);
}
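
/*
 * Encoding sketch (editorial, values hypothetical): the override word
 * packs a frequency byte above two 4-bit LED phase values. A request
 * blinking between phase values 0x1 and 0x2 with freq = 16, so each
 * phase lasts (HZ << 4)/16 = HZ jiffies (about one second), could be
 * built as:
 *
 *	unsigned int val = (16 << LED_OVER_FREQ_SHIFT) | (0x2 << 4) | 0x1;
 *	ipath_set_led_override(dd, val);
 */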

/**
 * ipath_shutdown_device - shut down a device
 * @dd: the infinipath device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled. It does not free any data structures.
 * Everything it does has to be set up again by ipath_init_chip(dd, 1)
 */
void ipath_shutdown_device(struct ipath_devdata *dd)
{
	unsigned long flags;

	ipath_dbg("Shutting down the device\n");

	ipath_hol_up(dd); /* make sure user processes aren't suspended */

	dd->ipath_flags |= IPATH_LINKUNK;
	dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
			     IPATH_LINKINIT | IPATH_LINKARMED |
			     IPATH_LINKACTIVE);
	*dd->ipath_statusp &= ~(IPATH_STATUS_IB_CONF |
				IPATH_STATUS_IB_READY);

	/* mask interrupts, but not errors */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);

	dd->ipath_rcvctrl = 0;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);

	/*
	 * gracefully stop all sends, allowing any in progress to trickle
	 * out first.
	 */
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl = 0;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	/* flush it */
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

	/*
	 * enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(5);

	ipath_set_ib_lstate(dd, 0, INFINIPATH_IBCC_LINKINITCMD_DISABLE);
	ipath_cancel_sends(dd, 0);

	signal_ib_event(dd, IB_EVENT_PORT_ERR);

	/* disable IBC */
	dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
			 dd->ipath_control | INFINIPATH_C_FREEZEMODE);

	/*
	 * clear SerdesEnable and turn the leds off; do this here because
	 * we are unloading, so don't count on interrupts to move along.
	 * Turn the LEDs off explicitly for the same reason.
	 */
	dd->ipath_f_quiet_serdes(dd);

	/* stop all the timers that might still be running */
	del_timer_sync(&dd->ipath_hol_timer);
	if (dd->ipath_stats_timer_active) {
		del_timer_sync(&dd->ipath_stats_timer);
		dd->ipath_stats_timer_active = 0;
	}

	/*
	 * clear all interrupts and errors, so that the next time the driver
	 * is loaded or the device is enabled, we know that whatever is set
	 * happened while we were unloaded
	 */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
			 ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);

	ipath_cdbg(VERBOSE, "Flush time and errors to EEPROM\n");
	ipath_update_eeprom_log(dd);
}

/**
 * ipath_free_pddata - free a port's allocated data
 * @dd: the infinipath device
 * @pd: the portdata structure
 *
 * Free up any allocated data for a port.
 * This should not touch anything that would affect a simultaneous
 * re-allocation of port data, because it is called after ipath_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 * (The only exception to global state is freeing the port0 port0_skbs.)
 */
void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
{
	if (!pd)
		return;

	if (pd->port_rcvhdrq) {
		ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
			   "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
			   (unsigned long) pd->port_rcvhdrq_size);
		dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
				  pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
		pd->port_rcvhdrq = NULL;
		if (pd->port_rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  pd->port_rcvhdrtail_kvaddr,
					  pd->port_rcvhdrqtailaddr_phys);
			pd->port_rcvhdrtail_kvaddr = NULL;
		}
	}
	if (pd->port_port && pd->port_rcvegrbuf) {
		unsigned e;

		for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
			void *base = pd->port_rcvegrbuf[e];
			size_t size = pd->port_rcvegrbuf_size;

			ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
				   "chunk %u/%u\n", base,
				   (unsigned long) size,
				   e, pd->port_rcvegrbuf_chunks);
			dma_free_coherent(&dd->pcidev->dev, size,
					  base, pd->port_rcvegrbuf_phys[e]);
		}
		kfree(pd->port_rcvegrbuf);
		pd->port_rcvegrbuf = NULL;
		kfree(pd->port_rcvegrbuf_phys);
		pd->port_rcvegrbuf_phys = NULL;
		pd->port_rcvegrbuf_chunks = 0;
	} else if (pd->port_port == 0 && dd->ipath_port0_skbinfo) {
		unsigned e;
		struct ipath_skbinfo *skbinfo = dd->ipath_port0_skbinfo;

		dd->ipath_port0_skbinfo = NULL;
		ipath_cdbg(VERBOSE, "free closed port %d "
			   "ipath_port0_skbinfo @ %p\n", pd->port_port,
			   skbinfo);
		for (e = 0; e < dd->ipath_p0_rcvegrcnt; e++)
			if (skbinfo[e].skb) {
				pci_unmap_single(dd->pcidev, skbinfo[e].phys,
						 dd->ipath_ibmaxlen,
						 PCI_DMA_FROMDEVICE);
				dev_kfree_skb(skbinfo[e].skb);
			}
		vfree(skbinfo);
	}
	kfree(pd->port_tid_pg_list);
	vfree(pd->subport_uregbase);
	vfree(pd->subport_rcvegrbuf);
	vfree(pd->subport_rcvhdr_base);
	kfree(pd);
}

static int __init infinipath_init(void)
{
	int ret;

	if (ipath_debug & __IPATH_DBG)
		printk(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version);

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&unit_table);
	if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto bail;
	}

	ret = pci_register_driver(&ipath_driver);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Unable to register driver: error %d\n", -ret);
		goto bail_unit;
	}

	ret = ipath_init_ipathfs();
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
		       "ipathfs: error %d\n", -ret);
		goto bail_pci;
	}

	goto bail;

bail_pci:
	pci_unregister_driver(&ipath_driver);

bail_unit:
	idr_destroy(&unit_table);

bail:
	return ret;
}
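
/*
 * Editorial note: the init path above uses the kernel's usual unwind
 * idiom, where each failure jumps to a label that undoes only the
 * steps already completed, in reverse order. A minimal sketch, with
 * setup_a()/setup_b() as hypothetical names:
 *
 *	ret = setup_a();
 *	if (ret)
 *		goto bail;
 *	ret = setup_b();
 *	if (ret)
 *		goto bail_a;	// undo setup_a() only
 */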

static void __exit infinipath_cleanup(void)
{
	ipath_exit_ipathfs();

	ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
	pci_unregister_driver(&ipath_driver);

	idr_destroy(&unit_table);
}

/**
 * ipath_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload). We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize. For
 * now, we only allow this if no user ports are open that use chip resources.
 */
int ipath_reset_device(int unit)
{
	int ret, i;
	struct ipath_devdata *dd = ipath_lookup(unit);

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	if (atomic_read(&dd->ipath_led_override_timer_active)) {
		/* Need to stop LED timer, _then_ shut off LEDs */
		del_timer_sync(&dd->ipath_led_override_timer);
		atomic_set(&dd->ipath_led_override_timer_active, 0);
	}

	/* Shut off LEDs after we are sure timer is not running */
	dd->ipath_led_override = LED_OVER_BOTH_OFF;
	dd->ipath_f_setextled(dd, 0, 0);

	dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);

	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
		dev_info(&dd->pcidev->dev, "Invalid unit number %u or "
			 "not initialized or not present\n", unit);
		ret = -ENXIO;
		goto bail;
	}

	if (dd->ipath_pd)
		for (i = 1; i < dd->ipath_cfgports; i++) {
			if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) {
				ipath_dbg("unit %u port %d is in use "
					  "(PID %u cmd %s), can't reset\n",
					  unit, i,
					  dd->ipath_pd[i]->port_pid,
					  dd->ipath_pd[i]->port_comm);
				ret = -EBUSY;
				goto bail;
			}
		}

	dd->ipath_flags &= ~IPATH_INITTED;
	ret = dd->ipath_f_reset(dd);
	if (ret != 1)
		ipath_dbg("reset was not successful\n");
	ipath_dbg("Trying to reinitialize unit %u after reset attempt\n",
		  unit);
	ret = ipath_init_chip(dd, 1);
	if (ret)
		ipath_dev_err(dd, "Reinitialize unit %u after "
			      "reset failed with %d\n", unit, ret);
	else
		dev_info(&dd->pcidev->dev, "Reinitialized unit %u after "
			 "resetting\n", unit);

bail:
	return ret;
}

/*
 * Send a signal to all the processes that have the driver open through
 * the normal interfaces (i.e., everything other than the diags
 * interface). Returns the number of signalled processes.
 */
static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
{
	int i, sub, any = 0;
	pid_t pid;

	if (!dd->ipath_pd)
		return 0;
	for (i = 1; i < dd->ipath_cfgports; i++) {
		if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt ||
		    !dd->ipath_pd[i]->port_pid)
			continue;
		pid = dd->ipath_pd[i]->port_pid;
		dev_info(&dd->pcidev->dev, "context %d in use "
			 "(PID %u), sending signal %d\n",
			 i, pid, sig);
		kill_proc(pid, sig, 1);
		any++;
		for (sub = 0; sub < INFINIPATH_MAX_SUBPORT; sub++) {
			pid = dd->ipath_pd[i]->port_subpid[sub];
			if (!pid)
				continue;
			dev_info(&dd->pcidev->dev, "sub-context "
				 "%d:%d in use (PID %u), sending "
				 "signal %d\n", i, sub, pid, sig);
			kill_proc(pid, sig, 1);
			any++;
		}
	}
	return any;
}

static void ipath_hol_signal_down(struct ipath_devdata *dd)
{
	if (ipath_signal_procs(dd, SIGSTOP))
		ipath_dbg("Stopped some processes\n");
	ipath_cancel_sends(dd, 1);
}


static void ipath_hol_signal_up(struct ipath_devdata *dd)
{
	if (ipath_signal_procs(dd, SIGCONT))
		ipath_dbg("Continued some processes\n");
}

/*
 * The link is down: stop any user processes, and flush pending sends
 * to prevent HoL blocking. Then start the HoL timer, which alternately
 * continues and re-stops the processes, so they can detect the link
 * going down if they want, and do something about it.
 * The timer may already be running, so use __mod_timer, not add_timer.
 */
void ipath_hol_down(struct ipath_devdata *dd)
{
	dd->ipath_hol_state = IPATH_HOL_DOWN;
	ipath_hol_signal_down(dd);
	dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
	dd->ipath_hol_timer.expires = jiffies +
		msecs_to_jiffies(ipath_hol_timeout_ms);
	__mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
}

/*
 * The link is up: continue any user processes, and ensure the timer
 * is a nop, if running. Let the timer keep running, if set; it
 * will nop when it sees the link is up.
 */
void ipath_hol_up(struct ipath_devdata *dd)
{
	ipath_hol_signal_up(dd);
	dd->ipath_hol_state = IPATH_HOL_UP;
}

/*
 * Toggle the running/not running state of user processes to prevent HoL
 * blocking on chip resources, while still allowing user processes to do
 * link-down special-case handling.
 * Should only be called via the timer.
 */
void ipath_hol_event(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;

	if (dd->ipath_hol_next == IPATH_HOL_DOWNSTOP
		&& dd->ipath_hol_state != IPATH_HOL_UP) {
		dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
		ipath_dbg("Stopping processes\n");
		ipath_hol_signal_down(dd);
	} else { /* may do "extra" if also in ipath_hol_up() */
		dd->ipath_hol_next = IPATH_HOL_DOWNSTOP;
		ipath_dbg("Continuing processes\n");
		ipath_hol_signal_up(dd);
	}
	if (dd->ipath_hol_state == IPATH_HOL_UP)
		ipath_dbg("link's up, don't resched timer\n");
	else {
		dd->ipath_hol_timer.expires = jiffies +
			msecs_to_jiffies(ipath_hol_timeout_ms);
		__mod_timer(&dd->ipath_hol_timer,
			dd->ipath_hol_timer.expires);
	}
}
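
/*
 * Editorial sketch of the resulting alternation while the link stays
 * down, expressed as the calls the code above would produce (each step
 * is hol_timeout_ms after the previous one):
 *
 *	ipath_hol_down(dd);	// SIGSTOP users, next = DOWNCONT
 *	ipath_hol_event(...);	// SIGCONT users, next = DOWNSTOP
 *	ipath_hol_event(...);	// SIGSTOP users, next = DOWNCONT
 */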

int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
{
	u64 val;

	if (new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK)
		return -1;
	if (dd->ipath_rx_pol_inv != new_pol_inv) {
		dd->ipath_rx_pol_inv = new_pol_inv;
		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
		val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
			 INFINIPATH_XGXS_RX_POL_SHIFT);
		val |= ((u64)dd->ipath_rx_pol_inv) <<
			INFINIPATH_XGXS_RX_POL_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
	}
	return 0;
}
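
/*
 * Editorial note: the function above is an instance of the usual
 * read-modify-write idiom for updating one field of a wide register.
 * Generically, for a field described by hypothetical MASK and SHIFT
 * values and hypothetical read_reg()/write_reg() accessors:
 *
 *	val = read_reg();
 *	val &= ~((u64)MASK << SHIFT);	// clear the field
 *	val |= (u64)new << SHIFT;	// install the new value
 *	write_reg(val);
 */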

/*
 * Disable and enable the armlaunch error. Used for PIO bandwidth testing
 * on the 7220, which is count-based, rather than trigger-based. Safe for
 * the driver check, since it's at init. Not completely safe when used
 * for user-mode checking, since some error checking can be lost, but not
 * particularly risky, and only has problematic side-effects in the face
 * of very buggy user code. There is no reference counting, but that's
 * also fine, given the intended use.
 */
void ipath_enable_armlaunch(struct ipath_devdata *dd)
{
	dd->ipath_lasterror &= ~INFINIPATH_E_SPIOARMLAUNCH;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
			 INFINIPATH_E_SPIOARMLAUNCH);
	dd->ipath_errormask |= INFINIPATH_E_SPIOARMLAUNCH;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
			 dd->ipath_errormask);
}

void ipath_disable_armlaunch(struct ipath_devdata *dd)
{
	/* so don't re-enable if already set */
	dd->ipath_maskederrs &= ~INFINIPATH_E_SPIOARMLAUNCH;
	dd->ipath_errormask &= ~INFINIPATH_E_SPIOARMLAUNCH;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
			 dd->ipath_errormask);
}

module_init(infinipath_init);
module_exit(infinipath_cleanup);