/***************************************************************************
                          dpti.c  -  description
                          -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

                           July 30, 2001 First version being submitted
                           for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.txt for history, notes, license info
    and credits
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 * This program is free software; you can redistribute it and/or modify   *
 * it under the terms of the GNU General Public License as published by   *
 * the Free Software Foundation; either version 2 of the License, or      *
 * (at your option) any later version.                                    *
 *                                                                         *
 ***************************************************************************/
/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long long loop
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */

#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////

#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <asm/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/pgtable.h>
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"

/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static DEFINE_MUTEX(adpt_mutex);
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};




/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);


/*============================================================================
 * 				Functions
 *============================================================================
 */

static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}

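/*
 * Illustrative note: dma_low()/dma_high() split a dma_addr_t into the
 * two 32-bit words carried by a 64-bit SG element.  For example, with
 * addr = 0x123456789ULL, dma_low(addr) == 0x23456789 and
 * dma_high(addr) == 0x1 (see the SG list built in adpt_inquiry()).
 */
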
static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (host->FwDebugBLEDflag_P) {
		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}

/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci,dptids);

static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		// Activate does get status, init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}


	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* If an IOP fails to come online, we need to rebuild the System table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes can not be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}


/*
 * scsi_unregister will be called AFTER we return.
 */
static int adpt_release(struct Scsi_Host *host)
{
	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_unregister(host);
	return 0;
}


static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir = 0x40000000;	// DATA IN (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;		// SINGLE SGE, 64 bit
	else
		reqlen = 14;		// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}
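
/*
 * Note on the request sizes used above: the 64-bit path emits a
 * five-word SGE (the 0x7C "enable 64-bit" element, the page size, the
 * flag/length word, then the low and high halves of the address), so
 * reqlen is 17; the 32-bit path needs only flag/length plus one
 * address word, so reqlen is 14.
 */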


static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
				host->can_queue - 1);
	} else {
		scsi_adjust_queue_depth(device, 0, 1);
	}
	return 0;
}

static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	/*
	 * TODO: I need to block here if I am processing ioctl cmds,
	 * but if the outstanding cmds all finish before the ioctl,
	 * the scsi-core will not know to start sending cmds to me again.
	 * I need a way to restart the scsi-core's queues, or should I
	 * block calling scsi_done on the outstanding cmds instead?
	 * For now we don't set the IOCTL state.
	 */
	if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
		pHba->host->last_reset = jiffies;
		pHba->host->resetting = 1;
		return 1;
	}

	// TODO if the cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;

	// *** First lets set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	cylinders = sector_div(capacity, heads * sectors);

	// Special case if CDROM
	if(sdev->type == 5) {	// CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}
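
/*
 * Illustrative mapping of the bands above, with capacity counted in
 * 512-byte sectors: < 0x2000 (~4 MB) -> 18/2, < 0x20000 (~64 MB) ->
 * 64/32, < 0x40000 (~128 MB) -> 65/63, < 0x80000 (~256 MB) -> 128/63,
 * otherwise 255/63 (heads/sectors).
 */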


static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}

static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
		  int length, int inout)
{
	struct adpt_device* d;
	int id;
	int chan;
	int len = 0;
	int begin = 0;
	int pos = 0;
	adpt_hba* pHba;
	int unit;

	*start = buffer;
	if (inout == TRUE) {
		/*
		 * The user has done a write and wants us to take the
		 * data in the buffer and do something with it.
		 * proc_scsiwrite calls us with inout = 1
		 *
		 * Read data from buffer (writing to us) - NOT SUPPORTED
		 */
		return -EINVAL;
	}

	/*
	 * inout = 0 means the user has done a read and wants information
	 * returned, so we write information about the cards into the buffer
	 * proc_scsiread() calls us with inout = 0
	 */

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	len  = sprintf(buffer    , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	len += sprintf(buffer+len, "%s\n", pHba->detail);
	len += sprintf(buffer+len, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	len += sprintf(buffer+len, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	pos = begin + len;

	/* CHECKPOINT */
	if(pos > offset + length) {
		goto stop_output;
	}
	if(pos <= offset) {
		/*
		 * If we haven't even written to where we last left
		 * off (the last time we were called), reset the
		 * beginning pointer.
		 */
		len = 0;
		begin = pos;
	}
	len += sprintf(buffer+len, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d){
				len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
				len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
				pos = begin + len;


				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				unit = d->pI2o_dev->lct_data.tid;
				len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d)  (%s)\n\n",
					       unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
					       scsi_device_online(d->pScsi_dev)? "online":"offline");
				pos = begin + len;

				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				d = d->next_lun;
			}
		}
	}

	/*
	 * begin is where we last checked our position with regards to offset
	 * begin is always less than offset.  len is relative to begin.  It
	 * is the number of bytes written past begin
	 *
	 */
stop_output:
	/* stop the output and calculate the correct length */
	*(buffer + len) = '\0';

	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);
	if(len > length) {
		len = length;
	} else if(len < 0){
		len = 0;
		**start = '\0';
	}
	return len;
}

/*
 * Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
 */
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
	return (u32)cmd->serial_number;
}

/*
 * Go from a u32 'context' to a struct scsi_cmnd * .
 * This could probably be made more efficient.
 */
static struct scsi_cmnd *
	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
{
	struct scsi_cmnd * cmd;
	struct scsi_device * d;

	if (context == 0)
		return NULL;

	spin_unlock(pHba->host->host_lock);
	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (((u32)cmd->serial_number == context)) {
				spin_unlock_irqrestore(&d->list_lock, flags);
				scsi_device_put(d);
				spin_lock(pHba->host->host_lock);
				return cmd;
			}
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
	spin_lock(pHba->host->host_lock);

	return NULL;
}

/*
 * Turn a pointer to ioctl reply data into an u32 'context'
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		kfree (reply);
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

/*
 * Go from an u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
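
/*
 * Note: the two strategies above exist because the I2O context field
 * is only 32 bits wide.  On 32-bit builds a kernel pointer fits in the
 * context directly; on 64-bit builds the reply pointer is parked in
 * pHba->ioctl_reply_context[] and the table index is used as the
 * context instead.
 */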

/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	if(cmd->serial_number == 0){
		return FAILED;
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3]= 0;
	msg[4] = adpt_cmd_to_context(cmd);
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number);
	return SUCCESS;
}


#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}


#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}

// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	int rcode;
	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	rcode = adpt_hba_reset(pHba);
	if(rcode == 0){
		printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status, init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;	/* return success */
}

/*===========================================================================
 *
 *===========================================================================
 */


static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timed-out entries from the wait queue. */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{

	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 * See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
		if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
			dma64 = 1;
	}
	if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 * The original Adaptec 64 bit driver has this comment here:
	 * "x86_64 machines need more optimal mappings"
	 *
	 * I assume some HBAs report ridiculously large mappings
	 * and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO"        BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO"        BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}


static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	// scsi_unregister calls our adpt_release which
	// does a quiesce
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
{
	struct adpt_device* d;

	if(chan < 0 || chan >= MAX_CHANNEL)
		return NULL;

	if( pHba->channel[chan].device == NULL){
		printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
		return NULL;
	}

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match*/
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d=d->next_lun ; d ; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}


static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
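
/*
 * Note: the wake-up side of this handshake is
 * adpt_i2o_post_wait_complete() below: the low 15 bits of msg[2]
 * carry wait_data->id (bit 31 marks the message as post-wait), and the
 * ISR passes that context back so the matching sleeper can be found
 * and woken.
 */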


static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{

	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}
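
/*
 * Note: posting works on message frame offsets (MFAs): reading
 * post_port yields the offset of a free frame (or EMPTY_QUEUE), the
 * request is copied into that frame with memcpy_toio(), and writing
 * the same offset back to post_port hands the frame to the IOP.
 */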


static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding.  If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile.
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
	// If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"         %d\n",p1->id);
	}
	return;
}

static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for a message frame to become available,
		// indicating that the reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}


static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	s16 scsi_lun;
	u32 buf[10]; // larger than 7, or 8 ...
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = (buf[2]>>8 )&0xff;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
				continue;
			}
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d==NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt - but for now they are in order
			//bus_no =
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = (buf[2]>>8 )&0xff;
				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for( pDev = pHba->channel[bus_no].device[scsi_id];
							pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}


/*
 * Each I2O controller has a chain of devices on it - these match
 * the useful parts of the LCT of the board.
 */

static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller=pHba;
	d->owner=NULL;
	d->next=pHba->devices;
	d->prev=NULL;
	if (pHba->devices != NULL){
		pHba->devices->prev=d;
	}
	pHba->devices=d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}

static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	mutex_lock(&adpt_mutex);
	//TODO check for root access
	//
	minor = iminor(inode);
	if (minor >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

//	if(pHba->in_use){
	//	mutex_unlock(&adpt_configuration_lock);
//		return -EBUSY;
//	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}
1792
1793
static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
    u32 msg[MAX_MESSAGE_SIZE];
    u32* reply = NULL;
    u32 size = 0;
    u32 reply_size = 0;
    u32 __user *user_msg = arg;
    u32 __user *user_reply = NULL;
    void *sg_list[pHba->sg_tablesize];
    u32 sg_offset = 0;
    u32 sg_count = 0;
    int sg_index = 0;
    u32 i = 0;
    u32 rcode = 0;
    void *p = NULL;
    dma_addr_t addr;
    ulong flags = 0;

    memset(&msg, 0, MAX_MESSAGE_SIZE*4);
    // get user msg size in u32s
    if(get_user(size, &user_msg[0])){
        return -EFAULT;
    }
    size = size>>16;

    user_reply = &user_msg[size];
    if(size > MAX_MESSAGE_SIZE){
        return -EFAULT;
    }
    size *= 4; // Convert to bytes

    /* Copy in the user's I2O command */
    if(copy_from_user(msg, user_msg, size)) {
        return -EFAULT;
    }
    get_user(reply_size, &user_reply[0]);
    reply_size = reply_size>>16;
    if(reply_size > REPLY_FRAME_SIZE){
        reply_size = REPLY_FRAME_SIZE;
    }
    reply_size *= 4;
    reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
    if(reply == NULL) {
        printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
        return -ENOMEM;
    }
    sg_offset = (msg[0]>>4)&0xf;
    msg[2] = 0x40000000; // IOCTL context
    msg[3] = adpt_ioctl_to_context(pHba, reply);
    if (msg[3] == (u32)-1) {
        kfree(reply);   /* don't leak the reply frame */
        return -EBUSY;
    }

    memset(sg_list, 0, sizeof(sg_list[0])*pHba->sg_tablesize);
    if(sg_offset) {
        // TODO add 64 bit API
        struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
        sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
        if (sg_count > pHba->sg_tablesize){
            printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
            kfree (reply);
            return -EINVAL;
        }

        for(i = 0; i < sg_count; i++) {
            int sg_size;

            if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
                printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
                rcode = -EINVAL;
                goto cleanup;
            }
            sg_size = sg[i].flag_count & 0xffffff;
            /* Allocate memory for the transfer */
            p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
            if(!p) {
                printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
                        pHba->name,sg_size,i,sg_count);
                rcode = -ENOMEM;
                goto cleanup;
            }
            sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
            /* Copy in the user's SG buffer if necessary */
            if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
                // sg_simple_element API is 32 bit
                if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
                    printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
                    rcode = -EFAULT;
                    goto cleanup;
                }
            }
            /* sg_simple_element API is 32 bit, but addr < 4GB */
            sg[i].addr_bus = addr;
        }
    }

    do {
        if(pHba->host)
            spin_lock_irqsave(pHba->host->host_lock, flags);
        // This state stops any new commands from entering the
        // controller while processing the ioctl
//      pHba->state |= DPTI_STATE_IOCTL;
//      We can't set this now - The scsi subsystem sets host_blocked and
//      the queue empties and stops.  We need a way to restart the queue
        rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
        if (rcode != 0)
            printk("adpt_i2o_passthru: post wait failed %d %p\n",
                    rcode, reply);
//      pHba->state &= ~DPTI_STATE_IOCTL;
        if(pHba->host)
            spin_unlock_irqrestore(pHba->host->host_lock, flags);
    } while(rcode == -ETIMEDOUT);

    if(rcode){
        goto cleanup;
    }

    if(sg_offset) {
        /* Copy back the Scatter Gather buffers to user space */
        u32 j;
        // TODO add 64 bit API
        struct sg_simple_element* sg;
        int sg_size;

        // re-acquire the original message to handle the sg copy operation correctly
        memset(&msg, 0, MAX_MESSAGE_SIZE*4);
        // get user msg size in u32s
        if(get_user(size, &user_msg[0])){
            rcode = -EFAULT;
            goto cleanup;
        }
        size = size>>16;
        size *= 4;
        if (size > MAX_MESSAGE_SIZE) {
            rcode = -EINVAL;
            goto cleanup;
        }
        /* Copy in the user's I2O command */
        if (copy_from_user (msg, user_msg, size)) {
            rcode = -EFAULT;
            goto cleanup;
        }
        sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

        // TODO add 64 bit API
        sg = (struct sg_simple_element*)(msg + sg_offset);
        for (j = 0; j < sg_count; j++) {
            /* Copy out the SG list to user's buffer if necessary */
            if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
                sg_size = sg[j].flag_count & 0xffffff;
                // sg_simple_element API is 32 bit
                if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
                    printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
                    rcode = -EFAULT;
                    goto cleanup;
                }
            }
        }
    }

    /* Copy back the reply to user space */
    if (reply_size) {
        // we wrote our own values for context - now restore the user supplied ones
        if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
            printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
            rcode = -EFAULT;
        }
        if(copy_to_user(user_reply, reply, reply_size)) {
            printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
            rcode = -EFAULT;
        }
    }


cleanup:
    if (rcode != -ETIME && rcode != -EINTR) {
        struct sg_simple_element *sg =
                (struct sg_simple_element*) (msg + sg_offset);
        kfree (reply);
        while(sg_index) {
            if(sg_list[--sg_index]) {
                dma_free_coherent(&pHba->pDev->dev,
                        sg[sg_index].flag_count & 0xffffff,
                        sg_list[sg_index],
                        sg[sg_index].addr_bus);
            }
        }
    }
    return rcode;
}

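/*
 * Hypothetical user-space sketch (not from the original source) of how a
 * management tool would drive the pass-through path above: the message
 * and the reply frame live in one buffer handed to ioctl(I2OUSRCMD).
 * The device node name is an assumption here.
 *
 *     u32 buf[3 + REPLY_FRAME_SIZE];
 *     buf[0] = (3 << 16) | 0;            // 3-word message, no SG list
 *     buf[1] = I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID;
 *     buf[2] = 0;                        // contexts are rewritten anyway
 *     buf[3] = REPLY_FRAME_SIZE << 16;   // reply frame size, in words
 *     int fd = open("/dev/dpti0", O_RDWR);
 *     if (fd >= 0 && ioctl(fd, I2OUSRCMD, buf) == 0)
 *         ;                              // buf[3..] now holds the reply
 */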
#if defined __ia64__
static void adpt_ia64_info(sysInfo_S* si)
{
    // This is all the info we need for now
    // We will add more info as our new
    // management utility requires it
    si->processorType = PROC_IA64;
}
#endif

#if defined __sparc__
static void adpt_sparc_info(sysInfo_S* si)
{
    // This is all the info we need for now
    // We will add more info as our new
    // management utility requires it
    si->processorType = PROC_ULTRASPARC;
}
#endif

#if defined __alpha__
static void adpt_alpha_info(sysInfo_S* si)
{
    // This is all the info we need for now
    // We will add more info as our new
    // management utility requires it
    si->processorType = PROC_ALPHA;
}
#endif

#if defined __i386__
static void adpt_i386_info(sysInfo_S* si)
{
    // This is all the info we need for now
    // We will add more info as our new
    // management utility requires it
    switch (boot_cpu_data.x86) {
    case CPU_386:
        si->processorType = PROC_386;
        break;
    case CPU_486:
        si->processorType = PROC_486;
        break;
    case CPU_586:
        si->processorType = PROC_PENTIUM;
        break;
    default:  // Just in case
        si->processorType = PROC_PENTIUM;
        break;
    }
}
#endif

/*
 * This routine returns information about the system.  This does not affect
 * any logic and if the info is wrong - it doesn't matter.
 */

/* Get all the info we can not get from kernel services */
static int adpt_system_info(void __user *buffer)
{
    sysInfo_S si;

    memset(&si, 0, sizeof(si));

    si.osType = OS_LINUX;
    si.osMajorVersion = 0;
    si.osMinorVersion = 0;
    si.osRevision = 0;
    si.busType = SI_PCI_BUS;
    si.processorFamily = DPTI_sig.dsProcessorFamily;

#if defined __i386__
    adpt_i386_info(&si);
#elif defined (__ia64__)
    adpt_ia64_info(&si);
#elif defined(__sparc__)
    adpt_sparc_info(&si);
#elif defined (__alpha__)
    adpt_alpha_info(&si);
#else
    si.processorType = 0xff;
#endif
    if (copy_to_user(buffer, &si, sizeof(si))){
        printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
        return -EFAULT;
    }

    return 0;
}

static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
{
    int minor;
    int error = 0;
    adpt_hba* pHba;
    ulong flags = 0;
    void __user *argp = (void __user *)arg;

    minor = iminor(inode);
    if (minor >= DPTI_MAX_HBA){
        return -ENXIO;
    }
    mutex_lock(&adpt_configuration_lock);
    for (pHba = hba_chain; pHba; pHba = pHba->next) {
        if (pHba->unit == minor) {
            break;  /* found adapter */
        }
    }
    mutex_unlock(&adpt_configuration_lock);
    if(pHba == NULL){
        return -ENXIO;
    }

    while ((volatile u32)pHba->state & DPTI_STATE_RESET)
        schedule_timeout_uninterruptible(2);

    switch (cmd) {
    // TODO: handle 3 cases
    case DPT_SIGNATURE:
        if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
            return -EFAULT;
        }
        break;
    case I2OUSRCMD:
        return adpt_i2o_passthru(pHba, argp);

    case DPT_CTRLINFO:{
        drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA       0x0002
#define FLG_OSD_I2O       0x0004
        memset(&HbaInfo, 0, sizeof(HbaInfo));
        HbaInfo.drvrHBAnum = pHba->unit;
        HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
        HbaInfo.blinkState = adpt_read_blink_led(pHba);
        HbaInfo.pciBusNum = pHba->pDev->bus->number;
        HbaInfo.pciDeviceNum = PCI_SLOT(pHba->pDev->devfn);
        HbaInfo.Interrupt = pHba->pDev->irq;
        HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
        if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
            printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
            return -EFAULT;
        }
        break;
        }
    case DPT_SYSINFO:
        return adpt_system_info(argp);
    case DPT_BLINKLED:{
        u32 value;
        value = (u32)adpt_read_blink_led(pHba);
        if (copy_to_user(argp, &value, sizeof(value))) {
            return -EFAULT;
        }
        break;
        }
    case I2ORESETCMD:
        if(pHba->host)
            spin_lock_irqsave(pHba->host->host_lock, flags);
        adpt_hba_reset(pHba);
        if(pHba->host)
            spin_unlock_irqrestore(pHba->host->host_lock, flags);
        break;
    case I2ORESCANCMD:
        adpt_rescan(pHba);
        break;
    default:
        return -EINVAL;
    }

    return error;
}

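/*
 * Hypothetical user-space sketch (assumed device node name, not from the
 * original source): DPT_BLINKLED simply copies one u32 back, so reading
 * the blink-LED code is a two-line affair.
 *
 *     u32 led = 0;
 *     int fd = open("/dev/dpti0", O_RDWR);
 *     if (fd >= 0 && ioctl(fd, DPT_BLINKLED, &led) == 0)
 *         printf("blink LED code: 0x%x\n", led);
 */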
static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
{
    struct inode *inode;
    long ret;

    inode = file->f_dentry->d_inode;

    mutex_lock(&adpt_mutex);
    ret = adpt_ioctl(inode, file, cmd, arg);
    mutex_unlock(&adpt_mutex);

    return ret;
}

#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *file,
                unsigned int cmd, unsigned long arg)
{
    struct inode *inode;
    long ret;

    inode = file->f_dentry->d_inode;

    mutex_lock(&adpt_mutex);

    switch(cmd) {
    case DPT_SIGNATURE:
    case I2OUSRCMD:
    case DPT_CTRLINFO:
    case DPT_SYSINFO:
    case DPT_BLINKLED:
    case I2ORESETCMD:
    case I2ORESCANCMD:
    case (DPT_TARGET_BUSY & 0xFFFF):
    case DPT_TARGET_BUSY:
        ret = adpt_ioctl(inode, file, cmd, arg);
        break;
    default:
        ret = -ENOIOCTLCMD;
    }

    mutex_unlock(&adpt_mutex);

    return ret;
}
#endif

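/*
 * Reading aid (not part of the original source): the ISR below sorts
 * replies by the context word the driver planted in the request frame.
 *
 *     context & 0x40000000  ->  ioctl pass-through; copy the reply frame
 *                               into the buffer adpt_ioctl_to_context()
 *                               registered (all ioctls are also post-wait)
 *     context & 0x80000000  ->  post-wait request; wake the sleeper via
 *                               adpt_i2o_post_wait_complete()
 *     otherwise             ->  normal SCSI command; the transaction
 *                               context maps back to a scsi_cmnd
 */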
static irqreturn_t adpt_isr(int irq, void *dev_id)
{
    struct scsi_cmnd* cmd;
    adpt_hba* pHba = dev_id;
    u32 m;
    void __iomem *reply;
    u32 status = 0;
    u32 context;
    ulong flags = 0;
    int handled = 0;

    if (pHba == NULL){
        printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
        return IRQ_NONE;
    }
    if(pHba->host)
        spin_lock_irqsave(pHba->host->host_lock, flags);

    while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
        m = readl(pHba->reply_port);
        if(m == EMPTY_QUEUE){
            // Try twice then give up
            rmb();
            m = readl(pHba->reply_port);
            if(m == EMPTY_QUEUE){
                // This really should not happen
                printk(KERN_ERR"dpti: Could not get reply frame\n");
                goto out;
            }
        }
        if (pHba->reply_pool_pa <= m &&
            m < pHba->reply_pool_pa +
                (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
            reply = (u8 *)pHba->reply_pool +
                    (m - pHba->reply_pool_pa);
        } else {
            /* Ick, we should *never* be here */
            printk(KERN_ERR "dpti: reply frame not from pool\n");
            reply = (u8 *)bus_to_virt(m);
        }

        if (readl(reply) & MSG_FAIL) {
            u32 old_m = readl(reply+28);
            void __iomem *msg;
            u32 old_context;
            PDEBUG("%s: Failed message\n",pHba->name);
            if(old_m >= 0x100000){
                printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
                writel(m,pHba->reply_port);
                continue;
            }
            // Transaction context is 0 in failed reply frame
            msg = pHba->msg_addr_virt + old_m;
            old_context = readl(msg+12);
            writel(old_context, reply+12);
            adpt_send_nop(pHba, old_m);
        }
        context = readl(reply+8);
        if(context & 0x40000000){ // IOCTL
            void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
            if( p != NULL) {
                memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
            }
            // All IOCTLs will also be post wait
        }
        if(context & 0x80000000){ // Post wait message
            status = readl(reply+16);
            if(status >> 24){
                status &= 0xffff; /* Get detail status */
            } else {
                status = I2O_POST_WAIT_OK;
            }
            if(!(context & 0x40000000)) {
                cmd = adpt_cmd_from_context(pHba,
                        readl(reply+12));
                if(cmd != NULL) {
                    printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
                }
            }
            adpt_i2o_post_wait_complete(context, status);
        } else { // SCSI message
            cmd = adpt_cmd_from_context (pHba, readl(reply+12));
            if(cmd != NULL){
                scsi_dma_unmap(cmd);
                if(cmd->serial_number != 0) { // If not timedout
                    adpt_i2o_to_scsi(reply, cmd);
                }
            }
        }
        writel(m, pHba->reply_port);
        wmb();
        rmb();
    }
    handled = 1;
out:
    if(pHba->host)
        spin_unlock_irqrestore(pHba->host->host_lock, flags);
    return IRQ_RETVAL(handled);
}

static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
{
    int i;
    u32 msg[MAX_MESSAGE_SIZE];
    u32* mptr;
    u32* lptr;
    u32 *lenptr;
    int direction;
    int scsidir;
    int nseg;
    u32 len;
    u32 reqlen;
    s32 rcode;
    dma_addr_t addr;

    memset(msg, 0, sizeof(msg));
    len = scsi_bufflen(cmd);
    direction = 0x00000000;

    scsidir = 0x00000000;  // DATA NO XFER
    if(len) {
        /*
         * Set SCBFlags to indicate if data is being transferred
         * in or out, or no data transfer
         * Note:  Do not have to verify index is less than 0 since
         * cmd->cmnd[0] is an unsigned char
         */
        switch(cmd->sc_data_direction){
        case DMA_FROM_DEVICE:
            scsidir  = 0x40000000;  // DATA IN  (iop<--dev)
            break;
        case DMA_TO_DEVICE:
            direction = 0x04000000; // SGL OUT
            scsidir  = 0x80000000;  // DATA OUT (iop-->dev)
            break;
        case DMA_NONE:
            break;
        case DMA_BIDIRECTIONAL:
            scsidir  = 0x40000000;  // DATA IN  (iop<--dev)
            // Assume In - and continue;
            break;
        default:
            printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
                    pHba->name, cmd->cmnd[0]);
            cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
            cmd->scsi_done(cmd);
            return 0;
        }
    }
    // msg[0] is set later
    // I2O_CMD_SCSI_EXEC
    msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
    msg[2] = 0;
    msg[3] = adpt_cmd_to_context(cmd);  /* Want SCSI control block back */
    // Our cards use the transaction context as the tag for queueing
    // Adaptec/DPT Private stuff
    msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
    msg[5] = d->tid;
    /* Direction, disconnect ok | sense data | simple queue , CDBLen */
    // I2O_SCB_FLAG_ENABLE_DISCONNECT |
    // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
    // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
    msg[6] = scsidir|0x20a00000|cmd->cmd_len;

    mptr = msg+7;

    // Write SCSI command into the message - always 16 byte block
    memset(mptr, 0, 16);
    memcpy(mptr, cmd->cmnd, cmd->cmd_len);
    mptr += 4;
    lenptr = mptr++;    /* Remember me - fill in when we know */
    if (dpt_dma64(pHba)) {
        reqlen = 16;    // SINGLE SGE
        *mptr++ = (0x7C<<24)+(2<<16)+0x02;  /* Enable 64 bit */
        *mptr++ = 1 << PAGE_SHIFT;
    } else {
        reqlen = 14;    // SINGLE SGE
    }
    /* Now fill in the SGList and command */

    nseg = scsi_dma_map(cmd);
    BUG_ON(nseg < 0);
    if (nseg) {
        struct scatterlist *sg;

        len = 0;
        scsi_for_each_sg(cmd, sg, nseg, i) {
            lptr = mptr;
            *mptr++ = direction|0x10000000|sg_dma_len(sg);
            len += sg_dma_len(sg);
            addr = sg_dma_address(sg);
            *mptr++ = dma_low(addr);
            if (dpt_dma64(pHba))
                *mptr++ = dma_high(addr);
            /* Make this an end of list */
            if (i == nseg - 1)
                *lptr = direction|0xD0000000|sg_dma_len(sg);
        }
        reqlen = mptr - msg;
        *lenptr = len;

        if(cmd->underflow && len != cmd->underflow){
            printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
                    len, cmd->underflow);
        }
    } else {
        *lenptr = len = 0;
        reqlen = 12;
    }

    /* Stick the headers on */
    msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);

    // Send it on its way
    rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
    if (rcode == 0) {
        return 0;
    }
    return rcode;
}


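/*
 * Reading aid (not part of the original source): each simple SG element
 * written above packs flags into the top byte of flag_count:
 *
 *     0x10000000  simple address element
 *     0x04000000  direction (data out)
 *     0xD0000000  simple + end-of-buffer + last-element, used to
 *                 terminate the list
 *
 * The low 24 bits hold the byte count; the element is followed by the
 * 32-bit bus address (plus a high word on 64-bit capable cards).
 */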
static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
{
    struct Scsi_Host *host;

    host = scsi_host_alloc(sht, sizeof(adpt_hba*));
    if (host == NULL) {
        printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
        return -1;
    }
    host->hostdata[0] = (unsigned long)pHba;
    pHba->host = host;

    host->irq = pHba->pDev->irq;
    /* no IO ports, so don't have to set host->io_port and
     * host->n_io_port
     */
    host->io_port = 0;
    host->n_io_port = 0;
    /* see comments in scsi_host.h */
    host->max_id = 16;
    host->max_lun = 256;
    host->max_channel = pHba->top_scsi_channel + 1;
    host->cmd_per_lun = 1;
    host->unique_id = (u32)sys_tbl_pa + pHba->unit;
    host->sg_tablesize = pHba->sg_tablesize;
    host->can_queue = pHba->post_fifo_size;

    return 0;
}


static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
{
    adpt_hba* pHba;
    u32 hba_status;
    u32 dev_status;
    u32 reply_flags = readl(reply) & 0xff00;  // Leave it shifted up 8 bits
    // I know this would look cleaner if I just read bytes
    // but the model I have been using for all the rest of the
    // io is in 4 byte words - so I keep that model
    u16 detailed_status = readl(reply+16) & 0xffff;
    dev_status = (detailed_status & 0xff);
    hba_status = detailed_status >> 8;

    // calculate resid for sg
    scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));

    pHba = (adpt_hba*) cmd->device->host->hostdata[0];

    cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false

    if(!(reply_flags & MSG_FAIL)) {
        switch(detailed_status & I2O_SCSI_DSC_MASK) {
        case I2O_SCSI_DSC_SUCCESS:
            cmd->result = (DID_OK << 16);
            // handle underflow
            if (readl(reply+20) < cmd->underflow) {
                cmd->result = (DID_ERROR <<16);
                printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
            }
            break;
        case I2O_SCSI_DSC_REQUEST_ABORTED:
            cmd->result = (DID_ABORT << 16);
            break;
        case I2O_SCSI_DSC_PATH_INVALID:
        case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
        case I2O_SCSI_DSC_SELECTION_TIMEOUT:
        case I2O_SCSI_DSC_COMMAND_TIMEOUT:
        case I2O_SCSI_DSC_NO_ADAPTER:
        case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
            printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
                    pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
            cmd->result = (DID_TIME_OUT << 16);
            break;
        case I2O_SCSI_DSC_ADAPTER_BUSY:
        case I2O_SCSI_DSC_BUS_BUSY:
            cmd->result = (DID_BUS_BUSY << 16);
            break;
        case I2O_SCSI_DSC_SCSI_BUS_RESET:
        case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
            cmd->result = (DID_RESET << 16);
            break;
        case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
            printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
            cmd->result = (DID_PARITY << 16);
            break;
        case I2O_SCSI_DSC_UNABLE_TO_ABORT:
        case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
        case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
        case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
        case I2O_SCSI_DSC_AUTOSENSE_FAILED:
        case I2O_SCSI_DSC_DATA_OVERRUN:
        case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
        case I2O_SCSI_DSC_SEQUENCE_FAILURE:
        case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
        case I2O_SCSI_DSC_PROVIDE_FAILURE:
        case I2O_SCSI_DSC_REQUEST_TERMINATED:
        case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
        case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
        case I2O_SCSI_DSC_MESSAGE_RECEIVED:
        case I2O_SCSI_DSC_INVALID_CDB:
        case I2O_SCSI_DSC_LUN_INVALID:
        case I2O_SCSI_DSC_SCSI_TID_INVALID:
        case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
        case I2O_SCSI_DSC_NO_NEXUS:
        case I2O_SCSI_DSC_CDB_RECEIVED:
        case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
        case I2O_SCSI_DSC_QUEUE_FROZEN:
        case I2O_SCSI_DSC_REQUEST_INVALID:
        default:
            printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
                    pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
                    hba_status, dev_status, cmd->cmnd[0]);
            cmd->result = (DID_ERROR << 16);
            break;
        }

        // copy over the request sense data if it was a check
        // condition status
        if (dev_status == SAM_STAT_CHECK_CONDITION) {
            u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
            // Copy over the sense data
            memcpy_fromio(cmd->sense_buffer, (reply+28), len);
            if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
               cmd->sense_buffer[2] == DATA_PROTECT ){
                /* This is to handle an array failed */
                cmd->result = (DID_TIME_OUT << 16);
                printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
                        pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
                        hba_status, dev_status, cmd->cmnd[0]);
            }
        }
    } else {
        /* In this condition we could not talk to the tid.
         * The card rejected it.  We should signal a retry
         * for a limited number of retries.
         */
        cmd->result = (DID_TIME_OUT << 16);
        printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
                pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
                ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
    }

    cmd->result |= (dev_status);

    if(cmd->scsi_done != NULL){
        cmd->scsi_done(cmd);
    }
    return cmd->result;
}


static s32 adpt_rescan(adpt_hba* pHba)
{
    s32 rcode;
    ulong flags = 0;

    if(pHba->host)
        spin_lock_irqsave(pHba->host->host_lock, flags);
    if ((rcode = adpt_i2o_lct_get(pHba)) < 0)
        goto out;
    if ((rcode = adpt_i2o_reparse_lct(pHba)) < 0)
        goto out;
    rcode = 0;
out:
    if(pHba->host)
        spin_unlock_irqrestore(pHba->host->host_lock, flags);
    return rcode;
}


static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
{
    int i;
    int max;
    int tid;
    struct i2o_device *d;
    i2o_lct *lct = pHba->lct;
    u8 bus_no = 0;
    s16 scsi_id;
    s16 scsi_lun;
    u32 buf[10];  // at least 8 u32's
    struct adpt_device* pDev = NULL;
    struct i2o_device* pI2o_dev = NULL;

    if (lct == NULL) {
        printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
        return -1;
    }

    max = lct->table_size;
    max -= 3;
    max /= 9;

    // Mark each drive as unscanned
    for (d = pHba->devices; d; d = d->next) {
        pDev = (struct adpt_device*) d->owner;
        if(!pDev){
            continue;
        }
        pDev->state |= DPTI_DEV_UNSCANNED;
    }

    printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name, max);

    for(i = 0; i < max; i++) {
        if( lct->lct_entry[i].user_tid != 0xfff){
            continue;
        }

        if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
            lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
            lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
            tid = lct->lct_entry[i].tid;
            if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
                printk(KERN_ERR"%s: Could not query device\n",pHba->name);
                continue;
            }
            bus_no = buf[0]>>16;
            if (bus_no >= MAX_CHANNEL) {  /* Something wrong skip it */
                printk(KERN_WARNING
                        "%s: Channel number %d out of range\n",
                        pHba->name, bus_no);
                continue;
            }

            scsi_id = buf[1];
            scsi_lun = (buf[2]>>8)&0xff;
            pDev = pHba->channel[bus_no].device[scsi_id];
            /* da lun */
            while(pDev) {
                if(pDev->scsi_lun == scsi_lun) {
                    break;
                }
                pDev = pDev->next_lun;
            }
            if(!pDev ) {  // Something new add it
                d = kmalloc(sizeof(struct i2o_device),
                        GFP_ATOMIC);
                if(d==NULL)
                {
                    printk(KERN_CRIT "Out of memory for I2O device data.\n");
                    return -ENOMEM;
                }

                d->controller = pHba;
                d->next = NULL;

                memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

                d->flags = 0;
                adpt_i2o_report_hba_unit(pHba, d);
                adpt_i2o_install_device(pHba, d);

                pDev = pHba->channel[bus_no].device[scsi_id];
                if( pDev == NULL){
                    pDev = kzalloc(sizeof(struct adpt_device),
                            GFP_ATOMIC);
                    if(pDev == NULL) {
                        return -ENOMEM;
                    }
                    pHba->channel[bus_no].device[scsi_id] = pDev;
                } else {
                    while (pDev->next_lun) {
                        pDev = pDev->next_lun;
                    }
                    pDev = pDev->next_lun =
                            kzalloc(sizeof(struct adpt_device),
                                    GFP_ATOMIC);
                    if(pDev == NULL) {
                        return -ENOMEM;
                    }
                }
                pDev->tid = d->lct_data.tid;
                pDev->scsi_channel = bus_no;
                pDev->scsi_id = scsi_id;
                pDev->scsi_lun = scsi_lun;
                pDev->pI2o_dev = d;
                d->owner = pDev;
                pDev->type = (buf[0])&0xff;
                pDev->flags = (buf[0]>>8)&0xff;
                // Too late, SCSI system has made up its mind, but what the hey ...
                if(scsi_id > pHba->top_scsi_id){
                    pHba->top_scsi_id = scsi_id;
                }
                if(scsi_lun > pHba->top_scsi_lun){
                    pHba->top_scsi_lun = scsi_lun;
                }
                continue;
            }  // end of new i2o device

            // We found an old device - check it
            while(pDev) {
                if(pDev->scsi_lun == scsi_lun) {
                    if(!scsi_device_online(pDev->pScsi_dev)) {
                        printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
                                pHba->name,bus_no,scsi_id,scsi_lun);
                        if (pDev->pScsi_dev) {
                            scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
                        }
                    }
                    d = pDev->pI2o_dev;
                    if(d->lct_data.tid != tid) {  // something changed
                        pDev->tid = tid;
                        memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
                        if (pDev->pScsi_dev) {
                            pDev->pScsi_dev->changed = TRUE;
                            pDev->pScsi_dev->removable = TRUE;
                        }
                    }
                    // Found it - mark it scanned
                    pDev->state = DPTI_DEV_ONLINE;
                    break;
                }
                pDev = pDev->next_lun;
            }
        }
    }
    for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
        pDev = (struct adpt_device*) pI2o_dev->owner;
        if(!pDev){
            continue;
        }
        // Drive offline drives that previously existed but could not be found
        // in the LCT table
        if (pDev->state & DPTI_DEV_UNSCANNED){
            pDev->state = DPTI_DEV_OFFLINE;
            printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
            if (pDev->pScsi_dev) {
                scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
            }
        }
    }
    return 0;
}

static void adpt_fail_posted_scbs(adpt_hba* pHba)
{
    struct scsi_cmnd* cmd = NULL;
    struct scsi_device* d = NULL;

    shost_for_each_device(d, pHba->host) {
        unsigned long flags;
        spin_lock_irqsave(&d->list_lock, flags);
        list_for_each_entry(cmd, &d->cmd_list, list) {
            if(cmd->serial_number == 0){
                continue;
            }
            cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
            cmd->scsi_done(cmd);
        }
        spin_unlock_irqrestore(&d->list_lock, flags);
    }
}


/*============================================================================
 * Routines from i2o subsystem
 *============================================================================
 */


/*
 * Bring an I2O controller into HOLD state. See the spec.
 */
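/*
 * Reading aid (not from the original source, paraphrasing the I2O spec as
 * the code below uses it): an IOP moves RESET -> INIT -> HOLD -> READY ->
 * OPERATIONAL.  adpt_i2o_activate_hba() resets a controller that is not
 * already in a sane state, then performs the outbound-queue init and HRT
 * get that complete the transition to HOLD; adpt_i2o_online_hba() then
 * sends the system table and an ExecSysEnable to reach OPERATIONAL.
 */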
static int adpt_i2o_activate_hba(adpt_hba* pHba)
{
    int rcode;

    if(pHba->initialized) {
        if (adpt_i2o_status_get(pHba) < 0) {
            if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
                printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
                return rcode;
            }
            if (adpt_i2o_status_get(pHba) < 0) {
                printk(KERN_INFO "HBA not responding.\n");
                return -1;
            }
        }

        if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
            printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
            return -1;
        }

        if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
            pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
            pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
            pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
            adpt_i2o_reset_hba(pHba);
            if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
                printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
                return -1;
            }
        }
    } else {
        if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
            printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
            return rcode;
        }
    }

    if (adpt_i2o_init_outbound_q(pHba) < 0) {
        return -1;
    }

    /* In HOLD state */

    if (adpt_i2o_hrt_get(pHba) < 0) {
        return -1;
    }

    return 0;
}

/*
 * Bring a controller online into OPERATIONAL state.
 */

static int adpt_i2o_online_hba(adpt_hba* pHba)
{
    if (adpt_i2o_systab_send(pHba) < 0) {
        adpt_i2o_delete_hba(pHba);
        return -1;
    }
    /* In READY state */

    if (adpt_i2o_enable_hba(pHba) < 0) {
        adpt_i2o_delete_hba(pHba);
        return -1;
    }

    /* In OPERATIONAL state */
    return 0;
}

static s32 adpt_send_nop(adpt_hba* pHba, u32 m)
{
    u32 __iomem *msg;
    ulong timeout = jiffies + 5*HZ;

    while(m == EMPTY_QUEUE){
        rmb();
        m = readl(pHba->post_port);
        if(m != EMPTY_QUEUE){
            break;
        }
        if(time_after(jiffies,timeout)){
            printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
            return 2;
        }
        schedule_timeout_uninterruptible(1);
    }
    msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
    writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg[0]);
    writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0, &msg[1]);
    writel(0, &msg[2]);
    wmb();

    writel(m, pHba->post_port);
    wmb();
    return 0;
}

static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
{
    u8 *status;
    dma_addr_t addr;
    u32 __iomem *msg = NULL;
    int i;
    ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
    u32 m;

    do {
        rmb();
        m = readl(pHba->post_port);
        if (m != EMPTY_QUEUE) {
            break;
        }

        if(time_after(jiffies,timeout)){
            printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
            return -ETIMEDOUT;
        }
        schedule_timeout_uninterruptible(1);
    } while(m == EMPTY_QUEUE);

    msg = (u32 __iomem *)(pHba->msg_addr_virt+m);

    status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
    if (!status) {
        adpt_send_nop(pHba, m);
        printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
                pHba->name);
        return -ENOMEM;
    }
    memset(status, 0, 4);

    writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
    writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
    writel(0, &msg[2]);
    writel(0x0106, &msg[3]);    /* Transaction context */
    writel(4096, &msg[4]);      /* Host page frame size */
    writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);  /* Outbound msg frame size and Initcode */
    writel(0xD0000004, &msg[6]);                   /* Simple SG LE, EOB */
    writel((u32)addr, &msg[7]);

    writel(m, pHba->post_port);
    wmb();

    // Wait for the reply status to come back
    do {
        if (*status) {
            if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
                break;
            }
        }
        rmb();
        if(time_after(jiffies,timeout)){
            printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
            /* We lose 4 bytes of "status" here, but we
               cannot free these because controller may
               awake and corrupt those bytes at any time */
            /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
            return -ETIMEDOUT;
        }
        schedule_timeout_uninterruptible(1);
    } while (1);

    // If the command was successful, fill the fifo with our reply
    // message packets
    if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
        dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
        return -2;
    }
    dma_free_coherent(&pHba->pDev->dev, 4, status, addr);

    if(pHba->reply_pool != NULL) {
        dma_free_coherent(&pHba->pDev->dev,
                pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
                pHba->reply_pool, pHba->reply_pool_pa);
    }

    pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
            pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
            &pHba->reply_pool_pa, GFP_KERNEL);
    if (!pHba->reply_pool) {
        printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
        return -ENOMEM;
    }
    memset(pHba->reply_pool, 0, pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);

    for(i = 0; i < pHba->reply_fifo_size; i++) {
        writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
                pHba->reply_port);
        wmb();
    }
    adpt_i2o_status_get(pHba);
    return 0;
}


/*
 * I2O System Table.  Contains information about
 * all the IOPs in the system.  Used to inform IOPs
 * about each other's existence.
 *
 * sys_tbl_ver is the CurrentChangeIndicator that is
 * used by IOPs to track changes.
 */

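/*
 * Reading aid (not from the original source): as built in
 * adpt_i2o_build_sys_table() below, the table is one i2o_sys_tbl header
 * (num_entries, version, change_ind) followed by one i2o_sys_tbl_entry
 * per adapter carrying its org/IOP ids, I2O version, IOP state, message
 * type, frame size and the bus address of its inbound post FIFO
 * (base_addr_phys + 0x40, split into inbound_low/inbound_high).
 */
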
static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
    ulong timeout;
    u32 m;
    u32 __iomem *msg;
    u8 *status_block = NULL;

    if(pHba->status_block == NULL) {
        pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
                sizeof(i2o_status_block),
                &pHba->status_block_pa, GFP_KERNEL);
        if(pHba->status_block == NULL) {
            printk(KERN_ERR
                    "dpti%d: Get Status Block failed; Out of memory.\n",
                    pHba->unit);
            return -ENOMEM;
        }
    }
    memset(pHba->status_block, 0, sizeof(i2o_status_block));
    status_block = (u8*)(pHba->status_block);
    timeout = jiffies + TMOUT_GETSTATUS*HZ;
    do {
        rmb();
        m = readl(pHba->post_port);
        if (m != EMPTY_QUEUE) {
            break;
        }
        if(time_after(jiffies,timeout)){
            printk(KERN_ERR "%s: Timeout waiting for message !\n",
                    pHba->name);
            return -ETIMEDOUT;
        }
        schedule_timeout_uninterruptible(1);
    } while(m == EMPTY_QUEUE);


    msg = (u32 __iomem *)(pHba->msg_addr_virt+m);

    writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
    writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
    writel(1, &msg[2]);
    writel(0, &msg[3]);
    writel(0, &msg[4]);
    writel(0, &msg[5]);
    writel(dma_low(pHba->status_block_pa), &msg[6]);
    writel(dma_high(pHba->status_block_pa), &msg[7]);
    writel(sizeof(i2o_status_block), &msg[8]);  // 88 bytes

    //post message
    writel(m, pHba->post_port);
    wmb();

    while(status_block[87] != 0xff){
        if(time_after(jiffies,timeout)){
            printk(KERN_ERR"dpti%d: Get status timeout.\n",
                    pHba->unit);
            return -ETIMEDOUT;
        }
        rmb();
        schedule_timeout_uninterruptible(1);
    }

    // Set up our number of outbound and inbound messages
    pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
    if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
        pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
    }

    pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
    if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
        pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
    }

    // Calculate the Scatter Gather list size
    if (dpt_dma64(pHba)) {
        pHba->sg_tablesize
            = ((pHba->status_block->inbound_frame_size * 4
                - 14 * sizeof(u32))
               / (sizeof(struct sg_simple_element) + sizeof(u32)));
    } else {
        pHba->sg_tablesize
            = ((pHba->status_block->inbound_frame_size * 4
                - 12 * sizeof(u32))
               / sizeof(struct sg_simple_element));
    }
    if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
        pHba->sg_tablesize = SG_LIST_ELEMENTS;
    }


#ifdef DEBUG
    printk("dpti%d: State = ",pHba->unit);
    switch(pHba->status_block->iop_state) {
    case 0x01:
        printk("INIT\n");
        break;
    case 0x02:
        printk("RESET\n");
        break;
    case 0x04:
        printk("HOLD\n");
        break;
    case 0x05:
        printk("READY\n");
        break;
    case 0x08:
        printk("OPERATIONAL\n");
        break;
    case 0x10:
        printk("FAILED\n");
        break;
    case 0x11:
        printk("FAULTED\n");
        break;
    default:
        printk("%x (unknown!!)\n",pHba->status_block->iop_state);
    }
#endif
    return 0;
}

/*
 * Get the IOP's Logical Configuration Table
 */
static int adpt_i2o_lct_get(adpt_hba* pHba)
{
    u32 msg[8];
    int ret;
    u32 buf[16];

    if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
        pHba->lct_size = pHba->status_block->expected_lct_size;
    }
    do {
        if (pHba->lct == NULL) {
            pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
                    pHba->lct_size, &pHba->lct_pa,
                    GFP_ATOMIC);
            if(pHba->lct == NULL) {
                printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
                        pHba->name);
                return -ENOMEM;
            }
        }
        memset(pHba->lct, 0, pHba->lct_size);

        msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
        msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
        msg[2] = 0;
        msg[3] = 0;
        msg[4] = 0xFFFFFFFF;    /* All devices */
        msg[5] = 0x00000000;    /* Report now */
        msg[6] = 0xD0000000|pHba->lct_size;
        msg[7] = (u32)pHba->lct_pa;

        if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
            printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
                    pHba->name, ret);
            printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
            return ret;
        }

        if ((pHba->lct->table_size << 2) > pHba->lct_size) {
            pHba->lct_size = pHba->lct->table_size << 2;
            dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
                    pHba->lct, pHba->lct_pa);
            pHba->lct = NULL;
        }
    } while (pHba->lct == NULL);

    PDEBUG("%s: Hardware resource table read.\n", pHba->name);


    // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
    if(adpt_i2o_query_scalar(pHba, 0, 0x8000, -1, buf, sizeof(buf)) >= 0) {
        pHba->FwDebugBufferSize = buf[1];
        pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
                pHba->FwDebugBufferSize);
        if (pHba->FwDebugBuffer_P) {
            pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
                    FW_DEBUG_FLAGS_OFFSET;
            pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
                    FW_DEBUG_BLED_OFFSET;
            pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
            pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
                    FW_DEBUG_STR_LENGTH_OFFSET;
            pHba->FwDebugBuffer_P += buf[2];
            pHba->FwDebugFlags = 0;
        }
    }

    return 0;
}

static int adpt_i2o_build_sys_table(void)
{
    adpt_hba* pHba = hba_chain;
    int count = 0;

    if (sys_tbl)
        dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
                sys_tbl, sys_tbl_pa);

    sys_tbl_len = sizeof(struct i2o_sys_tbl) +  // Header + IOPs
            (hba_count) * sizeof(struct i2o_sys_tbl_entry);

    sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
            sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
    if (!sys_tbl) {
        printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
        return -ENOMEM;
    }
    memset(sys_tbl, 0, sys_tbl_len);

    sys_tbl->num_entries = hba_count;
    sys_tbl->version = I2OVERSION;
    sys_tbl->change_ind = sys_tbl_ind++;

    for(pHba = hba_chain; pHba; pHba = pHba->next) {
        u64 addr;
        // Get updated Status Block so we have the latest information
        if (adpt_i2o_status_get(pHba)) {
            sys_tbl->num_entries--;
            continue;  // try next one
        }

        sys_tbl->iops[count].org_id = pHba->status_block->org_id;
        sys_tbl->iops[count].iop_id = pHba->unit + 2;
        sys_tbl->iops[count].seg_num = 0;
        sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
        sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
        sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
        sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
        sys_tbl->iops[count].last_changed = sys_tbl_ind - 1;  // ??
        sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
        addr = pHba->base_addr_phys + 0x40;
        sys_tbl->iops[count].inbound_low = dma_low(addr);
        sys_tbl->iops[count].inbound_high = dma_high(addr);

        count++;
    }

#ifdef DEBUG
{
    u32 *table = (u32*)sys_tbl;
    printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
    for(count = 0; count < (sys_tbl_len >>2); count++) {
        printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
                count, table[count]);
    }
}
#endif

    return 0;
}


/*
 * Dump the information block associated with a given unit (TID)
 */

static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
{
    char buf[64];
    int unit = d->lct_data.tid;

    printk(KERN_INFO "TID %3.3d ", unit);

    if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16) >= 0)
    {
        buf[16] = 0;
        printk(" Vendor: %-12.12s", buf);
    }
    if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16) >= 0)
    {
        buf[16] = 0;
        printk(" Device: %-12.12s", buf);
    }
    if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8) >= 0)
    {
        buf[8] = 0;
        printk(" Rev: %-12.12s\n", buf);
    }
#ifdef DEBUG
    printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
    printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
    printk(KERN_INFO "\tFlags: ");

    if(d->lct_data.device_flags&(1<<0))
        printk("C");        // ConfigDialog requested
    if(d->lct_data.device_flags&(1<<1))
        printk("U");        // Multi-user capable
    if(!(d->lct_data.device_flags&(1<<4)))
        printk("P");        // Peer service enabled!
    if(!(d->lct_data.device_flags&(1<<5)))
        printk("M");        // Mgmt service enabled!
    printk("\n");
#endif
}

#ifdef DEBUG
/*
 * Do i2o class name lookup
 */
static const char *adpt_i2o_get_class_name(int class)
{
    int idx = 16;
    static char *i2o_class_name[] = {
        "Executive",
        "Device Driver Module",
        "Block Device",
        "Tape Device",
        "LAN Interface",
        "WAN Interface",
        "Fibre Channel Port",
        "Fibre Channel Device",
        "SCSI Device",
        "ATE Port",
        "ATE Device",
        "Floppy Controller",
        "Floppy Device",
        "Secondary Bus Port",
        "Peer Transport Agent",
        "Peer Transport",
        "Unknown"
    };

    switch(class&0xFFF) {
    case I2O_CLASS_EXECUTIVE:
        idx = 0; break;
    case I2O_CLASS_DDM:
        idx = 1; break;
    case I2O_CLASS_RANDOM_BLOCK_STORAGE:
        idx = 2; break;
    case I2O_CLASS_SEQUENTIAL_STORAGE:
        idx = 3; break;
    case I2O_CLASS_LAN:
        idx = 4; break;
    case I2O_CLASS_WAN:
        idx = 5; break;
    case I2O_CLASS_FIBRE_CHANNEL_PORT:
        idx = 6; break;
    case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
        idx = 7; break;
    case I2O_CLASS_SCSI_PERIPHERAL:
        idx = 8; break;
    case I2O_CLASS_ATE_PORT:
        idx = 9; break;
    case I2O_CLASS_ATE_PERIPHERAL:
        idx = 10; break;
    case I2O_CLASS_FLOPPY_CONTROLLER:
        idx = 11; break;
    case I2O_CLASS_FLOPPY_DEVICE:
        idx = 12; break;
    case I2O_CLASS_BUS_ADAPTER_PORT:
        idx = 13; break;
    case I2O_CLASS_PEER_TRANSPORT_AGENT:
        idx = 14; break;
    case I2O_CLASS_PEER_TRANSPORT:
        idx = 15; break;
    }
    return i2o_class_name[idx];
}
#endif


static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
{
    u32 msg[6];
    int ret, size = sizeof(i2o_hrt);

    do {
        if (pHba->hrt == NULL) {
            pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
                    size, &pHba->hrt_pa, GFP_KERNEL);
            if (pHba->hrt == NULL) {
                printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
                return -ENOMEM;
            }
        }

        msg[0] = SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
        msg[1] = I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
        msg[2] = 0;
        msg[3] = 0;
        msg[4] = (0xD0000000 | size);   /* Simple transaction */
        msg[5] = (u32)pHba->hrt_pa;     /* Dump it here */

        if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
            printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
            return ret;
        }

        if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
            int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
            dma_free_coherent(&pHba->pDev->dev, size,
                    pHba->hrt, pHba->hrt_pa);
            size = newsize;
            pHba->hrt = NULL;
        }
    } while(pHba->hrt == NULL);
    return 0;
}

/*
 * Query one scalar group value or a whole scalar group.
 */
static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
                int group, int field, void *buf, int buflen)
{
    u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
    u8 *opblk_va;
    dma_addr_t opblk_pa;
    u8 *resblk_va;
    dma_addr_t resblk_pa;

    int size;

    /* 8 bytes for header */
    resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
            sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
    if (resblk_va == NULL) {
        printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
        return -ENOMEM;
    }

    opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
            sizeof(opblk), &opblk_pa, GFP_KERNEL);
    if (opblk_va == NULL) {
        dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
                resblk_va, resblk_pa);
        printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
                pHba->name);
        return -ENOMEM;
    }
    if (field == -1)    /* whole group */
        opblk[4] = -1;

    memcpy(opblk_va, opblk, sizeof(opblk));
    size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
            opblk_va, opblk_pa, sizeof(opblk),
            resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
    dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
    if (size == -ETIME) {
        dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
                resblk_va, resblk_pa);
        printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
        return -ETIME;
    } else if (size == -EINTR) {
        dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
                resblk_va, resblk_pa);
        printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
        return -EINTR;
    }

    memcpy(buf, resblk_va+8, buflen);   /* cut off header */

    dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
            resblk_va, resblk_pa);
    if (size < 0)
        return size;

    return buflen;
}

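/*
 * Usage sketch (not from the original source, but matching how
 * adpt_i2o_report_hba_unit() earlier in this file calls
 * adpt_i2o_query_scalar() above): reading the 16-byte vendor string,
 * field 3 of scalar group 0xF100, for a given TID:
 *
 *     char buf[64];
 *     if (adpt_i2o_query_scalar(pHba, tid, 0xF100, 3, buf, 16) >= 0) {
 *         buf[16] = 0;
 *         printk("Vendor: %s\n", buf);
 *     }
 *
 * Passing field == -1 returns the whole group instead of one field.
 */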
/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
 *
 * This function can be used for all UtilParamsGet/Set operations.
 * The OperationBlock is given in opblk-buffer,
 * and results are returned in resblk-buffer.
 * Note that the minimum sized resblk is 8 bytes and contains
 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
 */
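/*
 * Reading aid (not from the original source, derived from the status
 * check in the function below): the second word of the result block
 * packs the header fields as
 *
 *     res[1] bits 31..24  ErrorInfoSize
 *            bits 23..16  BlockStatus (0 on success)
 *            bits 15..0   BlockSize, in 32-bit words
 *
 * which is why the success return value is 4 + (BlockSize << 2) bytes
 * used in the result block.
 */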
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
            void *opblk_va, dma_addr_t opblk_pa, int oplen,
            void *resblk_va, dma_addr_t resblk_pa, int reslen)
{
    u32 msg[9];
    u32 *res = (u32 *)resblk_va;
    int wait_status;

    msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
    msg[1] = cmd << 24 | HOST_TID << 12 | tid;
    msg[2] = 0;
    msg[3] = 0;
    msg[4] = 0;
    msg[5] = 0x54000000 | oplen;    /* OperationBlock */
    msg[6] = (u32)opblk_pa;
    msg[7] = 0xD0000000 | reslen;   /* ResultBlock */
    msg[8] = (u32)resblk_pa;

    if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
        printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
        return wait_status;         /* -DetailedStatus */
    }

    if (res[1]&0x00FF0000) {        /* BlockStatus != SUCCESS */
        printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
                "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
                pHba->name,
                (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
                        : "PARAMS_GET",
                res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
        return -((res[1] >> 16) & 0xFF);    /* -BlockStatus */
    }

    return 4 + ((res[1] & 0x0000FFFF) << 2);    /* bytes used in resblk */
}


static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
{
    u32 msg[4];
    int ret;

    adpt_i2o_status_get(pHba);

    /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */

    if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
       (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
        return 0;
    }

    msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
    msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
    msg[2] = 0;
    msg[3] = 0;

    if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
        printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
                pHba->unit, -ret);
    } else {
        printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
    }

    adpt_i2o_status_get(pHba);
    return ret;
}

/*
 * Enable IOP. Allows the IOP to resume external operations.
 */
static int adpt_i2o_enable_hba(adpt_hba* pHba)
{
    u32 msg[4];
    int ret;

    adpt_i2o_status_get(pHba);
    if(!pHba->status_block){
        return -ENOMEM;
    }
    /* Enable only allowed on READY state */
    if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
        return 0;

    if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
        return -EINVAL;

    msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
    msg[1] = I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
    msg[2] = 0;
    msg[3] = 0;

    if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
        printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
                pHba->name, ret);
    } else {
        PDEBUG("%s: Enabled.\n", pHba->name);
    }

    adpt_i2o_status_get(pHba);
    return ret;
}

static int adpt_i2o_systab_send(adpt_hba* pHba)
{
    u32 msg[12];
    int ret;

    msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
    msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
    msg[2] = 0;
    msg[3] = 0;
    msg[4] = (0<<16) | ((pHba->unit+2) << 12);  /* Host 0 IOP ID (unit + 2) */
    msg[5] = 0;                                 /* Segment 0 */

    /*
     * Provide three SGL-elements:
     * System table (SysTab), Private memory space declaration and
     * Private i/o space declaration
     */
    msg[6] = 0x54000000 | sys_tbl_len;
    msg[7] = (u32)sys_tbl_pa;
    msg[8] = 0x54000000 | 0;
    msg[9] = 0;
    msg[10] = 0xD4000000 | 0;
    msg[11] = 0;

    if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
        printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
                pHba->name, ret);
    }
#ifdef DEBUG
    else {
        PINFO("%s: SysTab set.\n", pHba->name);
    }
#endif

    return ret;
}


/*============================================================================
 *
 *============================================================================
 */


#ifdef UARTDELAY

static void adpt_delay(int millisec)
{
    int i;
    for (i = 0; i < millisec; i++) {
        udelay(1000);   /* delay for one millisecond */
    }
}

#endif

static struct scsi_host_template driver_template = {
    .module                  = THIS_MODULE,
    .name                    = "dpt_i2o",
    .proc_name               = "dpt_i2o",
    .proc_info               = adpt_proc_info,
    .info                    = adpt_info,
    .queuecommand            = adpt_queue,
    .eh_abort_handler        = adpt_abort,
    .eh_device_reset_handler = adpt_device_reset,
    .eh_bus_reset_handler    = adpt_bus_reset,
    .eh_host_reset_handler   = adpt_reset,
    .bios_param              = adpt_bios_param,
    .slave_configure         = adpt_slave_configure,
    .can_queue               = MAX_TO_IOP_MESSAGES,
    .this_id                 = 7,
    .cmd_per_lun             = 1,
    .use_clustering          = ENABLE_CLUSTERING,
};

static int __init adpt_init(void)
{
    int error;
    adpt_hba *pHba, *next;

    printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");

    error = adpt_detect(&driver_template);
    if (error < 0)
        return error;
    if (hba_chain == NULL)
        return -ENODEV;

    for (pHba = hba_chain; pHba; pHba = pHba->next) {
        error = scsi_add_host(pHba->host, &pHba->pDev->dev);
        if (error)
            goto fail;
        scsi_scan_host(pHba->host);
    }
    return 0;
fail:
    for (pHba = hba_chain; pHba; pHba = next) {
        next = pHba->next;
        scsi_remove_host(pHba->host);
    }
    return error;
}

static void __exit adpt_exit(void)
{
    adpt_hba *pHba, *next;

    for (pHba = hba_chain; pHba; pHba = pHba->next)
        scsi_remove_host(pHba->host);
    for (pHba = hba_chain; pHba; pHba = next) {
        next = pHba->next;
        adpt_release(pHba->host);
    }
}

module_init(adpt_init);
module_exit(adpt_exit);

MODULE_LICENSE("GPL");