/* -*- mode: c; c-basic-offset: 8 -*- */

/* NCR (or Symbios) 53c700 and 53c700-66 Driver
 *
 * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
**-----------------------------------------------------------------------------
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
**
**-----------------------------------------------------------------------------
 */

/* Notes:
 *
 * This driver is designed exclusively for these chips (virtually the
 * earliest of the scripts engine chips). They need their own drivers
 * because they are missing so many of the scripts and snazzy register
 * features of their elder brothers (the 710, 720 and 770).
 *
 * The 700 is the lowliest of the line, it can only do async SCSI.
 * The 700-66 can at least do synchronous SCSI up to 10MHz.
 *
 * The 700 chip has no host bus interface logic of its own. However,
 * it is usually mapped to a location with well defined register
 * offsets. Therefore, if you can determine the base address and the
 * irq your board incorporating this chip uses, you can probably use
 * this driver to run it (although you'll probably have to write a
 * minimal wrapper for the purpose---see the NCR_D700 driver for
 * details about how to do this).
 *
 *
 * TODO List:
 *
 * 1. Better statistics in the proc fs
 *
 * 2. Implement message queue (queues SCSI messages like commands) and make
 *    the abort and device reset functions use them.
 * */

/* CHANGELOG
 *
 * Version 2.8
 *
 * Fixed bad bug affecting tag starvation processing (previously the
 * driver would hang the system if too many tags starved). Also fixed
 * a bad bug having to do with 10 byte command processing and REQUEST
 * SENSE (the command would loop forever getting a transfer length
 * mismatch in the CMD phase).
 *
 * Version 2.7
 *
 * Fixed scripts problem which caused certain devices (notably CDRWs)
 * to hang on initial INQUIRY. Updated NCR_700_readl/writel to use
 * __raw_readl/writel for parisc compatibility (Thomas
 * Bogendoerfer). Added missing SCp->request_bufflen initialisation
 * for sense requests (Ryan Bradetich).
 *
 * Version 2.6
 *
 * Following test of the 64 bit parisc kernel by Richard Hirst,
 * several problems have now been corrected. Also adds support for
 * consistent memory allocation.
 *
 * Version 2.5
 *
 * More compatibility changes for the 710 (now actually works). Enhanced
 * support for odd clock speeds which constrain SDTR negotiations.
 * Correct cacheline separation for scsi messages and status for
 * incoherent architectures. Use of the pci mapping functions on
 * buffers to begin support for 64 bit drivers.
 *
 * Version 2.4
 *
 * Added support for the 53c710 chip (in 53c700 emulation mode only---no
 * special 53c710 instructions or registers are used).
 *
 * Version 2.3
 *
 * More endianness/cache coherency changes.
 *
 * Better bad device handling (handles devices lying about tag
 * queueing support and devices which fail to provide sense data on
 * contingent allegiance conditions)
 *
 * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
 * debugging this driver on the parisc architecture and suggesting
 * many improvements and bug fixes.
 *
 * Thanks also go to Linuxcare Inc. for providing several PARISC
 * machines for me to debug the driver on.
 *
 * Version 2.2
 *
 * Made the driver mem or io mapped; added endian invariance; added
 * dma cache flushing operations for architectures which need it;
 * added support for more varied clocking speeds.
 *
 * Version 2.1
 *
 * Initial modularisation from the D700. See NCR_D700.c for the rest of
 * the changelog.
 * */
#define NCR_700_VERSION "2.8"

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <asm/dma.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/byteorder.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_spi.h>

#include "53c700.h"

/* NOTE: For 64 bit drivers there are points in the code where we use
 * a non dereferenceable pointer to point to a structure in dma-able
 * memory (which is 32 bits) so that we can use all of the structure
 * operations but take the address at the end. This macro allows us
 * to truncate the 64 bit pointer down to 32 bits without the compiler
 * complaining */
#define to32bit(x) ((__u32)((unsigned long)(x)))

#ifdef NCR_700_DEBUG
#define STATIC
#else
#define STATIC static
#endif

MODULE_AUTHOR("James Bottomley");
MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
MODULE_LICENSE("GPL");

/* This is the script */
#include "53c700_d.h"


STATIC int NCR_700_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth, int reason);
static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);

STATIC struct device_attribute *NCR_700_dev_attrs[];

STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;

static char *NCR_700_phase[] = {
	"",
	"after selection",
	"before command phase",
	"after command phase",
	"after status phase",
	"after data in phase",
	"after data out phase",
	"during data phase",
};

static char *NCR_700_condition[] = {
	"",
	"NOT MSG_OUT",
	"UNEXPECTED PHASE",
	"NOT MSG_IN",
	"UNEXPECTED MSG",
	"MSG_IN",
	"SDTR_MSG RECEIVED",
	"REJECT_MSG RECEIVED",
	"DISCONNECT_MSG RECEIVED",
	"MSG_OUT",
	"DATA_IN",

};

static char *NCR_700_fatal_messages[] = {
	"unexpected message after reselection",
	"still MSG_OUT after message injection",
	"not MSG_IN after selection",
	"Illegal message length received",
};

static char *NCR_700_SBCL_bits[] = {
	"IO ",
	"CD ",
	"MSG ",
	"ATN ",
	"SEL ",
	"BSY ",
	"ACK ",
	"REQ ",
};

static char *NCR_700_SBCL_to_phase[] = {
	"DATA_OUT",
	"DATA_IN",
	"CMD_OUT",
	"STATE",
	"ILLEGAL PHASE",
	"ILLEGAL PHASE",
	"MSG OUT",
	"MSG IN",
};

/* This translates the SDTR message offset and period to a value
 * which can be loaded into the SXFER_REG.
 *
 * NOTE: According to SCSI-2, the true transfer period (in ns) is
 * actually four times this period value */
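/* As a worked example (assuming a 50MHz synchronous clock): an SDTR
 * period byte of 50 (i.e. 200ns) with offset 8 gives
 * XFERP = (200 * 50)/1000 - 4 = 6, so the value written to SXFER_REG
 * would be (8 & 0x0f) | (6 << 4) = 0x68 */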
static inline __u8
NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
			       __u8 offset, __u8 period)
{
	int XFERP;

	__u8 min_xferp = (hostdata->chip710
			  ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
	__u8 max_offset = (hostdata->chip710
			   ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);

	if(offset == 0)
		return 0;

	if(period < hostdata->min_period) {
		printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, hostdata->min_period*4);
		period = hostdata->min_period;
	}
	XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
	if(offset > max_offset) {
		printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
		       offset, max_offset);
		offset = max_offset;
	}
	if(XFERP < min_xferp) {
		XFERP = min_xferp;
	}
	return (offset & 0x0f) | (XFERP & 0x07)<<4;
}

static inline __u8
NCR_700_get_SXFER(struct scsi_device *SDp)
{
	struct NCR_700_Host_Parameters *hostdata =
		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];

	return NCR_700_offset_period_to_sxfer(hostdata,
					      spi_offset(SDp->sdev_target),
					      spi_period(SDp->sdev_target));
}

struct Scsi_Host *
NCR_700_detect(struct scsi_host_template *tpnt,
	       struct NCR_700_Host_Parameters *hostdata, struct device *dev)
{
	dma_addr_t pScript, pSlots;
	__u8 *memory;
	__u32 *script;
	struct Scsi_Host *host;
	static int banner = 0;
	int j;

	if(tpnt->sdev_attrs == NULL)
		tpnt->sdev_attrs = NCR_700_dev_attrs;

	memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
				       &pScript, GFP_KERNEL);
	if(memory == NULL) {
		printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
		return NULL;
	}

	script = (__u32 *)memory;
	hostdata->msgin = memory + MSGIN_OFFSET;
	hostdata->msgout = memory + MSGOUT_OFFSET;
	hostdata->status = memory + STATUS_OFFSET;
	/* all of these offsets are L1_CACHE_BYTES separated. It is fatal
	 * if this isn't sufficient separation to avoid dma flushing issues */
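	/* (The single dma_alloc_noncoherent() block above holds the script
	 * itself at the start plus the msgout/msgin/status buffers and the
	 * command slots, each at its *_OFFSET defined in 53c700.h.) */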
	BUG_ON(!dma_is_consistent(hostdata->dev, pScript) && L1_CACHE_BYTES < dma_get_cache_alignment());
	hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
	hostdata->dev = dev;

	pSlots = pScript + SLOTS_OFFSET;

	/* Fill in the missing routines from the host template */
	tpnt->queuecommand = NCR_700_queuecommand;
	tpnt->eh_abort_handler = NCR_700_abort;
	tpnt->eh_bus_reset_handler = NCR_700_bus_reset;
	tpnt->eh_host_reset_handler = NCR_700_host_reset;
	tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
	tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
	tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
	tpnt->use_clustering = ENABLE_CLUSTERING;
	tpnt->slave_configure = NCR_700_slave_configure;
	tpnt->slave_destroy = NCR_700_slave_destroy;
	tpnt->slave_alloc = NCR_700_slave_alloc;
	tpnt->change_queue_depth = NCR_700_change_queue_depth;
	tpnt->change_queue_type = NCR_700_change_queue_type;

	if(tpnt->name == NULL)
		tpnt->name = "53c700";
	if(tpnt->proc_name == NULL)
		tpnt->proc_name = "53c700";

	host = scsi_host_alloc(tpnt, 4);
	if (!host)
		return NULL;
	memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
	       * NCR_700_COMMAND_SLOTS_PER_HOST);
	for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
		dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
						 - (unsigned long)&hostdata->slots[0].SG[0]);
		hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
		if(j == 0)
			hostdata->free_list = &hostdata->slots[j];
		else
			hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
		hostdata->slots[j].state = NCR_700_SLOT_FREE;
	}

	for (j = 0; j < ARRAY_SIZE(SCRIPT); j++)
		script[j] = bS_to_host(SCRIPT[j]);

	/* adjust all labels to be bus physical */
	for (j = 0; j < PATCHES; j++)
		script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
	/* now patch up fixed addresses. */
	script_patch_32(hostdata->dev, script, MessageLocation,
			pScript + MSGOUT_OFFSET);
	script_patch_32(hostdata->dev, script, StatusAddress,
			pScript + STATUS_OFFSET);
	script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
			pScript + MSGIN_OFFSET);

	hostdata->script = script;
	hostdata->pScript = pScript;
	dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
	hostdata->state = NCR_700_HOST_FREE;
	hostdata->cmd = NULL;
	host->max_id = 8;
	host->max_lun = NCR_700_MAX_LUNS;
	BUG_ON(NCR_700_transport_template == NULL);
	host->transportt = NCR_700_transport_template;
	host->unique_id = (unsigned long)hostdata->base;
	hostdata->eh_complete = NULL;
	host->hostdata[0] = (unsigned long)hostdata;
	/* kick the chip */
	NCR_700_writeb(0xff, host, CTEST9_REG);
	if (hostdata->chip710)
		hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
	else
		hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
	hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
	if (banner == 0) {
		printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
		banner = 1;
	}
	printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
	       hostdata->chip710 ? "53c710" :
	       (hostdata->fast ? "53c700-66" : "53c700"),
	       hostdata->rev, hostdata->differential ?
	       "(Differential)" : "");
	/* reset the chip */
	NCR_700_chip_reset(host);

	if (scsi_add_host(host, dev)) {
		dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
		scsi_host_put(host);
		return NULL;
	}

	spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
		SPI_SIGNAL_SE;

	return host;
}
411
412int
413NCR_700_release(struct Scsi_Host *host)
414{
415 struct NCR_700_Host_Parameters *hostdata =
416 (struct NCR_700_Host_Parameters *)host->hostdata[0];
417
418 dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
419 hostdata->script, hostdata->pScript);
420 return 1;
421}
422
423static inline __u8
424NCR_700_identify(int can_disconnect, __u8 lun)
425{
426 return IDENTIFY_BASE |
427 ((can_disconnect) ? 0x40 : 0) |
428 (lun & NCR_700_LUN_MASK);
429}
430
431/*
432 * Function : static int data_residual (Scsi_Host *host)
433 *
434 * Purpose : return residual data count of what's in the chip. If you
435 * really want to know what this function is doing, it's almost a
436 * direct transcription of the algorithm described in the 53c710
437 * guide, except that the DBC and DFIFO registers are only 6 bits
438 * wide on a 53c700.
439 *
440 * Inputs : host - SCSI host */
441static inline int
442NCR_700_data_residual (struct Scsi_Host *host) {
443 struct NCR_700_Host_Parameters *hostdata =
444 (struct NCR_700_Host_Parameters *)host->hostdata[0];
445 int count, synchronous = 0;
446 unsigned int ddir;
447
448 if(hostdata->chip710) {
449 count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
450 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
451 } else {
452 count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
453 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
454 }
455
456 if(hostdata->fast)
457 synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
458
459 /* get the data direction */
460 ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
461
462 if (ddir) {
463 /* Receive */
464 if (synchronous)
465 count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
466 else
467 if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
468 ++count;
469 } else {
470 /* Send */
471 __u8 sstat = NCR_700_readb(host, SSTAT1_REG);
472 if (sstat & SODL_REG_FULL)
473 ++count;
474 if (synchronous && (sstat & SODR_REG_FULL))
475 ++count;
476 }
477#ifdef NCR_700_DEBUG
478 if(count)
479 printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
480#endif
481 return count;
482}
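/* (Note: the phase-mismatch handler in NCR_700_intr() adds this residual
 * to the untransferred byte count read from DBC_REG before re-sizing the
 * interrupted scatter-gather move.) */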
483
484/* print out the SCSI wires and corresponding phase from the SBCL register
485 * in the chip */
486static inline char *
487sbcl_to_string(__u8 sbcl)
488{
489 int i;
490 static char ret[256];
491
492 ret[0]='\0';
493 for(i=0; i<8; i++) {
494 if((1<<i) & sbcl)
495 strcat(ret, NCR_700_SBCL_bits[i]);
496 }
497 strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
498 return ret;
499}
500
501static inline __u8
502bitmap_to_number(__u8 bitmap)
503{
504 __u8 i;
505
506 for(i=0; i<8 && !(bitmap &(1<<i)); i++)
507 ;
508 return i;
509}
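/* (Used on reselection: the reselecting target's ID arrives as a single bit
 * in the SFBR/CTEST9 register and is converted back to a numeric ID here.) */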
510
511/* Pull a slot off the free list */
512STATIC struct NCR_700_command_slot *
513find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
514{
515 struct NCR_700_command_slot *slot = hostdata->free_list;
516
517 if(slot == NULL) {
518 /* sanity check */
519 if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
520 printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
521 return NULL;
522 }
523
524 if(slot->state != NCR_700_SLOT_FREE)
525 /* should panic! */
526 printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
527
528
529 hostdata->free_list = slot->ITL_forw;
530 slot->ITL_forw = NULL;
531
532
	/* NOTE: set the state to busy here, not queued, since this
	 * indicates the slot is in use and cannot be run by the IRQ
	 * finish routine. If we cannot queue the command when it
	 * is properly built, we then change to NCR_700_SLOT_QUEUED */
	slot->state = NCR_700_SLOT_BUSY;
	slot->flags = 0;
	hostdata->command_slot_count++;

	return slot;
}
543
544STATIC void
545free_slot(struct NCR_700_command_slot *slot,
546 struct NCR_700_Host_Parameters *hostdata)
547{
548 if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
549 printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
550 }
551 if(slot->state == NCR_700_SLOT_FREE) {
552 printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
553 }
554
555 slot->resume_offset = 0;
556 slot->cmnd = NULL;
557 slot->state = NCR_700_SLOT_FREE;
558 slot->ITL_forw = hostdata->free_list;
559 hostdata->free_list = slot;
560 hostdata->command_slot_count--;
561}
562
563
564/* This routine really does very little. The command is indexed on
565 the ITL and (if tagged) the ITLQ lists in _queuecommand */
566STATIC void
567save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
568 struct scsi_cmnd *SCp, __u32 dsp)
569{
	/* It's just possible that this gets executed twice */
571 if(SCp != NULL) {
572 struct NCR_700_command_slot *slot =
573 (struct NCR_700_command_slot *)SCp->host_scribble;
574
575 slot->resume_offset = dsp;
576 }
577 hostdata->state = NCR_700_HOST_FREE;
578 hostdata->cmd = NULL;
579}
580
STATIC inline void
NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
	      struct NCR_700_command_slot *slot)
{
	if(SCp->sc_data_direction != DMA_NONE &&
	   SCp->sc_data_direction != DMA_BIDIRECTIONAL)
		scsi_dma_unmap(SCp);
}
589
STATIC inline void
NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
		  struct scsi_cmnd *SCp, int result)
{
	hostdata->state = NCR_700_HOST_FREE;
	hostdata->cmd = NULL;

	if(SCp != NULL) {
		struct NCR_700_command_slot *slot =
			(struct NCR_700_command_slot *)SCp->host_scribble;

		dma_unmap_single(hostdata->dev, slot->pCmd,
				 MAX_COMMAND_SIZE, DMA_TO_DEVICE);
		if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
			char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
#ifdef NCR_700_DEBUG
			printk(" ORIGINAL CMD %p RETURNED %d, new return is %d sense is\n",
			       SCp, SCp->cmnd[7], result);
			scsi_print_sense("53c700", SCp);

#endif
			dma_unmap_single(hostdata->dev, slot->dma_handle,
					 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
			/* restore the old result if the request sense was
			 * successful */
			if (result == 0)
				result = cmnd[7];
			/* restore the original length */
			SCp->cmd_len = cmnd[8];
		} else
			NCR_700_unmap(hostdata, SCp, slot);

		free_slot(slot, hostdata);
#ifdef NCR_700_DEBUG
		if(NCR_700_get_depth(SCp->device) == 0 ||
		   NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
			printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
			       NCR_700_get_depth(SCp->device));
#endif /* NCR_700_DEBUG */
		NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);

		SCp->host_scribble = NULL;
		SCp->result = result;
		SCp->scsi_done(SCp);
	} else {
		printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
	}
}
638
639
640STATIC void
641NCR_700_internal_bus_reset(struct Scsi_Host *host)
642{
643 /* Bus reset */
644 NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
645 udelay(50);
646 NCR_700_writeb(0, host, SCNTL1_REG);
647
648}
649
STATIC void
NCR_700_chip_setup(struct Scsi_Host *host)
{
	struct NCR_700_Host_Parameters *hostdata =
		(struct NCR_700_Host_Parameters *)host->hostdata[0];
	__u8 min_period;
	__u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);

	if(hostdata->chip710) {
		__u8 burst_disable = 0;
		__u8 burst_length = 0;

		switch (hostdata->burst_length) {
		case 1:
			burst_length = BURST_LENGTH_1;
			break;
		case 2:
			burst_length = BURST_LENGTH_2;
			break;
		case 4:
			burst_length = BURST_LENGTH_4;
			break;
		case 8:
			burst_length = BURST_LENGTH_8;
			break;
		default:
			burst_disable = BURST_DISABLE;
			break;
		}
		hostdata->dcntl_extra |= COMPAT_700_MODE;

		NCR_700_writeb(hostdata->dcntl_extra, host, DCNTL_REG);
		NCR_700_writeb(burst_length | hostdata->dmode_extra,
			       host, DMODE_710_REG);
		NCR_700_writeb(burst_disable | hostdata->ctest7_extra |
			       (hostdata->differential ? DIFF : 0),
			       host, CTEST7_REG);
		NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
		NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
			       | AUTO_ATN, host, SCNTL0_REG);
	} else {
		NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
			       host, DMODE_700_REG);
		NCR_700_writeb(hostdata->differential ?
			       DIFF : 0, host, CTEST7_REG);
		if(hostdata->fast) {
			/* this is for 700-66, does nothing on 700 */
			NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION
				       | GENERATE_RECEIVE_PARITY, host,
				       CTEST8_REG);
		} else {
			NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
				       | PARITY | AUTO_ATN, host, SCNTL0_REG);
		}
	}

	NCR_700_writeb(1 << host->this_id, host, SCID_REG);
	NCR_700_writeb(0, host, SBCL_REG);
	NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);

	NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
		       | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);

	NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
	NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
	if(hostdata->clock > 75) {
		printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75MHz is the maximum this chip can be driven at\n", hostdata->clock);
		/* do the best we can, but the async clock will be out
		 * of spec: sync divider 2, async divider 3 */
		DEBUG(("53c700: sync 2 async 3\n"));
		NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
		NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
		hostdata->sync_clock = hostdata->clock/2;
	} else if(hostdata->clock > 50 && hostdata->clock <= 75) {
		/* sync divider 1.5, async divider 3 */
		DEBUG(("53c700: sync 1.5 async 3\n"));
		NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
		NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
		hostdata->sync_clock = hostdata->clock*2;
		hostdata->sync_clock /= 3;

	} else if(hostdata->clock > 37 && hostdata->clock <= 50) {
		/* sync divider 1, async divider 2 */
		DEBUG(("53c700: sync 1 async 2\n"));
		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
		NCR_700_writeb(ASYNC_DIV_2_0 | hostdata->dcntl_extra, host, DCNTL_REG);
		hostdata->sync_clock = hostdata->clock;
	} else if(hostdata->clock > 25 && hostdata->clock <=37) {
		/* sync divider 1, async divider 1.5 */
		DEBUG(("53c700: sync 1 async 1.5\n"));
		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
		NCR_700_writeb(ASYNC_DIV_1_5 | hostdata->dcntl_extra, host, DCNTL_REG);
		hostdata->sync_clock = hostdata->clock;
	} else {
		DEBUG(("53c700: sync 1 async 1\n"));
		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
		NCR_700_writeb(ASYNC_DIV_1_0 | hostdata->dcntl_extra, host, DCNTL_REG);
		/* sync divider 1, async divider 1 */
		hostdata->sync_clock = hostdata->clock;
	}
	/* Calculate the actual minimum period that can be supported
	 * by our synchronous clock speed. See the 710 manual for
	 * exact details of this calculation which is based on a
	 * setting of the SXFER register */
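	/* (The result is in the same 4ns units as the SDTR period byte: for
	 * example, a 50MHz sync clock and a minimum XFERP of 1 would give
	 * min_period = 1000*(4+1)/(4*50) = 25, i.e. 100ns.) */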
	min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
	hostdata->min_period = NCR_700_MIN_PERIOD;
	if(min_period > NCR_700_MIN_PERIOD)
		hostdata->min_period = min_period;
}
759
760STATIC void
761NCR_700_chip_reset(struct Scsi_Host *host)
762{
763 struct NCR_700_Host_Parameters *hostdata =
764 (struct NCR_700_Host_Parameters *)host->hostdata[0];
765 if(hostdata->chip710) {
766 NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
767 udelay(100);
768
769 NCR_700_writeb(0, host, ISTAT_REG);
770 } else {
771 NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
772 udelay(100);
773
774 NCR_700_writeb(0, host, DCNTL_REG);
775 }
776
777 mdelay(1000);
778
779 NCR_700_chip_setup(host);
780}
781
782/* The heart of the message processing engine is that the instruction
783 * immediately after the INT is the normal case (and so must be CLEAR
784 * ACK). If we want to do something else, we call that routine in
785 * scripts and set temp to be the normal case + 8 (skipping the CLEAR
786 * ACK) so that the routine returns correctly to resume its activity
787 * */
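/* (In process_message()/process_extended_message() below this appears as:
 * TEMP is loaded with dsp + 8 and the resume offset is set to the reply
 * routine, e.g. Ent_SendMessageWithATN, so its RETURN skips the CLEAR ACK.) */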
788STATIC __u32
789process_extended_message(struct Scsi_Host *host,
790 struct NCR_700_Host_Parameters *hostdata,
791 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
792{
793 __u32 resume_offset = dsp, temp = dsp + 8;
794 __u8 pun = 0xff, lun = 0xff;
795
796 if(SCp != NULL) {
797 pun = SCp->device->id;
798 lun = SCp->device->lun;
799 }
800
801 switch(hostdata->msgin[2]) {
802 case A_SDTR_MSG:
803 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
804 struct scsi_target *starget = SCp->device->sdev_target;
805 __u8 period = hostdata->msgin[3];
806 __u8 offset = hostdata->msgin[4];
807
808 if(offset == 0 || period == 0) {
809 offset = 0;
810 period = 0;
811 }
812
813 spi_offset(starget) = offset;
814 spi_period(starget) = period;
815
816 if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
817 spi_display_xfer_agreement(starget);
818 NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
819 }
820
821 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
822 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
823
824 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
825 host, SXFER_REG);
826
		} else {
			/* SDTR message out of the blue, reject it */
			shost_printk(KERN_WARNING, host,
				     "Unexpected SDTR msg\n");
			hostdata->msgout[0] = A_REJECT_MSG;
			dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
			script_patch_16(hostdata->dev, hostdata->script,
					MessageCount, 1);
			/* SendMsgOut returns, so set up the return
			 * address */
			resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
		}
		break;

	case A_WDTR_MSG:
		printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
		       host->host_no, pun, lun);
		hostdata->msgout[0] = A_REJECT_MSG;
		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
				1);
		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;

		break;

	default:
		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
		       host->host_no, pun, lun,
		       NCR_700_phase[(dsps & 0xf00) >> 8]);
		spi_print_msg(hostdata->msgin);
		printk("\n");
		/* just reject it */
		hostdata->msgout[0] = A_REJECT_MSG;
		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
				1);
		/* SendMsgOut returns, so set up the return
		 * address */
		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
	}
	NCR_700_writel(temp, host, TEMP_REG);
	return resume_offset;
}
870
871STATIC __u32
872process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata,
873 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
874{
875 /* work out where to return to */
876 __u32 temp = dsp + 8, resume_offset = dsp;
877 __u8 pun = 0xff, lun = 0xff;
878
879 if(SCp != NULL) {
880 pun = SCp->device->id;
881 lun = SCp->device->lun;
882 }
883
#ifdef NCR_700_DEBUG
	printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
	       NCR_700_phase[(dsps & 0xf00) >> 8]);
	spi_print_msg(hostdata->msgin);
	printk("\n");
#endif
890
891 switch(hostdata->msgin[0]) {
892
893 case A_EXTENDED_MSG:
894 resume_offset = process_extended_message(host, hostdata, SCp,
895 dsp, dsps);
896 break;
897
898 case A_REJECT_MSG:
899 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
900 /* Rejected our sync negotiation attempt */
901 spi_period(SCp->device->sdev_target) =
902 spi_offset(SCp->device->sdev_target) = 0;
903 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
904 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
		} else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
			/* rejected our first simple tag message */
			scmd_printk(KERN_WARNING, SCp,
				    "Rejected first tag queue attempt, turning off tag queueing\n");
			/* we're done negotiating */
			NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
			hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
			SCp->device->tagged_supported = 0;
			scsi_deactivate_tcq(SCp->device, host->cmd_per_lun);
		} else {
			shost_printk(KERN_WARNING, host,
				     "(%d:%d) Unexpected REJECT Message %s\n",
				     pun, lun,
				     NCR_700_phase[(dsps & 0xf00) >> 8]);
			/* however, just ignore it */
		}
921 break;
922
923 case A_PARITY_ERROR_MSG:
924 printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
925 pun, lun);
926 NCR_700_internal_bus_reset(host);
927 break;
928 case A_SIMPLE_TAG_MSG:
929 printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
930 pun, lun, hostdata->msgin[1],
931 NCR_700_phase[(dsps & 0xf00) >> 8]);
932 /* just ignore it */
933 break;
	default:
		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
		       host->host_no, pun, lun,
		       NCR_700_phase[(dsps & 0xf00) >> 8]);

		spi_print_msg(hostdata->msgin);
		printk("\n");
		/* just reject it */
		hostdata->msgout[0] = A_REJECT_MSG;
		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
				1);
		/* SendMsgOut returns, so set up the return
		 * address */
		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;

		break;
	}
	NCR_700_writel(temp, host, TEMP_REG);
	/* set us up to receive another message */
	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
	return resume_offset;
}
957
958STATIC __u32
959process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
960 struct Scsi_Host *host,
961 struct NCR_700_Host_Parameters *hostdata)
962{
963 __u32 resume_offset = 0;
964 __u8 pun = 0xff, lun=0xff;
965
966 if(SCp != NULL) {
967 pun = SCp->device->id;
968 lun = SCp->device->lun;
969 }
970
971 if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
972 DEBUG((" COMMAND COMPLETE, status=%02x\n",
973 hostdata->status[0]));
974 /* OK, if TCQ still under negotiation, we now know it works */
975 if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
976 NCR_700_set_tag_neg_state(SCp->device,
977 NCR_700_FINISHED_TAG_NEGOTIATION);
978
		/* check for contingent allegiance conditions */
980 if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
981 status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
			struct NCR_700_command_slot *slot =
				(struct NCR_700_command_slot *)SCp->host_scribble;
			if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
				/* OOPS: bad device, returning another
				 * contingent allegiance condition */
				scmd_printk(KERN_ERR, SCp,
					    "broken device is looping in contingent allegiance: ignoring\n");
				NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
			} else {
				char *cmnd =
					NCR_700_get_sense_cmnd(SCp->device);
#ifdef NCR_DEBUG
				scsi_print_command(SCp);
				printk(" cmd %p has status %d, requesting sense\n",
				       SCp, hostdata->status[0]);
#endif
				/* we can destroy the command here
				 * because the contingent allegiance
				 * condition will cause a retry which
				 * will re-copy the command from the
				 * saved data_cmnd. We also unmap any
				 * data associated with the command
				 * here */
				NCR_700_unmap(hostdata, SCp, slot);
				dma_unmap_single(hostdata->dev, slot->pCmd,
						 MAX_COMMAND_SIZE,
						 DMA_TO_DEVICE);

				cmnd[0] = REQUEST_SENSE;
				cmnd[1] = (SCp->device->lun & 0x7) << 5;
				cmnd[2] = 0;
				cmnd[3] = 0;
				cmnd[4] = SCSI_SENSE_BUFFERSIZE;
				cmnd[5] = 0;
				/* Here's a quiet hack: the
				 * REQUEST_SENSE command is six bytes,
				 * so store a flag indicating that
				 * this was an internal sense request
				 * and the original status at the end
				 * of the command */
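				/* (i.e. cmnd[6] = magic marker, cmnd[7] = original
				 * status, cmnd[8] = original cmd_len; these are
				 * picked up again in NCR_700_scsi_done()) */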
				cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
				cmnd[7] = hostdata->status[0];
				cmnd[8] = SCp->cmd_len;
				SCp->cmd_len = 6; /* command length for
						   * REQUEST_SENSE */
				slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
				slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
				slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | SCSI_SENSE_BUFFERSIZE);
				slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
				slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
				slot->SG[1].pAddr = 0;
				slot->resume_offset = hostdata->pScript;
				dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
				dma_cache_sync(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);

				/* queue the command for reissue */
				slot->state = NCR_700_SLOT_QUEUED;
				slot->flags = NCR_700_FLAG_AUTOSENSE;
				hostdata->state = NCR_700_HOST_FREE;
				hostdata->cmd = NULL;
			}
		} else {
			// Currently rely on the mid layer evaluation
			// of the tag queuing capability
			//
			//if(status_byte(hostdata->status[0]) == GOOD &&
			//   SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
			//	/* Piggy back the tag queueing support
			//	 * on this command */
			//	dma_sync_single_for_cpu(hostdata->dev,
			//			    slot->dma_handle,
			//			    SCp->request_bufflen,
			//			    DMA_FROM_DEVICE);
			//	if(((char *)SCp->request_buffer)[7] & 0x02) {
			//		scmd_printk(KERN_INFO, SCp,
			//			    "Enabling Tag Command Queuing\n");
			//		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
			//		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
			//	} else {
			//		NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
			//		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
			//	}
			//}
			NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
		}
	} else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
		__u8 i = (dsps & 0xf00) >> 8;

		scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n",
			    NCR_700_phase[i],
			    sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
		scmd_printk(KERN_ERR, SCp, " len = %d, cmd =",
			    SCp->cmd_len);
		scsi_print_command(SCp);

		NCR_700_internal_bus_reset(host);
1078 } else if((dsps & 0xfffff000) == A_FATAL) {
1079 int i = (dsps & 0xfff);
1080
1081 printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1082 host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1083 if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1084 printk(KERN_ERR " msg begins %02x %02x\n",
1085 hostdata->msgin[0], hostdata->msgin[1]);
1086 }
1087 NCR_700_internal_bus_reset(host);
1088 } else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1089#ifdef NCR_700_DEBUG
1090 __u8 i = (dsps & 0xf00) >> 8;
1091
1092 printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1093 host->host_no, pun, lun,
1094 i, NCR_700_phase[i]);
1095#endif
1096 save_for_reselection(hostdata, SCp, dsp);
1097
1098 } else if(dsps == A_RESELECTION_IDENTIFIED) {
1099 __u8 lun;
1100 struct NCR_700_command_slot *slot;
1101 __u8 reselection_id = hostdata->reselection_id;
1102 struct scsi_device *SDp;
1103
1104 lun = hostdata->msgin[0] & 0x1f;
1105
1106 hostdata->reselection_id = 0xff;
1107 DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1108 host->host_no, reselection_id, lun));
1109 /* clear the reselection indicator */
1110 SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1111 if(unlikely(SDp == NULL)) {
1112 printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1113 host->host_no, reselection_id, lun);
1114 BUG();
1115 }
1116 if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1117 struct scsi_cmnd *SCp = scsi_find_tag(SDp, hostdata->msgin[2]);
1118 if(unlikely(SCp == NULL)) {
1119 printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n",
1120 host->host_no, reselection_id, lun, hostdata->msgin[2]);
1121 BUG();
1122 }
1123
1124 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
			DDEBUG(KERN_DEBUG, SDp,
			       "reselection is tag %d, slot %p(%d)\n",
			       hostdata->msgin[2], slot, slot->tag);
		} else {
1129 struct scsi_cmnd *SCp = scsi_find_tag(SDp, SCSI_NO_TAG);
1130 if(unlikely(SCp == NULL)) {
				sdev_printk(KERN_ERR, SDp,
					    "no saved request for untagged cmd\n");
				BUG();
1134 }
1135 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1136 }
1137
1138 if(slot == NULL) {
1139 printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1140 host->host_no, reselection_id, lun,
1141 hostdata->msgin[0], hostdata->msgin[1],
1142 hostdata->msgin[2]);
1143 } else {
1144 if(hostdata->state != NCR_700_HOST_BUSY)
1145 printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1146 host->host_no);
1147 resume_offset = slot->resume_offset;
1148 hostdata->cmd = slot->cmnd;
1149
1150 /* re-patch for this command */
			script_patch_32_abs(hostdata->dev, hostdata->script,
					    CommandAddress, slot->pCmd);
			script_patch_16(hostdata->dev, hostdata->script,
					CommandCount, slot->cmnd->cmd_len);
			script_patch_32_abs(hostdata->dev, hostdata->script,
					    SGScriptStartAddress,
					    to32bit(&slot->pSG[0].ins));
1158
1159 /* Note: setting SXFER only works if we're
1160 * still in the MESSAGE phase, so it is vital
1161 * that ACK is still asserted when we process
1162 * the reselection message. The resume offset
1163 * should therefore always clear ACK */
1164 NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1165 host, SXFER_REG);
			dma_cache_sync(hostdata->dev, hostdata->msgin,
				       MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
			dma_cache_sync(hostdata->dev, hostdata->msgout,
				       MSG_ARRAY_SIZE, DMA_TO_DEVICE);
			/* I'm just being paranoid here, the command should
			 * already have been flushed from the cache */
			dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
				       slot->cmnd->cmd_len, DMA_TO_DEVICE);
1174
1175
1176
1177 }
1178 } else if(dsps == A_RESELECTED_DURING_SELECTION) {
1179
		/* This section is full of debugging code because I've
		 * never managed to reach it. I think what happens is
		 * that, because the 700 runs with selection interrupts
		 * enabled the whole time, we take a selection interrupt
		 * before we manage to get to the reselected script
		 * interrupt */
1186
1187 __u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1188 struct NCR_700_command_slot *slot;
1189
1190 /* Take out our own ID */
1191 reselection_id &= ~(1<<host->this_id);
1192
1193 /* I've never seen this happen, so keep this as a printk rather
1194 * than a debug */
1195 printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1196 host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1197
1198 {
1199 /* FIXME: DEBUGGING CODE */
1200 __u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1201 int i;
1202
1203 for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1204 if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1205 && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1206 break;
1207 }
1208 printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1209 SCp = hostdata->slots[i].cmnd;
1210 }
1211
1212 if(SCp != NULL) {
1213 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1214 /* change slot from busy to queued to redo command */
1215 slot->state = NCR_700_SLOT_QUEUED;
1216 }
1217 hostdata->cmd = NULL;
1218
1219 if(reselection_id == 0) {
1220 if(hostdata->reselection_id == 0xff) {
1221 printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1222 return 0;
1223 } else {
1224 printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1225 host->host_no);
1226 reselection_id = hostdata->reselection_id;
1227 }
1228 } else {
1229
1230 /* convert to real ID */
1231 reselection_id = bitmap_to_number(reselection_id);
1232 }
1233 hostdata->reselection_id = reselection_id;
1234 /* just in case we have a stale simple tag message, clear it */
1235 hostdata->msgin[1] = 0;
		dma_cache_sync(hostdata->dev, hostdata->msgin,
			       MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
1238 if(hostdata->tag_negotiated & (1<<reselection_id)) {
1239 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1240 } else {
1241 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1242 }
1243 } else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1244 /* we've just disconnected from the bus, do nothing since
1245 * a return here will re-run the queued command slot
1246 * that may have been interrupted by the initial selection */
1247 DEBUG((" SELECTION COMPLETED\n"));
1248 } else if((dsps & 0xfffff0f0) == A_MSG_IN) {
1249 resume_offset = process_message(host, hostdata, SCp,
1250 dsp, dsps);
1251 } else if((dsps & 0xfffff000) == 0) {
1252 __u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1253 printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1254 host->host_no, pun, lun, NCR_700_condition[i],
1255 NCR_700_phase[j], dsp - hostdata->pScript);
		if(SCp != NULL) {
			struct scatterlist *sg;

			scsi_print_command(SCp);
			scsi_for_each_sg(SCp, sg, scsi_sg_count(SCp) + 1, i) {
				printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, sg->length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
			}
		}
1264 NCR_700_internal_bus_reset(host);
1265 } else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1266 printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1267 host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1268 resume_offset = dsp;
1269 } else {
1270 printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1271 host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1272 NCR_700_internal_bus_reset(host);
1273 }
1274 return resume_offset;
1275}
1276
/* We run the 53c700 with selection interrupts always enabled. This
 * means that the chip may be selected as soon as the bus frees. On a
 * busy bus, this can be before the scripts engine finishes its
 * processing. Therefore, part of the selection processing has to be
 * to find out what the scripts engine is doing and complete the
 * function if necessary (i.e. process the pending disconnect or save
 * the interrupted initial selection) */
1284STATIC inline __u32
1285process_selection(struct Scsi_Host *host, __u32 dsp)
1286{
1287 __u8 id = 0; /* Squash compiler warning */
1288 int count = 0;
1289 __u32 resume_offset = 0;
1290 struct NCR_700_Host_Parameters *hostdata =
1291 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1292 struct scsi_cmnd *SCp = hostdata->cmd;
1293 __u8 sbcl;
1294
1295 for(count = 0; count < 5; count++) {
1296 id = NCR_700_readb(host, hostdata->chip710 ?
1297 CTEST9_REG : SFBR_REG);
1298
1299 /* Take out our own ID */
1300 id &= ~(1<<host->this_id);
1301 if(id != 0)
1302 break;
1303 udelay(5);
1304 }
1305 sbcl = NCR_700_readb(host, SBCL_REG);
1306 if((sbcl & SBCL_IO) == 0) {
1307 /* mark as having been selected rather than reselected */
1308 id = 0xff;
1309 } else {
1310 /* convert to real ID */
1311 hostdata->reselection_id = id = bitmap_to_number(id);
1312 DEBUG(("scsi%d: Reselected by %d\n",
1313 host->host_no, id));
1314 }
1315 if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1316 struct NCR_700_command_slot *slot =
1317 (struct NCR_700_command_slot *)SCp->host_scribble;
1318 DEBUG((" ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1319
1320 switch(dsp - hostdata->pScript) {
1321 case Ent_Disconnect1:
1322 case Ent_Disconnect2:
1323 save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1324 break;
1325 case Ent_Disconnect3:
1326 case Ent_Disconnect4:
1327 save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1328 break;
1329 case Ent_Disconnect5:
1330 case Ent_Disconnect6:
1331 save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1332 break;
1333 case Ent_Disconnect7:
1334 case Ent_Disconnect8:
1335 save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1336 break;
1337 case Ent_Finish1:
1338 case Ent_Finish2:
1339 process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1340 break;
1341
1342 default:
1343 slot->state = NCR_700_SLOT_QUEUED;
1344 break;
1345 }
1346 }
1347 hostdata->state = NCR_700_HOST_BUSY;
1348 hostdata->cmd = NULL;
1349 /* clear any stale simple tag message */
1350 hostdata->msgin[1] = 0;
	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
		       DMA_BIDIRECTIONAL);
1353
1354 if(id == 0xff) {
1355 /* Selected as target, Ignore */
1356 resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1357 } else if(hostdata->tag_negotiated & (1<<id)) {
1358 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1359 } else {
1360 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1361 }
1362 return resume_offset;
1363}
1364
1365static inline void
1366NCR_700_clear_fifo(struct Scsi_Host *host) {
1367 const struct NCR_700_Host_Parameters *hostdata
1368 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1369 if(hostdata->chip710) {
1370 NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1371 } else {
1372 NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1373 }
1374}
1375
1376static inline void
1377NCR_700_flush_fifo(struct Scsi_Host *host) {
1378 const struct NCR_700_Host_Parameters *hostdata
1379 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1380 if(hostdata->chip710) {
1381 NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1382 udelay(10);
1383 NCR_700_writeb(0, host, CTEST8_REG);
1384 } else {
1385 NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1386 udelay(10);
1387 NCR_700_writeb(0, host, DFIFO_REG);
1388 }
1389}
1390
1391
1392/* The queue lock with interrupts disabled must be held on entry to
1393 * this function */
1394STATIC int
1395NCR_700_start_command(struct scsi_cmnd *SCp)
1396{
1397 struct NCR_700_command_slot *slot =
1398 (struct NCR_700_command_slot *)SCp->host_scribble;
1399 struct NCR_700_Host_Parameters *hostdata =
1400 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1401 __u16 count = 1; /* for IDENTIFY message */
1402
1403 if(hostdata->state != NCR_700_HOST_FREE) {
1404 /* keep this inside the lock to close the race window where
1405 * the running command finishes on another CPU while we don't
1406 * change the state to queued on this one */
1407 slot->state = NCR_700_SLOT_QUEUED;
1408
1409 DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1410 SCp->device->host->host_no, slot->cmnd, slot));
1411 return 0;
1412 }
1413 hostdata->state = NCR_700_HOST_BUSY;
1414 hostdata->cmd = SCp;
1415 slot->state = NCR_700_SLOT_BUSY;
1416 /* keep interrupts disabled until we have the command correctly
1417 * set up so we cannot take a selection interrupt */
1418
	hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE &&
						slot->flags != NCR_700_FLAG_AUTOSENSE),
					       SCp->device->lun);
1422 /* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1423 * if the negotiated transfer parameters still hold, so
1424 * always renegotiate them */
	if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE ||
	   slot->flags == NCR_700_FLAG_AUTOSENSE) {
		NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
	}
1429
1430 /* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1431 * If a contingent allegiance condition exists, the device
1432 * will refuse all tags, so send the request sense as untagged
1433 * */
	if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
	   && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE &&
	       slot->flags != NCR_700_FLAG_AUTOSENSE)) {
		count += scsi_populate_tag_msg(SCp, &hostdata->msgout[count]);
	}
1439
1440 if(hostdata->fast &&
1441 NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
		count += spi_populate_sync_msg(&hostdata->msgout[count],
				spi_period(SCp->device->sdev_target),
				spi_offset(SCp->device->sdev_target));
		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
	}
1447
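	/* (hostdata->msgout now holds IDENTIFY [+ tag message] [+ SDTR];
	 * the script's MessageCount is patched with the final byte count.) */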
	script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);


	script_patch_ID(hostdata->dev, hostdata->script,
			Device_ID, 1<<scmd_id(SCp));

	script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
			    slot->pCmd);
	script_patch_16(hostdata->dev, hostdata->script, CommandCount,
			SCp->cmd_len);
	/* finally plumb the beginning of the SG list into the script
	 * */
	script_patch_32_abs(hostdata->dev, hostdata->script,
			    SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
	NCR_700_clear_fifo(SCp->device->host);

	if(slot->resume_offset == 0)
		slot->resume_offset = hostdata->pScript;
	/* now perform all the writebacks and invalidates */
	dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
		       DMA_FROM_DEVICE);
	dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
	dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
1472
1473 /* set the synchronous period/offset */
1474 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1475 SCp->device->host, SXFER_REG);
1476 NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1477 NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1478
1479 return 1;
1480}
1481
irqreturn_t
NCR_700_intr(int irq, void *dev_id)
{
1485 struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1486 struct NCR_700_Host_Parameters *hostdata =
1487 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1488 __u8 istat;
1489 __u32 resume_offset = 0;
1490 __u8 pun = 0xff, lun = 0xff;
1491 unsigned long flags;
1492 int handled = 0;
1493
	/* Use the host lock to serialise access to the 53c700
	 * hardware. Note: In future, we may need to take the queue
	 * lock to enter the done routines. When that happens, we
	 * need to ensure that for this driver, the host lock and the
	 * queue lock point to the same thing. */
1499 spin_lock_irqsave(host->host_lock, flags);
1500 if((istat = NCR_700_readb(host, ISTAT_REG))
1501 & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1502 __u32 dsps;
1503 __u8 sstat0 = 0, dstat = 0;
1504 __u32 dsp;
1505 struct scsi_cmnd *SCp = hostdata->cmd;
1506 enum NCR_700_Host_State state;
1507
1508 handled = 1;
1509 state = hostdata->state;
1510 SCp = hostdata->cmd;
1511
1512 if(istat & SCSI_INT_PENDING) {
1513 udelay(10);
1514
1515 sstat0 = NCR_700_readb(host, SSTAT0_REG);
1516 }
1517
1518 if(istat & DMA_INT_PENDING) {
1519 udelay(10);
1520
1521 dstat = NCR_700_readb(host, DSTAT_REG);
1522 }
1523
1524 dsps = NCR_700_readl(host, DSPS_REG);
1525 dsp = NCR_700_readl(host, DSP_REG);
1526
1527 DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1528 host->host_no, istat, sstat0, dstat,
1529 (dsp - (__u32)(hostdata->pScript))/4,
1530 dsp, dsps));
1531
1532 if(SCp != NULL) {
1533 pun = SCp->device->id;
1534 lun = SCp->device->lun;
1535 }
1536
1537 if(sstat0 & SCSI_RESET_DETECTED) {
1538 struct scsi_device *SDp;
1539 int i;
1540
1541 hostdata->state = NCR_700_HOST_BUSY;
1542
1543 printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1544 host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1545
1546 scsi_report_bus_reset(host, 0);
1547
1548 /* clear all the negotiated parameters */
1549 __shost_for_each_device(SDp, host)
				NCR_700_clear_flag(SDp, ~0);
1551
1552 /* clear all the slots and their pending commands */
1553 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1554 struct scsi_cmnd *SCp;
1555 struct NCR_700_command_slot *slot =
1556 &hostdata->slots[i];
1557
1558 if(slot->state == NCR_700_SLOT_FREE)
1559 continue;
1560
1561 SCp = slot->cmnd;
1562 printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1563 slot, SCp);
1564 free_slot(slot, hostdata);
1565 SCp->host_scribble = NULL;
1566 NCR_700_set_depth(SCp->device, 0);
1567 /* NOTE: deadlock potential here: we
1568 * rely on mid-layer guarantees that
1569 * scsi_done won't try to issue the
1570 * command again otherwise we'll
1571 * deadlock on the
1572 * hostdata->state_lock */
1573 SCp->result = DID_RESET << 16;
1574 SCp->scsi_done(SCp);
1575 }
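 /* presumably this delay lets the SCSI bus settle after the
  * reset before the chip is reprogrammed */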
1576 mdelay(25);
1577 NCR_700_chip_setup(host);
1578
1579 hostdata->state = NCR_700_HOST_FREE;
1580 hostdata->cmd = NULL;
1581 /* signal back if this was an eh induced reset */
1582 if(hostdata->eh_complete != NULL)
1583 complete(hostdata->eh_complete);
1584 goto out_unlock;
1585 } else if(sstat0 & SELECTION_TIMEOUT) {
1586 DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1587 host->host_no, pun, lun));
1588 NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1589 } else if(sstat0 & PHASE_MISMATCH) {
1590 struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1591 (struct NCR_700_command_slot *)SCp->host_scribble;
1592
1593 if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1594 /* It wants to reply to some part of
1595 * our message */
1596#ifdef NCR_700_DEBUG
1597 __u32 temp = NCR_700_readl(host, TEMP_REG);
1598 int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1599 printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1600#endif
1601 resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1602 } else if(dsp >= to32bit(&slot->pSG[0].ins) &&
1603 dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
1604 int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1605 int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1606 int residual = NCR_700_data_residual(host);
1607 int i;
1608#ifdef NCR_700_DEBUG
1609 __u32 naddr = NCR_700_readl(host, DNAD_REG);
1610
1611 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1612 host->host_no, pun, lun,
1613 SGcount, data_transfer);
1614 scsi_print_command(SCp);
1615 if(residual) {
1616 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1617 host->host_no, pun, lun,
1618 SGcount, data_transfer, residual);
1619 }
1620#endif
1621 data_transfer += residual;
1622
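 /* data_transfer now holds the bytes of the interrupted
  * scatter element that were NOT moved (the DBC residue plus
  * anything still in the FIFOs); shrink that SG move to the
  * remainder and advance its address past what was already
  * transferred so the script can resume mid-element */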
1623 if(data_transfer != 0) {
1624 int count;
1625 __u32 pAddr;
1626
1627 SGcount--;
1628
1629 count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1630 DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1631 slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1632 slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1633 pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1634 pAddr += (count - data_transfer);
1635#ifdef NCR_700_DEBUG
1636 if(pAddr != naddr) {
1637 printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1638 }
1639#endif
1640 slot->SG[SGcount].pAddr = bS_to_host(pAddr);
1641 }
1642 /* set the executed moves to nops */
1643 for(i=0; i<SGcount; i++) {
1644 slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1645 slot->SG[i].pAddr = 0;
1646 }
1647 dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1648 /* and pretend we disconnected after
1649 * the command phase */
1650 resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1651 /* make sure all the data is flushed */
1652 NCR_700_flush_fifo(host);
1653 } else {
1654 __u8 sbcl = NCR_700_readb(host, SBCL_REG);
1655 printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1656 host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1657 NCR_700_internal_bus_reset(host);
1658 }
1659
1660 } else if(sstat0 & SCSI_GROSS_ERROR) {
1661 printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1662 host->host_no, pun, lun);
1663 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1664 } else if(sstat0 & PARITY_ERROR) {
1665 printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1666 host->host_no, pun, lun);
1667 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1668 } else if(dstat & SCRIPT_INT_RECEIVED) {
1669 DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1670 host->host_no, pun, lun));
1671 resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1672 } else if(dstat & (ILGL_INST_DETECTED)) {
1673 printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1674 " Please email James.Bottomley@HansenPartnership.com with the details\n",
1675 host->host_no, pun, lun,
1676 dsp, dsp - hostdata->pScript);
1677 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1678 } else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1679 printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1680 host->host_no, pun, lun, dstat);
1681 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1682 }
1683
1684
1685 /* NOTE: selection interrupt processing MUST occur
1686 * after script interrupt processing to correctly cope
1687 * with the case where we process a disconnect and
1688 * then get reselected before we process the
1689 * disconnection */
1690 if(sstat0 & SELECTED) {
1691 /* FIXME: It currently takes at least FOUR
1692 * interrupts to complete a command that
1693 * disconnects: one for the disconnect, one
1694 * for the reselection, one to get the
1695 * reselection data and one to complete the
1696 * command. If we guess the reselected
1697 * command here and prepare it, we only need
1698 * to get a reselection data interrupt if we
1699 * guessed wrongly. Since the interrupt
1700 * overhead is much greater than the command
1701 * setup, this would be an efficient
1702 * optimisation particularly as we probably
1703 * only have one outstanding command on a
1704 * target most of the time */
1705
1706 resume_offset = process_selection(host, dsp);
1707
1708 }
1709
1710 }
1711
1712 if(resume_offset) {
1713 if(hostdata->state != NCR_700_HOST_BUSY) {
1714 printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1715 host->host_no, resume_offset, resume_offset - hostdata->pScript);
1716 hostdata->state = NCR_700_HOST_BUSY;
1717 }
1718
1719 DEBUG(("Attempting to resume at %x\n", resume_offset));
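 /* clearing the FIFOs and then writing DSP restarts the
  * SCRIPTS processor at resume_offset */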
1720 NCR_700_clear_fifo(host);
1721 NCR_700_writel(resume_offset, host, DSP_REG);
1722 }
1723 /* There is probably a technical no-no about this: If we're a
1724 * shared interrupt and we got this interrupt because the
1725 * other device needs servicing not us, we're still going to
1726 * check our queued commands here---of course, there shouldn't
1727 * be any outstanding.... */
1728 if(hostdata->state == NCR_700_HOST_FREE) {
1729 int i;
1730
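 /* only one queued command is started per pass: the break
  * below exits after the first queued slot is found, and
  * saved_slot_position rotates the starting point so every
  * slot eventually gets a turn */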
1731 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1732 /* fairness: always run the queue from the last
1733 * position we left off */
1734 int j = (i + hostdata->saved_slot_position)
1735 % NCR_700_COMMAND_SLOTS_PER_HOST;
1736
1737 if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1738 continue;
1739 if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1740 DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1741 host->host_no, &hostdata->slots[j],
1742 hostdata->slots[j].cmnd));
1743 hostdata->saved_slot_position = j + 1;
1744 }
1745
1746 break;
1747 }
1748 }
1749 out_unlock:
1750 spin_unlock_irqrestore(host->host_lock, flags);
1751 return IRQ_RETVAL(handled);
1752}
1753
1754STATIC int
1755NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1756{
1757 struct NCR_700_Host_Parameters *hostdata =
1758 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1759 __u32 move_ins;
1760 enum dma_data_direction direction;
1761 struct NCR_700_command_slot *slot;
1762
1763 if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1764 /* We're over our allocation, this should never happen
1765 * since we report the max allocation to the mid layer */
1766 printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1767 return 1;
1768 }
1769 /* check for untagged commands. We cannot have any outstanding
1770 * commands if we accept them. Commands could be untagged because:
1771 *
1772 * - The tag negotiated bitmap is clear
1773 * - The blk layer sent an untagged command
1774 */
1775 if(NCR_700_get_depth(SCp->device) != 0
1776 && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1777 || !blk_rq_tagged(SCp->request))) {
1778 CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
1779 NCR_700_get_depth(SCp->device));
1780 return SCSI_MLQUEUE_DEVICE_BUSY;
1781 }
1782 if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1783 CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
1784 NCR_700_get_depth(SCp->device));
1785 return SCSI_MLQUEUE_DEVICE_BUSY;
1786 }
1787 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1788
1789 /* begin the command here */
1790 /* no need to check for NULL, test for command_slot_count above
1791 * ensures a slot is free */
1792 slot = find_empty_slot(hostdata);
1793
1794 slot->cmnd = SCp;
1795
1796 SCp->scsi_done = done;
1797 SCp->host_scribble = (unsigned char *)slot;
1798 SCp->SCp.ptr = NULL;
1799 SCp->SCp.buffer = NULL;
1800
1801#ifdef NCR_700_DEBUG
1802 printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1803 scsi_print_command(SCp);
1804#endif
1805 if(blk_rq_tagged(SCp->request)
1806 && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
1807 && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1808 scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
1809 hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1810 NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
1811 }
1812
1813 /* here we may have to process an untagged command. The gate
1814 * above ensures that this will be the only one outstanding,
1815 * so clear the tag negotiated bit.
1816 *
1817 * FIXME: This will royally screw up on multiple LUN devices
1818 * */
1819 if(!blk_rq_tagged(SCp->request)
1820 && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
1821 scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
1822 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1823 }
1824
1825 if((hostdata->tag_negotiated &(1<<scmd_id(SCp)))
1826 && scsi_get_tag_type(SCp->device)) {
1827 slot->tag = SCp->request->tag;
1828 CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
1829 slot->tag, slot);
1830 } else {
1831 slot->tag = SCSI_NO_TAG;
1832 /* must populate current_cmnd for scsi_find_tag to work */
1833 SCp->device->current_cmnd = SCp;
1834 }
1835 /* sanity check: some of the commands generated by the mid-layer
1836 * have an eccentric idea of their sc_data_direction */
1837 if(!scsi_sg_count(SCp) && !scsi_bufflen(SCp) &&
1838 SCp->sc_data_direction != DMA_NONE) {
1839#ifdef NCR_700_DEBUG
1840 printk("53c700: Command");
1841 scsi_print_command(SCp);
1842 printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1843#endif
1844 SCp->sc_data_direction = DMA_NONE;
1845 }
1846
1847 switch (SCp->cmnd[0]) {
1848 case REQUEST_SENSE:
1849 /* clear the internal sense magic */
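 /* (cmnd[6] appears to be used by the driver to mark its own
  * autosense REQUEST SENSE commands, so it must not be left
  * set on one issued by the mid-layer) */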
1850 SCp->cmnd[6] = 0;
1851 /* fall through */
1852 default:
1853 /* OK, get it from the command */
1854 switch(SCp->sc_data_direction) {
1855 case DMA_BIDIRECTIONAL:
1856 default:
1857 printk(KERN_ERR "53c700: Unknown command for data direction ");
1858 scsi_print_command(SCp);
1859
1860 move_ins = 0;
1861 break;
1862 case DMA_NONE:
1863 move_ins = 0;
1864 break;
1865 case DMA_FROM_DEVICE:
1866 move_ins = SCRIPT_MOVE_DATA_IN;
1867 break;
1868 case DMA_TO_DEVICE:
1869 move_ins = SCRIPT_MOVE_DATA_OUT;
1870 break;
1871 }
1872 }
1873
1874 /* now build the scatter gather list */
1875 direction = SCp->sc_data_direction;
1876 if(move_ins != 0) {
1877 int i;
1878 int sg_count;
1879 dma_addr_t vPtr = 0;
1880 struct scatterlist *sg;
1881 __u32 count = 0;
1882
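 /* each dma-mapped scatterlist entry becomes one SCRIPTS MOVE
  * instruction (direction opcode | byte count, bus address),
  * terminated by a RETURN; the list is then written back so
  * the chip sees the updated instructions */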
1883 sg_count = scsi_dma_map(SCp);
1884 BUG_ON(sg_count < 0);
1885
1886 scsi_for_each_sg(SCp, sg, sg_count, i) {
1887 vPtr = sg_dma_address(sg);
1888 count = sg_dma_len(sg);
1889
1890 slot->SG[i].ins = bS_to_host(move_ins | count);
1891 DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1892 i, count, slot->SG[i].ins, (unsigned long)vPtr));
1893 slot->SG[i].pAddr = bS_to_host(vPtr);
1894 }
1895 slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1896 slot->SG[i].pAddr = 0;
1897 dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1898 DEBUG((" SETTING %08lx to %x\n",
1899 (&slot->pSG[i].ins),
1900 slot->SG[i].ins));
1901 }
1902 slot->resume_offset = 0;
1903 slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1904 MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1905 NCR_700_start_command(SCp);
1906 return 0;
1907}
1908
1909STATIC int
1910NCR_700_abort(struct scsi_cmnd * SCp)
1911{
1912 struct NCR_700_command_slot *slot;
1913
1914 scmd_printk(KERN_INFO, SCp,
1915 "New error handler wants to abort command\n\t");
1916 scsi_print_command(SCp);
1917
1918 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1919
1920 if(slot == NULL)
1921 /* no outstanding command to abort */
1922 return SUCCESS;
1923 if(SCp->cmnd[0] == TEST_UNIT_READY) {
1924 /* FIXME: This is because of a problem in the new
1925 * error handler. When it is in error recovery, it
1926 * will send a TUR to a device it thinks may still be
1927 * showing a problem. If the TUR isn't responded to,
1928 * it will abort it and mark the device off line.
1929 * Unfortunately, it does no other error recovery, so
1930 * this would leave us with an outstanding command
1931 * occupying a slot. Rather than allow this to
1932 * happen, we issue a bus reset to force all
1933 * outstanding commands to terminate here. */
1934 NCR_700_internal_bus_reset(SCp->device->host);
1935 /* still drop through and return failed */
1936 }
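 /* nothing is actually aborted here; returning FAILED pushes
  * the mid-layer on to its stronger recovery steps */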
1937 return FAILED;
1938
1939}
1940
1941STATIC int
1942NCR_700_bus_reset(struct scsi_cmnd * SCp)
1943{
1944 DECLARE_COMPLETION_ONSTACK(complete);
1945 struct NCR_700_Host_Parameters *hostdata =
1946 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1947
1948 scmd_printk(KERN_INFO, SCp,
1949 "New error handler wants BUS reset, cmd %p\n\t", SCp);
1950 scsi_print_command(SCp);
1951
1952 /* In theory, eh_complete should always be null because the
1953 * eh is single threaded, but just in case we're handling a
1954 * reset via sg or something */
1955 spin_lock_irq(SCp->device->host->host_lock);
1956 while (hostdata->eh_complete != NULL) {
1957 spin_unlock_irq(SCp->device->host->host_lock);
1958 msleep_interruptible(100);
1959 spin_lock_irq(SCp->device->host->host_lock);
1960 }
1961
1962 hostdata->eh_complete = &complete;
1963 NCR_700_internal_bus_reset(SCp->device->host);
1964
1965 spin_unlock_irq(SCp->device->host->host_lock);
1966 wait_for_completion(&complete);
1967 spin_lock_irq(SCp->device->host->host_lock);
1968
1969 hostdata->eh_complete = NULL;
1970 /* Revalidate the transport parameters of the failing device */
1971 if(hostdata->fast)
1972 spi_schedule_dv_device(SCp->device);
1973
1974 spin_unlock_irq(SCp->device->host->host_lock);
1975 return SUCCESS;
1976}
1977
1978STATIC int
1979NCR_700_host_reset(struct scsi_cmnd * SCp)
1980{
1981 scmd_printk(KERN_INFO, SCp, "New error handler wants HOST reset\n\t");
1982 scsi_print_command(SCp);
1983
1984 spin_lock_irq(SCp->device->host->host_lock);
1985
1986 NCR_700_internal_bus_reset(SCp->device->host);
1987 NCR_700_chip_reset(SCp->device->host);
1988
1989 spin_unlock_irq(SCp->device->host->host_lock);
1990
1991 return SUCCESS;
1992}
1993
1994STATIC void
1995NCR_700_set_period(struct scsi_target *STp, int period)
1996{
1997 struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1998 struct NCR_700_Host_Parameters *hostdata =
1999 (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2000
2001 if(!hostdata->fast)
2002 return;
2003
2004 if(period < hostdata->min_period)
2005 period = hostdata->min_period;
2006
2007 spi_period(STp) = period;
2008 spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2009 NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2010 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2011}
2012
2013STATIC void
2014NCR_700_set_offset(struct scsi_target *STp, int offset)
2015{
2016 struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
2017 struct NCR_700_Host_Parameters *hostdata =
2018 (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2019 int max_offset = hostdata->chip710
2020 ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
2021
2022 if(!hostdata->fast)
2023 return;
2024
2025 if(offset > max_offset)
2026 offset = max_offset;
2027
2028 /* if we're currently async, make sure the period is reasonable */
2029 if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
2030 spi_period(STp) > 0xff))
2031 spi_period(STp) = hostdata->min_period;
2032
2033 spi_offset(STp) = offset;
2034 spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2035 NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2036 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2037}
2038
2039STATIC int
2040NCR_700_slave_alloc(struct scsi_device *SDp)
2041{
2042 SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
2043 GFP_KERNEL);
2044
2045 if (!SDp->hostdata)
2046 return -ENOMEM;
2047
2048 return 0;
2049}
2050
2051STATIC int
2052NCR_700_slave_configure(struct scsi_device *SDp)
2053{
2054 struct NCR_700_Host_Parameters *hostdata =
2055 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2056
2057 /* to do here: allocate memory; build a queue_full list */
2058 if(SDp->tagged_supported) {
2059 scsi_set_tag_type(SDp, MSG_ORDERED_TAG);
2060 scsi_activate_tcq(SDp, NCR_700_DEFAULT_TAGS);
2061 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2062 } else {
2063 /* initialise to default depth */
2064 scsi_adjust_queue_depth(SDp, 0, SDp->host->cmd_per_lun);
2065 }
2066 if(hostdata->fast) {
2067 /* Find the correct offset and period via domain validation */
2068 if (!spi_initial_dv(SDp->sdev_target))
2069 spi_dv_device(SDp);
2070 } else {
2071 spi_offset(SDp->sdev_target) = 0;
2072 spi_period(SDp->sdev_target) = 0;
2073 }
2074 return 0;
2075}
2076
2077STATIC void
2078NCR_700_slave_destroy(struct scsi_device *SDp)
2079{
2080 kfree(SDp->hostdata);
2081 SDp->hostdata = NULL;
2082}
2083
2084static int
2085 NCR_700_change_queue_depth(struct scsi_device *SDp, int depth, int reason)
2086 {
2087 if (reason != SCSI_QDEPTH_DEFAULT)
2088 return -EOPNOTSUPP;
2089
2090 if (depth > NCR_700_MAX_TAGS)
2091 depth = NCR_700_MAX_TAGS;
2092
2093 scsi_adjust_queue_depth(SDp, scsi_get_tag_type(SDp), depth);
2094 return depth;
2095}
2096
2097static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
2098{
2099 int change_tag = ((tag_type == 0 && scsi_get_tag_type(SDp) != 0)
2100 || (tag_type != 0 && scsi_get_tag_type(SDp) == 0));
2101 struct NCR_700_Host_Parameters *hostdata =
2102 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2103
2104 scsi_set_tag_type(SDp, tag_type);
2105
2106 /* We have a global (per target) flag to track whether TCQ is
2107 * enabled, so we'll be turning it off for the entire target here.
2108 * our tag algorithm will fail if we mix tagged and untagged commands,
2109 * so quiesce the device before doing this */
2110 if (change_tag)
2111 scsi_target_quiesce(SDp->sdev_target);
2112
2113 if (!tag_type) {
2114 /* shift back to the default unqueued number of commands
2115 * (the user can still raise this) */
2116 scsi_deactivate_tcq(SDp, SDp->host->cmd_per_lun);
2117 hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
2118 } else {
2119 /* Here, we cleared the negotiation flag above, so this
2120 * will force the driver to renegotiate */
2121 scsi_activate_tcq(SDp, SDp->queue_depth);
2122 if (change_tag)
2123 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2124 }
2125 if (change_tag)
2126 scsi_target_resume(SDp->sdev_target);
2127
2128 return tag_type;
2129}
2130
2131static ssize_t
2132 NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
2133{
2134 struct scsi_device *SDp = to_scsi_device(dev);
2135
2136 return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
2137}
2138
2139static struct device_attribute NCR_700_active_tags_attr = {
2140 .attr = {
2141 .name = "active_tags",
2142 .mode = S_IRUGO,
2143 },
2144 .show = NCR_700_show_active_tags,
2145};
2146
2147STATIC struct device_attribute *NCR_700_dev_attrs[] = {
2148 &NCR_700_active_tags_attr,
2149 NULL,
2150};
2151
2152EXPORT_SYMBOL(NCR_700_detect);
2153EXPORT_SYMBOL(NCR_700_release);
2154EXPORT_SYMBOL(NCR_700_intr);
2155
2156static struct spi_function_template NCR_700_transport_functions = {
2157 .set_period = NCR_700_set_period,
2158 .show_period = 1,
2159 .set_offset = NCR_700_set_offset,
2160 .show_offset = 1,
2161};
2162
2163static int __init NCR_700_init(void)
2164{
2165 NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2166 if(!NCR_700_transport_template)
2167 return -ENODEV;
2168 return 0;
2169}
2170
2171static void __exit NCR_700_exit(void)
2172{
2173 spi_release_transport(NCR_700_transport_template);
2174}
2175
2176module_init(NCR_700_init);
2177module_exit(NCR_700_exit);
2178