]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/atm/iphase.c
atm: [iphase] move struct suni_priv to suni.h
[net-next-2.6.git] / drivers / atm / iphase.c
CommitLineData
1da177e4
LT
1/******************************************************************************
2 iphase.c: Device driver for Interphase ATM PCI adapter cards
3 Author: Peter Wang <pwang@iphase.com>
4 Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5 Interphase Corporation <www.iphase.com>
6 Version: 1.0
7*******************************************************************************
8
9 This software may be used and distributed according to the terms
10 of the GNU General Public License (GPL), incorporated herein by reference.
11 Drivers based on this skeleton fall under the GPL and must retain
12 the authorship (implicit copyright) notice.
13
14 This program is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
18
19 Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
20 was originally written by Monalisa Agrawal at UNH. Now this driver
21 supports a variety of variants of Interphase ATM PCI (i)Chip adapter
22 card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
23 in terms of PHY type, the size of control memory and the size of
24 packet memory. The following is the change log and history:
25
26 Bugfix the Mona's UBR driver.
27 Modify the basic memory allocation and dma logic.
28 Port the driver to the latest kernel from 2.0.46.
29 Complete the ABR logic of the driver, and added the ABR work-
30 around for the hardware anomalies.
31 Add the CBR support.
32 Add the flow control logic to the driver to allow rate-limit VC.
33 Add 4K VC support to the board with 512K control memory.
34 Add the support of all the variants of the Interphase ATM PCI
35 (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36 (25M UTP25) and x531 (DS3 and E3).
37 Add SMP support.
38
39 Support and updates available at: ftp://ftp.iphase.com/pub/atm
40
41*******************************************************************************/
42
43#include <linux/module.h>
44#include <linux/kernel.h>
45#include <linux/mm.h>
46#include <linux/pci.h>
47#include <linux/errno.h>
48#include <linux/atm.h>
49#include <linux/atmdev.h>
50#include <linux/sonet.h>
51#include <linux/skbuff.h>
52#include <linux/time.h>
53#include <linux/delay.h>
54#include <linux/uio.h>
55#include <linux/init.h>
56#include <linux/wait.h>
57#include <asm/system.h>
58#include <asm/io.h>
59#include <asm/atomic.h>
60#include <asm/uaccess.h>
61#include <asm/string.h>
62#include <asm/byteorder.h>
420635f5
JL
63#include <linux/vmalloc.h>
64#include <linux/jiffies.h>
1da177e4
LT
65#include "iphase.h"
66#include "suni.h"
67#define swap(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
2be63b87 68
1da177e4
LT
69#define PRIV(dev) ((struct suni_priv *) dev->phy_data)
70
71static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
72static void desc_dbg(IADEV *iadev);
73
74static IADEV *ia_dev[8];
75static struct atm_dev *_ia_dev[8];
76static int iadev_count;
77static void ia_led_timer(unsigned long arg);
8d06afab 78static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0);
1da177e4
LT
79static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
80static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
81static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
82 |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;
83
84module_param(IA_TX_BUF, int, 0);
85module_param(IA_TX_BUF_SZ, int, 0);
86module_param(IA_RX_BUF, int, 0);
87module_param(IA_RX_BUF_SZ, int, 0);
88module_param(IADebugFlag, uint, 0644);
89
90MODULE_LICENSE("GPL");
91
92#if BITS_PER_LONG != 32
93# error FIXME: this driver only works on 32-bit platforms
94#endif
95
96/**************************** IA_LIB **********************************/
97
98static void ia_init_rtn_q (IARTN_Q *que)
99{
100 que->next = NULL;
101 que->tail = NULL;
102}
103
104static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data)
105{
106 data->next = NULL;
107 if (que->next == NULL)
108 que->next = que->tail = data;
109 else {
110 data->next = que->next;
111 que->next = data;
112 }
113 return;
114}
115
116static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
117 IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
118 if (!entry) return -1;
119 entry->data = data;
120 entry->next = NULL;
121 if (que->next == NULL)
122 que->next = que->tail = entry;
123 else {
124 que->tail->next = entry;
125 que->tail = que->tail->next;
126 }
127 return 1;
128}
129
130static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
131 IARTN_Q *tmpdata;
132 if (que->next == NULL)
133 return NULL;
134 tmpdata = que->next;
135 if ( que->next == que->tail)
136 que->next = que->tail = NULL;
137 else
138 que->next = que->next->next;
139 return tmpdata;
140}
141
142static void ia_hack_tcq(IADEV *dev) {
143
144 u_short desc1;
145 u_short tcq_wr;
146 struct ia_vcc *iavcc_r = NULL;
147
148 tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
149 while (dev->host_tcq_wr != tcq_wr) {
150 desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
151 if (!desc1) ;
152 else if (!dev->desc_tbl[desc1 -1].timestamp) {
153 IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
154 *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
155 }
156 else if (dev->desc_tbl[desc1 -1].timestamp) {
157 if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) {
158 printk("IA: Fatal err in get_desc\n");
159 continue;
160 }
161 iavcc_r->vc_desc_cnt--;
162 dev->desc_tbl[desc1 -1].timestamp = 0;
163 IF_EVENT(printk("ia_hack: return_q skb = 0x%x desc = %d\n",
164 (u32)dev->desc_tbl[desc1 -1].txskb, desc1);)
165 if (iavcc_r->pcr < dev->rate_limit) {
166 IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
167 if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
168 printk("ia_hack_tcq: No memory available\n");
169 }
170 dev->desc_tbl[desc1 -1].iavcc = NULL;
171 dev->desc_tbl[desc1 -1].txskb = NULL;
172 }
173 dev->host_tcq_wr += 2;
174 if (dev->host_tcq_wr > dev->ffL.tcq_ed)
175 dev->host_tcq_wr = dev->ffL.tcq_st;
176 }
177} /* ia_hack_tcq */
178
179static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
180 u_short desc_num, i;
181 struct sk_buff *skb;
182 struct ia_vcc *iavcc_r = NULL;
183 unsigned long delta;
184 static unsigned long timer = 0;
185 int ltimeout;
186
187 ia_hack_tcq (dev);
420635f5 188 if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
1da177e4
LT
189 timer = jiffies;
190 i=0;
191 while (i < dev->num_tx_desc) {
192 if (!dev->desc_tbl[i].timestamp) {
193 i++;
194 continue;
195 }
196 ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
197 delta = jiffies - dev->desc_tbl[i].timestamp;
198 if (delta >= ltimeout) {
199 IF_ABR(printk("RECOVER run!! desc_tbl %d = %d delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
200 if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
201 dev->ffL.tcq_rd = dev->ffL.tcq_ed;
202 else
203 dev->ffL.tcq_rd -= 2;
204 *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
205 if (!(skb = dev->desc_tbl[i].txskb) ||
206 !(iavcc_r = dev->desc_tbl[i].iavcc))
207 printk("Fatal err, desc table vcc or skb is NULL\n");
208 else
209 iavcc_r->vc_desc_cnt--;
210 dev->desc_tbl[i].timestamp = 0;
211 dev->desc_tbl[i].iavcc = NULL;
212 dev->desc_tbl[i].txskb = NULL;
213 }
214 i++;
215 } /* while */
216 }
217 if (dev->ffL.tcq_rd == dev->host_tcq_wr)
218 return 0xFFFF;
219
220 /* Get the next available descriptor number from TCQ */
221 desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
222
223 while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
224 dev->ffL.tcq_rd += 2;
225 if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
226 dev->ffL.tcq_rd = dev->ffL.tcq_st;
227 if (dev->ffL.tcq_rd == dev->host_tcq_wr)
228 return 0xFFFF;
229 desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
230 }
231
232 /* get system time */
233 dev->desc_tbl[desc_num -1].timestamp = jiffies;
234 return desc_num;
235}
236
237static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
238 u_char foundLockUp;
239 vcstatus_t *vcstatus;
240 u_short *shd_tbl;
241 u_short tempCellSlot, tempFract;
242 struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
243 struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
244 u_int i;
245
246 if (vcc->qos.txtp.traffic_class == ATM_ABR) {
247 vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
248 vcstatus->cnt++;
249 foundLockUp = 0;
250 if( vcstatus->cnt == 0x05 ) {
251 abr_vc += vcc->vci;
252 eabr_vc += vcc->vci;
253 if( eabr_vc->last_desc ) {
254 if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
255 /* Wait for 10 Micro sec */
256 udelay(10);
257 if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
258 foundLockUp = 1;
259 }
260 else {
261 tempCellSlot = abr_vc->last_cell_slot;
262 tempFract = abr_vc->fraction;
263 if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
264 && (tempFract == dev->testTable[vcc->vci]->fract))
265 foundLockUp = 1;
266 dev->testTable[vcc->vci]->lastTime = tempCellSlot;
267 dev->testTable[vcc->vci]->fract = tempFract;
268 }
269 } /* last descriptor */
270 vcstatus->cnt = 0;
271 } /* vcstatus->cnt */
272
273 if (foundLockUp) {
274 IF_ABR(printk("LOCK UP found\n");)
275 writew(0xFFFD, dev->seg_reg+MODE_REG_0);
276 /* Wait for 10 Micro sec */
277 udelay(10);
278 abr_vc->status &= 0xFFF8;
279 abr_vc->status |= 0x0001; /* state is idle */
280 shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
281 for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
282 if (i < dev->num_vc)
283 shd_tbl[i] = vcc->vci;
284 else
285 IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
286 writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
287 writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
288 writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
289 vcstatus->cnt = 0;
290 } /* foundLockUp */
291
292 } /* if an ABR VC */
293
294
295}
296
297/*
298** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
299**
300** +----+----+------------------+-------------------------------+
301** | R | NZ | 5-bit exponent | 9-bit mantissa |
302** +----+----+------------------+-------------------------------+
303**
0779bf2d 304** R = reserved (written as 0)
1da177e4
LT
305** NZ = 0 if 0 cells/sec; 1 otherwise
306**
307** if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
308*/
309static u16
310cellrate_to_float(u32 cr)
311{
312
313#define NZ 0x4000
314#define M_BITS 9 /* Number of bits in mantissa */
315#define E_BITS 5 /* Number of bits in exponent */
316#define M_MASK 0x1ff
317#define E_MASK 0x1f
318 u16 flot;
319 u32 tmp = cr & 0x00ffffff;
320 int i = 0;
321 if (cr == 0)
322 return 0;
323 while (tmp != 1) {
324 tmp >>= 1;
325 i++;
326 }
327 if (i == M_BITS)
328 flot = NZ | (i << M_BITS) | (cr & M_MASK);
329 else if (i < M_BITS)
330 flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
331 else
332 flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
333 return flot;
334}
335
336#if 0
337/*
338** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
339*/
340static u32
341float_to_cellrate(u16 rate)
342{
343 u32 exp, mantissa, cps;
344 if ((rate & NZ) == 0)
345 return 0;
346 exp = (rate >> M_BITS) & E_MASK;
347 mantissa = rate & M_MASK;
348 if (exp == 0)
349 return 1;
350 cps = (1 << M_BITS) | mantissa;
351 if (exp == M_BITS)
352 cps = cps;
353 else if (exp > M_BITS)
354 cps <<= (exp - M_BITS);
355 else
356 cps >>= (M_BITS - exp);
357 return cps;
358}
359#endif
360
361static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
362 srv_p->class_type = ATM_ABR;
363 srv_p->pcr = dev->LineRate;
364 srv_p->mcr = 0;
365 srv_p->icr = 0x055cb7;
366 srv_p->tbe = 0xffffff;
367 srv_p->frtt = 0x3a;
368 srv_p->rif = 0xf;
369 srv_p->rdf = 0xb;
370 srv_p->nrm = 0x4;
371 srv_p->trm = 0x7;
372 srv_p->cdf = 0x3;
373 srv_p->adtf = 50;
374}
375
376static int
377ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
378 struct atm_vcc *vcc, u8 flag)
379{
380 f_vc_abr_entry *f_abr_vc;
381 r_vc_abr_entry *r_abr_vc;
382 u32 icr;
383 u8 trm, nrm, crm;
384 u16 adtf, air, *ptr16;
385 f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
386 f_abr_vc += vcc->vci;
387 switch (flag) {
388 case 1: /* FFRED initialization */
389#if 0 /* sanity check */
390 if (srv_p->pcr == 0)
391 return INVALID_PCR;
392 if (srv_p->pcr > dev->LineRate)
393 srv_p->pcr = dev->LineRate;
394 if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
395 return MCR_UNAVAILABLE;
396 if (srv_p->mcr > srv_p->pcr)
397 return INVALID_MCR;
398 if (!(srv_p->icr))
399 srv_p->icr = srv_p->pcr;
400 if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
401 return INVALID_ICR;
402 if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
403 return INVALID_TBE;
404 if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
405 return INVALID_FRTT;
406 if (srv_p->nrm > MAX_NRM)
407 return INVALID_NRM;
408 if (srv_p->trm > MAX_TRM)
409 return INVALID_TRM;
410 if (srv_p->adtf > MAX_ADTF)
411 return INVALID_ADTF;
412 else if (srv_p->adtf == 0)
413 srv_p->adtf = 1;
414 if (srv_p->cdf > MAX_CDF)
415 return INVALID_CDF;
416 if (srv_p->rif > MAX_RIF)
417 return INVALID_RIF;
418 if (srv_p->rdf > MAX_RDF)
419 return INVALID_RDF;
420#endif
421 memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
422 f_abr_vc->f_vc_type = ABR;
423 nrm = 2 << srv_p->nrm; /* (2 ** (srv_p->nrm +1)) */
424 /* i.e 2**n = 2 << (n-1) */
425 f_abr_vc->f_nrm = nrm << 8 | nrm;
426 trm = 100000/(2 << (16 - srv_p->trm));
427 if ( trm == 0) trm = 1;
428 f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
429 crm = srv_p->tbe / nrm;
430 if (crm == 0) crm = 1;
431 f_abr_vc->f_crm = crm & 0xff;
432 f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
433 icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
434 ((srv_p->tbe/srv_p->frtt)*1000000) :
435 (1000000/(srv_p->frtt/srv_p->tbe)));
436 f_abr_vc->f_icr = cellrate_to_float(icr);
437 adtf = (10000 * srv_p->adtf)/8192;
438 if (adtf == 0) adtf = 1;
439 f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
440 f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
441 f_abr_vc->f_acr = f_abr_vc->f_icr;
442 f_abr_vc->f_status = 0x0042;
443 break;
444 case 0: /* RFRED initialization */
445 ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
446 *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
447 r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
448 r_abr_vc += vcc->vci;
449 r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
450 air = srv_p->pcr << (15 - srv_p->rif);
451 if (air == 0) air = 1;
452 r_abr_vc->r_air = cellrate_to_float(air);
453 dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
454 dev->sum_mcr += srv_p->mcr;
455 dev->n_abr++;
456 break;
457 default:
458 break;
459 }
460 return 0;
461}
462static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
463 u32 rateLow=0, rateHigh, rate;
464 int entries;
465 struct ia_vcc *ia_vcc;
466
467 int idealSlot =0, testSlot, toBeAssigned, inc;
468 u32 spacing;
469 u16 *SchedTbl, *TstSchedTbl;
470 u16 cbrVC, vcIndex;
471 u32 fracSlot = 0;
472 u32 sp_mod = 0;
473 u32 sp_mod2 = 0;
474
475 /* IpAdjustTrafficParams */
476 if (vcc->qos.txtp.max_pcr <= 0) {
477 IF_ERR(printk("PCR for CBR not defined\n");)
478 return -1;
479 }
480 rate = vcc->qos.txtp.max_pcr;
481 entries = rate / dev->Granularity;
482 IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
483 entries, rate, dev->Granularity);)
484 if (entries < 1)
485 IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
486 rateLow = entries * dev->Granularity;
487 rateHigh = (entries + 1) * dev->Granularity;
488 if (3*(rate - rateLow) > (rateHigh - rate))
489 entries++;
490 if (entries > dev->CbrRemEntries) {
491 IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
492 IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
493 entries, dev->CbrRemEntries);)
494 return -EBUSY;
495 }
496
497 ia_vcc = INPH_IA_VCC(vcc);
498 ia_vcc->NumCbrEntry = entries;
499 dev->sum_mcr += entries * dev->Granularity;
500 /* IaFFrednInsertCbrSched */
501 // Starting at an arbitrary location, place the entries into the table
502 // as smoothly as possible
503 cbrVC = 0;
504 spacing = dev->CbrTotEntries / entries;
505 sp_mod = dev->CbrTotEntries % entries; // get modulo
506 toBeAssigned = entries;
507 fracSlot = 0;
508 vcIndex = vcc->vci;
509 IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
510 while (toBeAssigned)
511 {
512 // If this is the first time, start the table loading for this connection
513 // as close to entryPoint as possible.
514 if (toBeAssigned == entries)
515 {
516 idealSlot = dev->CbrEntryPt;
517 dev->CbrEntryPt += 2; // Adding 2 helps to prevent clumping
518 if (dev->CbrEntryPt >= dev->CbrTotEntries)
519 dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
520 } else {
521 idealSlot += (u32)(spacing + fracSlot); // Point to the next location
522 // in the table that would be smoothest
523 fracSlot = ((sp_mod + sp_mod2) / entries); // get new integer part
524 sp_mod2 = ((sp_mod + sp_mod2) % entries); // calc new fractional part
525 }
526 if (idealSlot >= (int)dev->CbrTotEntries)
527 idealSlot -= dev->CbrTotEntries;
528 // Continuously check around this ideal value until a null
529 // location is encountered.
530 SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
531 inc = 0;
532 testSlot = idealSlot;
533 TstSchedTbl = (u16*)(SchedTbl+testSlot); //set index and read in value
534 IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%x, NumToAssign=%d\n",
535 testSlot, (u32)TstSchedTbl,toBeAssigned);)
536 memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
537 while (cbrVC) // If another VC at this location, we have to keep looking
538 {
539 inc++;
540 testSlot = idealSlot - inc;
541 if (testSlot < 0) { // Wrap if necessary
542 testSlot += dev->CbrTotEntries;
543 IF_CBR(printk("Testslot Wrap. STable Start=0x%x,Testslot=%d\n",
544 (u32)SchedTbl,testSlot);)
545 }
546 TstSchedTbl = (u16 *)(SchedTbl + testSlot); // set table index
547 memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
548 if (!cbrVC)
549 break;
550 testSlot = idealSlot + inc;
551 if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
552 testSlot -= dev->CbrTotEntries;
553 IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
554 IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
555 testSlot, toBeAssigned);)
556 }
557 // set table index and read in value
558 TstSchedTbl = (u16*)(SchedTbl + testSlot);
559 IF_CBR(printk("Reading CBR Tbl from 0x%x, CbrVal=0x%x Iteration %d\n",
560 (u32)TstSchedTbl,cbrVC,inc);)
561 memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
562 } /* while */
563 // Move this VCI number into this location of the CBR Sched table.
564 memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex,sizeof(TstSchedTbl));
565 dev->CbrRemEntries--;
566 toBeAssigned--;
567 } /* while */
568
569 /* IaFFrednCbrEnable */
570 dev->NumEnabledCBR++;
571 if (dev->NumEnabledCBR == 1) {
572 writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
573 IF_CBR(printk("CBR is enabled\n");)
574 }
575 return 0;
576}
577static void ia_cbrVc_close (struct atm_vcc *vcc) {
578 IADEV *iadev;
579 u16 *SchedTbl, NullVci = 0;
580 u32 i, NumFound;
581
582 iadev = INPH_IA_DEV(vcc->dev);
583 iadev->NumEnabledCBR--;
584 SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
585 if (iadev->NumEnabledCBR == 0) {
586 writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
587 IF_CBR (printk("CBR support disabled\n");)
588 }
589 NumFound = 0;
590 for (i=0; i < iadev->CbrTotEntries; i++)
591 {
592 if (*SchedTbl == vcc->vci) {
593 iadev->CbrRemEntries++;
594 *SchedTbl = NullVci;
595 IF_CBR(NumFound++;)
596 }
597 SchedTbl++;
598 }
599 IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
600}
601
602static int ia_avail_descs(IADEV *iadev) {
603 int tmp = 0;
604 ia_hack_tcq(iadev);
605 if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
606 tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
607 else
608 tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
609 iadev->ffL.tcq_st) / 2;
610 return tmp;
611}
612
613static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
614
615static int ia_que_tx (IADEV *iadev) {
616 struct sk_buff *skb;
617 int num_desc;
618 struct atm_vcc *vcc;
619 struct ia_vcc *iavcc;
620 num_desc = ia_avail_descs(iadev);
621
622 while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
623 if (!(vcc = ATM_SKB(skb)->vcc)) {
624 dev_kfree_skb_any(skb);
625 printk("ia_que_tx: Null vcc\n");
626 break;
627 }
628 if (!test_bit(ATM_VF_READY,&vcc->flags)) {
629 dev_kfree_skb_any(skb);
630 printk("Free the SKB on closed vci %d \n", vcc->vci);
631 break;
632 }
633 iavcc = INPH_IA_VCC(vcc);
634 if (ia_pkt_tx (vcc, skb)) {
635 skb_queue_head(&iadev->tx_backlog, skb);
636 }
637 num_desc--;
638 }
639 return 0;
640}
641
642static void ia_tx_poll (IADEV *iadev) {
643 struct atm_vcc *vcc = NULL;
644 struct sk_buff *skb = NULL, *skb1 = NULL;
645 struct ia_vcc *iavcc;
646 IARTN_Q * rtne;
647
648 ia_hack_tcq(iadev);
649 while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
650 skb = rtne->data.txskb;
651 if (!skb) {
652 printk("ia_tx_poll: skb is null\n");
653 goto out;
654 }
655 vcc = ATM_SKB(skb)->vcc;
656 if (!vcc) {
657 printk("ia_tx_poll: vcc is null\n");
658 dev_kfree_skb_any(skb);
659 goto out;
660 }
661
662 iavcc = INPH_IA_VCC(vcc);
663 if (!iavcc) {
664 printk("ia_tx_poll: iavcc is null\n");
665 dev_kfree_skb_any(skb);
666 goto out;
667 }
668
669 skb1 = skb_dequeue(&iavcc->txing_skb);
670 while (skb1 && (skb1 != skb)) {
671 if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
672 printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
673 }
674 IF_ERR(printk("Release the SKB not match\n");)
675 if ((vcc->pop) && (skb1->len != 0))
676 {
677 vcc->pop(vcc, skb1);
678 IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n",
679 (long)skb1);)
680 }
681 else
682 dev_kfree_skb_any(skb1);
683 skb1 = skb_dequeue(&iavcc->txing_skb);
684 }
685 if (!skb1) {
686 IF_EVENT(printk("IA: Vci %d - skb not found requed\n",vcc->vci);)
687 ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
688 break;
689 }
690 if ((vcc->pop) && (skb->len != 0))
691 {
692 vcc->pop(vcc, skb);
693 IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
694 }
695 else
696 dev_kfree_skb_any(skb);
697 kfree(rtne);
698 }
699 ia_que_tx(iadev);
700out:
701 return;
702}
703#if 0
704static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
705{
706 u32 t;
707 int i;
708 /*
709 * Issue a command to enable writes to the NOVRAM
710 */
711 NVRAM_CMD (EXTEND + EWEN);
712 NVRAM_CLR_CE;
713 /*
714 * issue the write command
715 */
716 NVRAM_CMD(IAWRITE + addr);
717 /*
718 * Send the data, starting with D15, then D14, and so on for 16 bits
719 */
720 for (i=15; i>=0; i--) {
721 NVRAM_CLKOUT (val & 0x8000);
722 val <<= 1;
723 }
724 NVRAM_CLR_CE;
725 CFG_OR(NVCE);
726 t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
727 while (!(t & NVDO))
728 t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
729
730 NVRAM_CLR_CE;
731 /*
732 * disable writes again
733 */
734 NVRAM_CMD(EXTEND + EWDS)
735 NVRAM_CLR_CE;
736 CFG_AND(~NVDI);
737}
738#endif
739
740static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
741{
742 u_short val;
743 u32 t;
744 int i;
745 /*
746 * Read the first bit that was clocked with the falling edge of the
747 * the last command data clock
748 */
749 NVRAM_CMD(IAREAD + addr);
750 /*
751 * Now read the rest of the bits, the next bit read is D14, then D13,
752 * and so on.
753 */
754 val = 0;
755 for (i=15; i>=0; i--) {
756 NVRAM_CLKIN(t);
757 val |= (t << i);
758 }
759 NVRAM_CLR_CE;
760 CFG_AND(~NVDI);
761 return val;
762}
763
764static void ia_hw_type(IADEV *iadev) {
765 u_short memType = ia_eeprom_get(iadev, 25);
766 iadev->memType = memType;
767 if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
768 iadev->num_tx_desc = IA_TX_BUF;
769 iadev->tx_buf_sz = IA_TX_BUF_SZ;
770 iadev->num_rx_desc = IA_RX_BUF;
771 iadev->rx_buf_sz = IA_RX_BUF_SZ;
772 } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
773 if (IA_TX_BUF == DFL_TX_BUFFERS)
774 iadev->num_tx_desc = IA_TX_BUF / 2;
775 else
776 iadev->num_tx_desc = IA_TX_BUF;
777 iadev->tx_buf_sz = IA_TX_BUF_SZ;
778 if (IA_RX_BUF == DFL_RX_BUFFERS)
779 iadev->num_rx_desc = IA_RX_BUF / 2;
780 else
781 iadev->num_rx_desc = IA_RX_BUF;
782 iadev->rx_buf_sz = IA_RX_BUF_SZ;
783 }
784 else {
785 if (IA_TX_BUF == DFL_TX_BUFFERS)
786 iadev->num_tx_desc = IA_TX_BUF / 8;
787 else
788 iadev->num_tx_desc = IA_TX_BUF;
789 iadev->tx_buf_sz = IA_TX_BUF_SZ;
790 if (IA_RX_BUF == DFL_RX_BUFFERS)
791 iadev->num_rx_desc = IA_RX_BUF / 8;
792 else
793 iadev->num_rx_desc = IA_RX_BUF;
794 iadev->rx_buf_sz = IA_RX_BUF_SZ;
795 }
796 iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
797 IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
798 iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
799 iadev->rx_buf_sz, iadev->rx_pkt_ram);)
800
801#if 0
802 if ((memType & FE_MASK) == FE_SINGLE_MODE) {
803 iadev->phy_type = PHY_OC3C_S;
804 else if ((memType & FE_MASK) == FE_UTP_OPTION)
805 iadev->phy_type = PHY_UTP155;
806 else
807 iadev->phy_type = PHY_OC3C_M;
808#endif
809
810 iadev->phy_type = memType & FE_MASK;
811 IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
812 memType,iadev->phy_type);)
813 if (iadev->phy_type == FE_25MBIT_PHY)
814 iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
815 else if (iadev->phy_type == FE_DS3_PHY)
816 iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
817 else if (iadev->phy_type == FE_E3_PHY)
818 iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
819 else
820 iadev->LineRate = (u32)(ATM_OC3_PCR);
821 IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
822
823}
824
825static void IaFrontEndIntr(IADEV *iadev) {
826 volatile IA_SUNI *suni;
827 volatile ia_mb25_t *mb25;
828 volatile suni_pm7345_t *suni_pm7345;
829 u32 intr_status;
830 u_int frmr_intr;
831
832 if(iadev->phy_type & FE_25MBIT_PHY) {
833 mb25 = (ia_mb25_t*)iadev->phy;
834 iadev->carrier_detect = Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
835 } else if (iadev->phy_type & FE_DS3_PHY) {
836 suni_pm7345 = (suni_pm7345_t *)iadev->phy;
837 /* clear FRMR interrupts */
838 frmr_intr = suni_pm7345->suni_ds3_frm_intr_stat;
839 iadev->carrier_detect =
840 Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
841 } else if (iadev->phy_type & FE_E3_PHY ) {
842 suni_pm7345 = (suni_pm7345_t *)iadev->phy;
843 frmr_intr = suni_pm7345->suni_e3_frm_maint_intr_ind;
844 iadev->carrier_detect =
845 Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
846 }
847 else {
848 suni = (IA_SUNI *)iadev->phy;
849 intr_status = suni->suni_rsop_status & 0xff;
850 iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
851 }
852 if (iadev->carrier_detect)
853 printk("IA: SUNI carrier detected\n");
854 else
855 printk("IA: SUNI carrier lost signal\n");
856 return;
857}
858
859static void ia_mb25_init (IADEV *iadev)
860{
861 volatile ia_mb25_t *mb25 = (ia_mb25_t*)iadev->phy;
862#if 0
863 mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
864#endif
865 mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
866 mb25->mb25_diag_control = 0;
867 /*
868 * Initialize carrier detect state
869 */
870 iadev->carrier_detect = Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
871 return;
872}
873
874static void ia_suni_pm7345_init (IADEV *iadev)
875{
876 volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
877 if (iadev->phy_type & FE_DS3_PHY)
878 {
879 iadev->carrier_detect =
880 Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
881 suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
882 suni_pm7345->suni_ds3_frm_cfg = 1;
883 suni_pm7345->suni_ds3_tran_cfg = 1;
884 suni_pm7345->suni_config = 0;
885 suni_pm7345->suni_splr_cfg = 0;
886 suni_pm7345->suni_splt_cfg = 0;
887 }
888 else
889 {
890 iadev->carrier_detect =
891 Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
892 suni_pm7345->suni_e3_frm_fram_options = 0x4;
893 suni_pm7345->suni_e3_frm_maint_options = 0x20;
894 suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
895 suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
896 suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
897 suni_pm7345->suni_e3_tran_fram_options = 0x1;
898 suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
899 suni_pm7345->suni_splr_cfg = 0x41;
900 suni_pm7345->suni_splt_cfg = 0x41;
901 }
902 /*
903 * Enable RSOP loss of signal interrupt.
904 */
905 suni_pm7345->suni_intr_enbl = 0x28;
906
907 /*
908 * Clear error counters
909 */
910 suni_pm7345->suni_id_reset = 0;
911
912 /*
913 * Clear "PMCTST" in master test register.
914 */
915 suni_pm7345->suni_master_test = 0;
916
917 suni_pm7345->suni_rxcp_ctrl = 0x2c;
918 suni_pm7345->suni_rxcp_fctrl = 0x81;
919
920 suni_pm7345->suni_rxcp_idle_pat_h1 =
921 suni_pm7345->suni_rxcp_idle_pat_h2 =
922 suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
923 suni_pm7345->suni_rxcp_idle_pat_h4 = 1;
924
925 suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
926 suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
927 suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
928 suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;
929
930 suni_pm7345->suni_rxcp_cell_pat_h1 =
931 suni_pm7345->suni_rxcp_cell_pat_h2 =
932 suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
933 suni_pm7345->suni_rxcp_cell_pat_h4 = 1;
934
935 suni_pm7345->suni_rxcp_cell_mask_h1 =
936 suni_pm7345->suni_rxcp_cell_mask_h2 =
937 suni_pm7345->suni_rxcp_cell_mask_h3 =
938 suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;
939
940 suni_pm7345->suni_txcp_ctrl = 0xa4;
941 suni_pm7345->suni_txcp_intr_en_sts = 0x10;
942 suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;
943
944 suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |
945 SUNI_PM7345_CLB |
946 SUNI_PM7345_DLB |
947 SUNI_PM7345_PLB);
948#ifdef __SNMP__
949 suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
950#endif /* __SNMP__ */
951 return;
952}
953
954
955/***************************** IA_LIB END *****************************/
956
a22eb6fa 957#ifdef CONFIG_ATM_IA_DEBUG
1da177e4
LT
958static int tcnter = 0;
959static void xdump( u_char* cp, int length, char* prefix )
960{
961 int col, count;
962 u_char prntBuf[120];
963 u_char* pBuf = prntBuf;
964 count = 0;
965 while(count < length){
966 pBuf += sprintf( pBuf, "%s", prefix );
967 for(col = 0;count + col < length && col < 16; col++){
968 if (col != 0 && (col % 4) == 0)
969 pBuf += sprintf( pBuf, " " );
970 pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
971 }
972 while(col++ < 16){ /* pad end of buffer with blanks */
973 if ((col % 4) == 0)
974 sprintf( pBuf, " " );
975 pBuf += sprintf( pBuf, " " );
976 }
977 pBuf += sprintf( pBuf, " " );
978 for(col = 0;count + col < length && col < 16; col++){
979 if (isprint((int)cp[count + col]))
980 pBuf += sprintf( pBuf, "%c", cp[count + col] );
981 else
982 pBuf += sprintf( pBuf, "." );
983 }
984 sprintf( pBuf, "\n" );
985 // SPrint(prntBuf);
986 printk(prntBuf);
987 count += col;
988 pBuf = prntBuf;
989 }
990
991} /* close xdump(... */
a22eb6fa 992#endif /* CONFIG_ATM_IA_DEBUG */
1da177e4
LT
993
994
995static struct atm_dev *ia_boards = NULL;
996
997#define ACTUAL_RAM_BASE \
998 RAM_BASE*((iadev->mem)/(128 * 1024))
999#define ACTUAL_SEG_RAM_BASE \
1000 IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1001#define ACTUAL_REASS_RAM_BASE \
1002 IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1003
1004
1005/*-- some utilities and memory allocation stuff will come here -------------*/
1006
/*
 * desc_dbg - dump the transmit completion queue (TCQ) state and the
 * per-descriptor timestamp table to the kernel log.
 *
 * Pure debugging aid: reads segmentation registers and segmentation RAM
 * but writes no hardware state.
 */
static void desc_dbg(IADEV *iadev) {

  u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
  u32 i;
  void __iomem *tmp;
  // regval = readl((u32)ia_cmds->maddr);
  /* hardware TCQ write pointer; the two reads behind it show the most
     recently completed descriptors */
  tcq_wr_ptr = readw(iadev->seg_reg+TCQ_WR_PTR);
  printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
                 tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
                 readw(iadev->seg_ram+tcq_wr_ptr-2));
  printk(" host_tcq_wr = 0x%x host_tcq_rd = 0x%x \n", iadev->host_tcq_wr,
                 iadev->ffL.tcq_rd);
  tcq_st_ptr = readw(iadev->seg_reg+TCQ_ST_ADR);
  tcq_ed_ptr = readw(iadev->seg_reg+TCQ_ED_ADR);
  printk("tcq_st_ptr = 0x%x tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
  i = 0;
  /* walk the whole TCQ region, one 16-bit descriptor per slot */
  while (tcq_st_ptr != tcq_ed_ptr) {
      tmp = iadev->seg_ram+tcq_st_ptr;
      printk("TCQ slot %d desc = %d Addr = %p\n", i++, readw(tmp), tmp);
      tcq_st_ptr += 2;
  }
  for(i=0; i <iadev->num_tx_desc; i++)
      printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
}
1031
1032
1033/*----------------------------- Recieving side stuff --------------------------*/
1034
/*
 * rx_excp_rcvd - drain the reassembly exception queue.
 *
 * The entire body is compiled out (#if 0): servicing the exception queue
 * here caused too many exception interrupts (see the inline comment), so
 * the handler is intentionally a no-op.  The dead code below documents how
 * the queue would be walked: read entries between the hardware read and
 * write pointers, wrap at EXCP_Q_ED_ADR, and write back the read pointer.
 */
static void rx_excp_rcvd(struct atm_dev *dev)
{
#if 0 /* closing the receiving size will cause too many excp int */
  IADEV *iadev;
  u_short state;
  u_short excpq_rd_ptr;
  //u_short *ptr;
  int vci, error = 1;
  iadev = INPH_IA_DEV(dev);
  state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
  { printk("state = %x \n", state);
        excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
 printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
        if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
            IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
        // TODO: update exception stat
	vci = readw(iadev->reass_ram+excpq_rd_ptr);
	error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
        // pwang_test
	excpq_rd_ptr += 4;
	if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
 	    excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
	writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  }
#endif
}
1063
/*
 * free_desc - return a receive buffer descriptor to the hardware free queue.
 * @dev:  the ATM device
 * @desc: descriptor number to recycle
 *
 * The descriptor number is stored into reassembly RAM at the local
 * free-queue write pointer, the pointer is advanced by one 16-bit slot
 * (wrapping from fdq_ed back to fdq_st), and only then is FREEQ_WR_PTR
 * updated - so the SAR never sees a slot before it is fully written.
 */
static void free_desc(struct atm_dev *dev, int desc)
{
	IADEV *iadev;
	iadev = INPH_IA_DEV(dev);
	writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
	iadev->rfL.fdq_wr +=2;
	if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
	    iadev->rfL.fdq_wr = iadev->rfL.fdq_st;
	writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
}
1074
1075
/*
 * rx_pkt - service one entry of the reassembly packet-complete queue (PCQ).
 *
 * Pops a descriptor number from the PCQ, validates the descriptor and its
 * VC index, checks the buffer-descriptor status bits, allocates/charges an
 * skb of the received length and queues a DLE so the on-board DMA engine
 * copies the packet from card memory into the skb.  Completion is handled
 * in rx_dle_intr(), which matches the skb queued on rx_dma_q here.
 *
 * Returns 0 on success (or after a dropped packet), -EINVAL if the PCQ is
 * empty, -1 for a bad descriptor or missing VC.  Every reject path recycles
 * the descriptor via free_desc().
 */
static int rx_pkt(struct atm_dev *dev)
{
	IADEV *iadev;
	struct atm_vcc *vcc;
	unsigned short status;
	struct rx_buf_desc __iomem *buf_desc_ptr;
	int desc;
	struct dle* wr_ptr;
	int len;
	struct sk_buff *skb;
	u_int buf_addr, dma_addr;

	iadev = INPH_IA_DEV(dev);
	/* empty when our local read pointer has caught up with the hardware
	   write pointer */
	if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
	{
	    printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
	    return -EINVAL;
	}
	/* mask 1st 3 bits to get the actual descno. */
	desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
	IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
	                             iadev->reass_ram, iadev->rfL.pcq_rd, desc);
	      printk(" pcq_wr_ptr = 0x%x\n",
	                           readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
	/* update the read pointer - maybe we shud do this in the end*/
	if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
	    iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
	else
	    iadev->rfL.pcq_rd += 2;
	writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);

	/* get the buffer desc entry.
		update stuff. - doesn't seem to be any update necessary
	*/
	buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
	/* make the ptr point to the corresponding buffer desc entry */
	buf_desc_ptr += desc;
	/* reject descriptor numbers / VC indices outside configured ranges */
	if (!desc || (desc > iadev->num_rx_desc) ||
	              ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
	    free_desc(dev, desc);
	    IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
	    return -1;
	}
	vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
	if (!vcc)
	{
	    free_desc(dev, desc);
	    printk("IA: null vcc, drop PDU\n");
	    return -1;
	}


	/* might want to check the status bits for errors */
	status = (u_short) (buf_desc_ptr->desc_mode);
	if (status & (RX_CER | RX_PTE | RX_OFL))
	{
	    atomic_inc(&vcc->stats->rx_err);
	    IF_ERR(printk("IA: bad packet, dropping it");)
	    if (status & RX_CER) {
	        IF_ERR(printk(" cause: packet CRC error\n");)
	    }
	    else if (status & RX_PTE) {
	        IF_ERR(printk(" cause: packet time out\n");)
	    }
	    else {
	        IF_ERR(printk(" cause: buffer over flow\n");)
	    }
	    goto out_free_desc;
	}

	/*
		build DLE.
	*/

	/* received length = how far the DMA pointer advanced past the
	   buffer start */
	buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
	dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
	len = dma_addr - buf_addr;
	if (len > iadev->rx_buf_sz) {
	   printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
	   atomic_inc(&vcc->stats->rx_err);
	   goto out_free_desc;
	}

	/* atm_alloc_charge also accounts the buffer space against the vcc */
	if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
	   if (vcc->vci < 32)
	      printk("Drop control packets\n");
	   goto out_free_desc;
	}
	skb_put(skb,len);
	// pwang_test
	ATM_SKB(skb)->vcc = vcc;
	ATM_DESC(skb) = desc;
	/* rx_dle_intr() dequeues this skb when the DLE below completes */
	skb_queue_tail(&iadev->rx_dma_q, skb);

	/* Build the DLE structure */
	wr_ptr = iadev->rx_dle_q.write;
	wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
		len, PCI_DMA_FROMDEVICE);
	wr_ptr->local_pkt_addr = buf_addr;
	wr_ptr->bytes = len;	/* We don't know this do we ?? */
	wr_ptr->mode = DMA_INT_ENABLE;

	/* shud take care of wrap around here too. */
	if(++wr_ptr == iadev->rx_dle_q.end)
	     wr_ptr = iadev->rx_dle_q.start;
	iadev->rx_dle_q.write = wr_ptr;
	udelay(1);
	/* Increment transaction counter */
	writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
out:	return 0;
out_free_desc:
	free_desc(dev, desc);
	goto out;
}
1190
/*
 * rx_intr - reassembly-side interrupt handler.
 *
 * RX_PKT_RCVD:   drain the packet-complete queue via rx_pkt() until the
 *                state register reports it empty.
 * RX_FREEQ_EMPT: the free-descriptor queue ran dry.  Record when it
 *                happened; if it stays dry with no packets received for
 *                ~50 jiffies, force every descriptor back onto the free
 *                queue as a recovery measure ("test logic").
 * RX_EXCP_RCVD:  forward to rx_excp_rcvd() (currently a no-op).
 * RX_RAW_RCVD:   raw cells - only logged, not processed.
 */
static void rx_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  u_short status;
  u_short state, i;

  iadev = INPH_IA_DEV(dev);
  status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
  IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
  if (status & RX_PKT_RCVD)
  {
	/* do something */
	/* Basically recvd an interrupt for receving a packet.
	A descriptor would have been written to the packet complete
	queue. Get all the descriptors and set up dma to move the
	packets till the packet complete queue is empty..
	*/
	state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
	IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
	while(!(state & PCQ_EMPTY))
	{
             rx_pkt(dev);
	     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
	}
        iadev->rxing = 1;
  }
  if (status & RX_FREEQ_EMPT)
  {
     /* snapshot the packet counter the first time the queue empties;
        the stalled-receiver recovery below compares against it */
     if (iadev->rxing) {
        iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
        iadev->rx_tmp_jif = jiffies;
        iadev->rxing = 0;
     }
     else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
               ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
        /* no progress for ~50 jiffies: recycle all descriptors and
           re-enable the masked interrupts */
        for (i = 1; i <= iadev->num_rx_desc; i++)
            free_desc(dev, i);
printk("Test logic RUN!!!!\n");
        writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing = 1;
     }
     IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
  }

  if (status & RX_EXCP_RCVD)
  {
	/* probably need to handle the exception queue also. */
	IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
	rx_excp_rcvd(dev);
  }


  if (status & RX_RAW_RCVD)
  {
	/* need to handle the raw incoming cells. This deepnds on
	whether we have programmed to receive the raw cells or not.
	Else ignore. */
	IF_EVENT(printk("Rx intr status: RX_RAW_RCVD %08x\n", status);)
  }
}
1251
1252
/*
 * rx_dle_intr - RX DMA-list-engine completion handler.
 *
 * Walks the RX DLE ring from our read pointer up to the hardware's current
 * list position.  For each completed DLE the matching skb (queued by
 * rx_pkt() on rx_dma_q) is dequeued, its descriptor returned to the free
 * queue, the AAL5 CPCS trailer validated, and the skb pushed up to the VCC.
 * Finally, if reception was stalled because the free queue was empty, the
 * masked reassembly interrupts are re-enabled.
 */
static void rx_dle_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  struct atm_vcc *vcc;
  struct sk_buff *skb;
  int desc;
  u_short state;
  struct dle *dle, *cur_dle;
  u_int dle_lp;
  int len;
  iadev = INPH_IA_DEV(dev);

  /* free all the dles done, that is just update our own dle read pointer
	- do we really need to do this. Think not. */
  /* DMA is done, just get all the recevie buffers from the rx dma queue
	and push them up to the higher layer protocol. Also free the desc
	associated with the buffer. */
  dle = iadev->rx_dle_q.read;
  dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
  cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
  while(dle != cur_dle)
  {
      /* free the DMAed skb */
      skb = skb_dequeue(&iadev->rx_dma_q);
      if (!skb)
         goto INCR_DLE;
      desc = ATM_DESC(skb);
      free_desc(dev, desc);

      if (!(len = skb->len))
      {
          printk("rx_dle_intr: skb len 0\n");
	  dev_kfree_skb_any(skb);
      }
      else
      {
          struct cpcs_trailer *trailer;
          u_short length;
          struct ia_vcc *ia_vcc;

	  /* NOTE(review): this unmaps the address stored in rx_dle_q.write,
	     not in the dle just consumed - looks suspicious; confirm against
	     the DLE hardware semantics before changing. */
	  pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
	  	len, PCI_DMA_FROMDEVICE);
          /* no VCC related housekeeping done as yet. lets see */
          vcc = ATM_SKB(skb)->vcc;
	  if (!vcc) {
	      printk("IA: null vcc\n");
	      dev_kfree_skb_any(skb);
	      goto INCR_DLE;
          }
	  ia_vcc = INPH_IA_VCC(vcc);
	  if (ia_vcc == NULL)
	  {
	     atomic_inc(&vcc->stats->rx_err);
	     dev_kfree_skb_any(skb);
	     atm_return(vcc, atm_guess_pdu2truesize(len));
	     goto INCR_DLE;
          }
          // get real pkt length  pwang_test
          trailer = (struct cpcs_trailer*)((u_char *)skb->data +
                                 skb->len - sizeof(*trailer));
	  length = swap(trailer->length);
	  /* reject trailers whose length field exceeds the buffer size or
	     the actual payload (skb minus trailer) */
          if ((length > iadev->rx_buf_sz) || (length >
                              (skb->len - sizeof(struct cpcs_trailer))))
          {
             atomic_inc(&vcc->stats->rx_err);
             IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)",
                                                            length, skb->len);)
             dev_kfree_skb_any(skb);
             atm_return(vcc, atm_guess_pdu2truesize(len));
             goto INCR_DLE;
          }
          skb_trim(skb, length);

	  /* Display the packet */
	  IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
          xdump(skb->data, skb->len, "RX: ");
          printk("\n");)

	  IF_RX(printk("rx_dle_intr: skb push");)
	  vcc->push(vcc,skb);
	  atomic_inc(&vcc->stats->rx);
          iadev->rx_pkt_cnt++;
      }
INCR_DLE:
      if (++dle == iadev->rx_dle_q.end)
    	  dle = iadev->rx_dle_q.start;
  }
  iadev->rx_dle_q.read = dle;

  /* if the interrupts are masked because there were no free desc available,
		unmask them now. */
  if (!iadev->rxing) {
     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
     if (!(state & FREEQ_EMPTY)) {
        state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
        writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
                                      iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing++;
     }
  }
}
1354
1355
/*
 * open_rx - set up the reassembly (receive) side of a VC.
 *
 * Rejects ABR on 25.6 Mbit/s PHYs, marks the VCI valid in the RX VC lookup
 * table, programs either an ABR VC (via ia_open_abr_vc) or a plain
 * reassembly-table entry (NO_AAL5_PKT) for other classes, and records the
 * vcc in rx_open[] so rx_pkt() can attach incoming PDUs to it.
 *
 * Returns 0 on success (including rxtp class ATM_NONE, which needs no RX
 * setup), -EINVAL if ABR is requested on a 25M PHY.
 */
static int open_rx(struct atm_vcc *vcc)
{
	IADEV *iadev;
	u_short __iomem *vc_table;
	u_short __iomem *reass_ptr;
	IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)

	if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
	iadev = INPH_IA_DEV(vcc->dev);
	if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
	   if (iadev->phy_type & FE_25MBIT_PHY) {
	       printk("IA:  ABR not support\n");
	       return -EINVAL;
	   }
	}
	/* Make only this VCI in the vc table valid and let all
		others be invalid entries */
	vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
	vc_table += vcc->vci;
	/* mask the last 6 bits and OR it with 3 for 1K VCs */

	*vc_table = vcc->vci << 6;
	/* Also keep a list of open rx vcs so that we can attach them with
		incoming PDUs later. */
	if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
	                                (vcc->qos.txtp.traffic_class == ATM_ABR))
	{
		srv_cls_param_t srv_p;
		init_abr_vc(iadev, &srv_p);
		ia_open_abr_vc(iadev, &srv_p, vcc, 0);
	}
	else {  /* for UBR  later may need to add CBR logic */
		reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
		reass_ptr += vcc->vci;
		*reass_ptr = NO_AAL5_PKT;
	}

	if (iadev->rx_open[vcc->vci])
		printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
			vcc->dev->number, vcc->vci);
	iadev->rx_open[vcc->vci] = vcc;
	return 0;
}
1399
1400static int rx_init(struct atm_dev *dev)
1401{
1402 IADEV *iadev;
1403 struct rx_buf_desc __iomem *buf_desc_ptr;
1404 unsigned long rx_pkt_start = 0;
1405 void *dle_addr;
1406 struct abr_vc_table *abr_vc_table;
1407 u16 *vc_table;
1408 u16 *reass_table;
1409 u16 *ptr16;
1410 int i,j, vcsize_sel;
1411 u_short freeq_st_adr;
1412 u_short *freeq_start;
1413
1414 iadev = INPH_IA_DEV(dev);
1415 // spin_lock_init(&iadev->rx_lock);
1416
1417 /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1418 dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1419 &iadev->rx_dle_dma);
1420 if (!dle_addr) {
1421 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1422 goto err_out;
1423 }
1424 iadev->rx_dle_q.start = (struct dle*)dle_addr;
1425 iadev->rx_dle_q.read = iadev->rx_dle_q.start;
1426 iadev->rx_dle_q.write = iadev->rx_dle_q.start;
1427 iadev->rx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1428 /* the end of the dle q points to the entry after the last
1429 DLE that can be used. */
1430
1431 /* write the upper 20 bits of the start address to rx list address register */
1432 writel(iadev->rx_dle_dma & 0xfffff000,
1433 iadev->dma + IPHASE5575_RX_LIST_ADDR);
1434 IF_INIT(printk("Tx Dle list addr: 0x%08x value: 0x%0x\n",
1435 (u32)(iadev->dma+IPHASE5575_TX_LIST_ADDR),
1436 *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));
1437 printk("Rx Dle list addr: 0x%08x value: 0x%0x\n",
1438 (u32)(iadev->dma+IPHASE5575_RX_LIST_ADDR),
1439 *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)
1440
1441 writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
1442 writew(0, iadev->reass_reg+MODE_REG);
1443 writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);
1444
1445 /* Receive side control memory map
1446 -------------------------------
1447
1448 Buffer descr 0x0000 (736 - 23K)
1449 VP Table 0x5c00 (256 - 512)
1450 Except q 0x5e00 (128 - 512)
1451 Free buffer q 0x6000 (1K - 2K)
1452 Packet comp q 0x6800 (1K - 2K)
1453 Reass Table 0x7000 (1K - 2K)
1454 VC Table 0x7800 (1K - 2K)
1455 ABR VC Table 0x8000 (1K - 32K)
1456 */
1457
1458 /* Base address for Buffer Descriptor Table */
1459 writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
1460 /* Set the buffer size register */
1461 writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);
1462
1463 /* Initialize each entry in the Buffer Descriptor Table */
1464 iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1465 buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1466 memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1467 buf_desc_ptr++;
1468 rx_pkt_start = iadev->rx_pkt_ram;
1469 for(i=1; i<=iadev->num_rx_desc; i++)
1470 {
1471 memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1472 buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
1473 buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
1474 buf_desc_ptr++;
1475 rx_pkt_start += iadev->rx_buf_sz;
1476 }
1477 IF_INIT(printk("Rx Buffer desc ptr: 0x%0x\n", (u32)(buf_desc_ptr));)
1478 i = FREE_BUF_DESC_Q*iadev->memSize;
1479 writew(i >> 16, iadev->reass_reg+REASS_QUEUE_BASE);
1480 writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1481 writew(i+iadev->num_rx_desc*sizeof(u_short),
1482 iadev->reass_reg+FREEQ_ED_ADR);
1483 writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1484 writew(i+iadev->num_rx_desc*sizeof(u_short),
1485 iadev->reass_reg+FREEQ_WR_PTR);
1486 /* Fill the FREEQ with all the free descriptors. */
1487 freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
1488 freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
1489 for(i=1; i<=iadev->num_rx_desc; i++)
1490 {
1491 *freeq_start = (u_short)i;
1492 freeq_start++;
1493 }
1494 IF_INIT(printk("freeq_start: 0x%0x\n", (u32)freeq_start);)
1495 /* Packet Complete Queue */
1496 i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1497 writew(i, iadev->reass_reg+PCQ_ST_ADR);
1498 writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1499 writew(i, iadev->reass_reg+PCQ_RD_PTR);
1500 writew(i, iadev->reass_reg+PCQ_WR_PTR);
1501
1502 /* Exception Queue */
1503 i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1504 writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1505 writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
1506 iadev->reass_reg+EXCP_Q_ED_ADR);
1507 writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1508 writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);
1509
1510 /* Load local copy of FREEQ and PCQ ptrs */
1511 iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1512 iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1513 iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1514 iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1515 iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1516 iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1517 iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1518 iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1519
1520 IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
1521 iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
1522 iadev->rfL.pcq_wr);)
1523 /* just for check - no VP TBL */
1524 /* VP Table */
1525 /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
1526 /* initialize VP Table for invalid VPIs
1527 - I guess we can write all 1s or 0x000f in the entire memory
1528 space or something similar.
1529 */
1530
1531 /* This seems to work and looks right to me too !!! */
1532 i = REASS_TABLE * iadev->memSize;
1533 writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
1534 /* initialize Reassembly table to I don't know what ???? */
1535 reass_table = (u16 *)(iadev->reass_ram+i);
1536 j = REASS_TABLE_SZ * iadev->memSize;
1537 for(i=0; i < j; i++)
1538 *reass_table++ = NO_AAL5_PKT;
1539 i = 8*1024;
1540 vcsize_sel = 0;
1541 while (i != iadev->num_vc) {
1542 i /= 2;
1543 vcsize_sel++;
1544 }
1545 i = RX_VC_TABLE * iadev->memSize;
1546 writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1547 vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
1548 j = RX_VC_TABLE_SZ * iadev->memSize;
1549 for(i = 0; i < j; i++)
1550 {
1551 /* shift the reassembly pointer by 3 + lower 3 bits of
1552 vc_lkup_base register (=3 for 1K VCs) and the last byte
1553 is those low 3 bits.
1554 Shall program this later.
1555 */
1556 *vc_table = (i << 6) | 15; /* for invalid VCI */
1557 vc_table++;
1558 }
1559 /* ABR VC table */
1560 i = ABR_VC_TABLE * iadev->memSize;
1561 writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1562
1563 i = ABR_VC_TABLE * iadev->memSize;
1564 abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
1565 j = REASS_TABLE_SZ * iadev->memSize;
1566 memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1567 for(i = 0; i < j; i++) {
1568 abr_vc_table->rdf = 0x0003;
1569 abr_vc_table->air = 0x5eb1;
1570 abr_vc_table++;
1571 }
1572
1573 /* Initialize other registers */
1574
1575 /* VP Filter Register set for VC Reassembly only */
1576 writew(0xff00, iadev->reass_reg+VP_FILTER);
1577 writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1578 writew(0x1, iadev->reass_reg+PROTOCOL_ID);
1579
1580 /* Packet Timeout Count related Registers :
1581 Set packet timeout to occur in about 3 seconds
1582 Set Packet Aging Interval count register to overflow in about 4 us
1583 */
1584 writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1585 ptr16 = (u16*)j;
1586 i = ((u32)ptr16 >> 6) & 0xff;
1587 ptr16 += j - 1;
1588 i |=(((u32)ptr16 << 2) & 0xff00);
1589 writew(i, iadev->reass_reg+TMOUT_RANGE);
1590 /* initiate the desc_tble */
1591 for(i=0; i<iadev->num_tx_desc;i++)
1592 iadev->desc_tbl[i].timestamp = 0;
1593
1594 /* to clear the interrupt status register - read it */
1595 readw(iadev->reass_reg+REASS_INTR_STATUS_REG);
1596
1597 /* Mask Register - clear it */
1598 writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);
1599
1600 skb_queue_head_init(&iadev->rx_dma_q);
1601 iadev->rx_free_desc_qhead = NULL;
f7141761
MK
1602
1603 iadev->rx_open = kzalloc(4 * iadev->num_vc, GFP_KERNEL);
1604 if (!iadev->rx_open) {
1da177e4
LT
1605 printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
1606 dev->number);
1607 goto err_free_dle;
1608 }
f7141761 1609
1da177e4
LT
1610 iadev->rxing = 1;
1611 iadev->rx_pkt_cnt = 0;
1612 /* Mode Register */
1613 writew(R_ONLINE, iadev->reass_reg+MODE_REG);
1614 return 0;
1615
1616err_free_dle:
1617 pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1618 iadev->rx_dle_dma);
1619err_out:
1620 return -ENOMEM;
1621}
1622
1623
1624/*
1625 The memory map suggested in appendix A and the coding for it.
1626 Keeping it around just in case we change our mind later.
1627
1628 Buffer descr 0x0000 (128 - 4K)
1629 UBR sched 0x1000 (1K - 4K)
1630 UBR Wait q 0x2000 (1K - 4K)
	Commn queues	0x3000	Packet Ready, Transmit comp(0x3100)
1632 (128 - 256) each
1633 extended VC 0x4000 (1K - 8K)
1634 ABR sched 0x6000 and ABR wait queue (1K - 2K) each
1635 CBR sched 0x7000 (as needed)
1636 VC table 0x8000 (1K - 32K)
1637*/
1638
/*
 * tx_intr - segmentation (transmit) interrupt handler.
 *
 * On TRANSMIT_DONE: reap completed transmit descriptors via ia_tx_poll()
 * under tx_lock, acknowledge the interrupt afterwards, and wake any thread
 * in close_wait that is draining the VC.  TCQ_NOT_EMPTY is only logged.
 */
static void tx_intr(struct atm_dev *dev)
{
	IADEV *iadev;
	unsigned short status;
	unsigned long flags;

	iadev = INPH_IA_DEV(dev);

	status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
	if (status & TRANSMIT_DONE){

	   IF_EVENT(printk("Tansmit Done Intr logic run\n");)
	   spin_lock_irqsave(&iadev->tx_lock, flags);
	   ia_tx_poll(iadev);
	   spin_unlock_irqrestore(&iadev->tx_lock, flags);
	   /* ack only after the completed descriptors were polled */
	   writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
	   if (iadev->close_pending)
	       wake_up(&iadev->close_wait);
	}
	if (status & TCQ_NOT_EMPTY)
	{
	   IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
	}
}
1663
/*
 * tx_dle_intr - TX DMA-list-engine completion handler.
 *
 * Walks the TX DLE ring from our read pointer up to the hardware's current
 * list position, dequeueing the matching skbs from tx_dma_q.  Each packet
 * uses two DLEs (payload skb + CPCS trailer), so the PCI mapping is only
 * unmapped on the first of the pair.  Completed skbs are either handed back
 * to the owner (vcc->pop, or freed), or - for rate-limited VCs - held on
 * the per-VC txing_skb queue for later flow-control release (IA_DLED).
 * Runs entirely under tx_lock.
 */
static void tx_dle_intr(struct atm_dev *dev)
{
        IADEV *iadev;
        struct dle *dle, *cur_dle;
        struct sk_buff *skb;
        struct atm_vcc *vcc;
        struct ia_vcc  *iavcc;
        u_int dle_lp;
        unsigned long flags;

        iadev = INPH_IA_DEV(dev);
        spin_lock_irqsave(&iadev->tx_lock, flags);
        dle = iadev->tx_dle_q.read;
        dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
                                        (sizeof(struct dle)*DLE_ENTRIES - 1);
        cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
        while (dle != cur_dle)
        {
            /* free the DMAed skb */
            skb = skb_dequeue(&iadev->tx_dma_q);
            if (!skb) break;

	    /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
	    if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
		pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
				 PCI_DMA_TODEVICE);
	    }
            vcc = ATM_SKB(skb)->vcc;
            if (!vcc) {
                  printk("tx_dle_intr: vcc is null\n");
		  spin_unlock_irqrestore(&iadev->tx_lock, flags);
                  dev_kfree_skb_any(skb);

                  return;
            }
            iavcc = INPH_IA_VCC(vcc);
            if (!iavcc) {
                  printk("tx_dle_intr: iavcc is null\n");
		  spin_unlock_irqrestore(&iadev->tx_lock, flags);
                  dev_kfree_skb_any(skb);
                  return;
            }
            if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
               if ((vcc->pop) && (skb->len != 0))
               {
                 vcc->pop(vcc, skb);
               }
               else {
                 dev_kfree_skb_any(skb);
               }
            }
            else { /* Hold the rate-limited skb for flow control */
               IA_SKB_STATE(skb) |= IA_DLED;
               skb_queue_tail(&iavcc->txing_skb, skb);
            }
            IF_EVENT(printk("tx_dle_intr: enque skb = 0x%x \n", (u32)skb);)
            if (++dle == iadev->tx_dle_q.end)
                 dle = iadev->tx_dle_q.start;
        }
        iadev->tx_dle_q.read = dle;
        spin_unlock_irqrestore(&iadev->tx_lock, flags);
}
1726
1727static int open_tx(struct atm_vcc *vcc)
1728{
1729 struct ia_vcc *ia_vcc;
1730 IADEV *iadev;
1731 struct main_vc *vc;
1732 struct ext_vc *evc;
1733 int ret;
1734 IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
1735 if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
1736 iadev = INPH_IA_DEV(vcc->dev);
1737
1738 if (iadev->phy_type & FE_25MBIT_PHY) {
1739 if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1740 printk("IA: ABR not support\n");
1741 return -EINVAL;
1742 }
1743 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1744 printk("IA: CBR not support\n");
1745 return -EINVAL;
1746 }
1747 }
1748 ia_vcc = INPH_IA_VCC(vcc);
1749 memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
1750 if (vcc->qos.txtp.max_sdu >
1751 (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1752 printk("IA: SDU size over (%d) the configured SDU size %d\n",
1753 vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
1754 vcc->dev_data = NULL;
1755 kfree(ia_vcc);
1756 return -EINVAL;
1757 }
1758 ia_vcc->vc_desc_cnt = 0;
1759 ia_vcc->txing = 1;
1760
1761 /* find pcr */
1762 if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
1763 vcc->qos.txtp.pcr = iadev->LineRate;
1764 else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1765 vcc->qos.txtp.pcr = iadev->LineRate;
1766 else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0))
1767 vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1768 if (vcc->qos.txtp.pcr > iadev->LineRate)
1769 vcc->qos.txtp.pcr = iadev->LineRate;
1770 ia_vcc->pcr = vcc->qos.txtp.pcr;
1771
1772 if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1773 else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1774 else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1775 else ia_vcc->ltimeout = 2700 * HZ / ia_vcc->pcr;
1776 if (ia_vcc->pcr < iadev->rate_limit)
1777 skb_queue_head_init (&ia_vcc->txing_skb);
1778 if (ia_vcc->pcr < iadev->rate_limit) {
1779 struct sock *sk = sk_atm(vcc);
1780
1781 if (vcc->qos.txtp.max_sdu != 0) {
1782 if (ia_vcc->pcr > 60000)
1783 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1784 else if (ia_vcc->pcr > 2000)
1785 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1786 else
1787 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1788 }
1789 else
1790 sk->sk_sndbuf = 24576;
1791 }
1792
1793 vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
1794 evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
1795 vc += vcc->vci;
1796 evc += vcc->vci;
1797 memset((caddr_t)vc, 0, sizeof(*vc));
1798 memset((caddr_t)evc, 0, sizeof(*evc));
1799
1800 /* store the most significant 4 bits of vci as the last 4 bits
1801 of first part of atm header.
1802 store the last 12 bits of vci as first 12 bits of the second
1803 part of the atm header.
1804 */
1805 evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
1806 evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;
1807
1808 /* check the following for different traffic classes */
1809 if (vcc->qos.txtp.traffic_class == ATM_UBR)
1810 {
1811 vc->type = UBR;
1812 vc->status = CRC_APPEND;
1813 vc->acr = cellrate_to_float(iadev->LineRate);
1814 if (vcc->qos.txtp.pcr > 0)
1815 vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
1816 IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
1817 vcc->qos.txtp.max_pcr,vc->acr);)
1818 }
1819 else if (vcc->qos.txtp.traffic_class == ATM_ABR)
1820 { srv_cls_param_t srv_p;
1821 IF_ABR(printk("Tx ABR VCC\n");)
1822 init_abr_vc(iadev, &srv_p);
1823 if (vcc->qos.txtp.pcr > 0)
1824 srv_p.pcr = vcc->qos.txtp.pcr;
1825 if (vcc->qos.txtp.min_pcr > 0) {
1826 int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1827 if (tmpsum > iadev->LineRate)
1828 return -EBUSY;
1829 srv_p.mcr = vcc->qos.txtp.min_pcr;
1830 iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1831 }
1832 else srv_p.mcr = 0;
1833 if (vcc->qos.txtp.icr)
1834 srv_p.icr = vcc->qos.txtp.icr;
1835 if (vcc->qos.txtp.tbe)
1836 srv_p.tbe = vcc->qos.txtp.tbe;
1837 if (vcc->qos.txtp.frtt)
1838 srv_p.frtt = vcc->qos.txtp.frtt;
1839 if (vcc->qos.txtp.rif)
1840 srv_p.rif = vcc->qos.txtp.rif;
1841 if (vcc->qos.txtp.rdf)
1842 srv_p.rdf = vcc->qos.txtp.rdf;
1843 if (vcc->qos.txtp.nrm_pres)
1844 srv_p.nrm = vcc->qos.txtp.nrm;
1845 if (vcc->qos.txtp.trm_pres)
1846 srv_p.trm = vcc->qos.txtp.trm;
1847 if (vcc->qos.txtp.adtf_pres)
1848 srv_p.adtf = vcc->qos.txtp.adtf;
1849 if (vcc->qos.txtp.cdf_pres)
1850 srv_p.cdf = vcc->qos.txtp.cdf;
1851 if (srv_p.icr > srv_p.pcr)
1852 srv_p.icr = srv_p.pcr;
1853 IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d mcr = %d\n",
1854 srv_p.pcr, srv_p.mcr);)
1855 ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1856 } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1857 if (iadev->phy_type & FE_25MBIT_PHY) {
1858 printk("IA: CBR not support\n");
1859 return -EINVAL;
1860 }
1861 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1862 IF_CBR(printk("PCR is not availble\n");)
1863 return -1;
1864 }
1865 vc->type = CBR;
1866 vc->status = CRC_APPEND;
1867 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
1868 return ret;
1869 }
1870 }
1871 else
1872 printk("iadev: Non UBR, ABR and CBR traffic not supportedn");
1873
1874 iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1875 IF_EVENT(printk("ia open_tx returning \n");)
1876 return 0;
1877}
1878
1879
1880static int tx_init(struct atm_dev *dev)
1881{
1882 IADEV *iadev;
1883 struct tx_buf_desc *buf_desc_ptr;
1884 unsigned int tx_pkt_start;
1885 void *dle_addr;
1886 int i;
1887 u_short tcq_st_adr;
1888 u_short *tcq_start;
1889 u_short prq_st_adr;
1890 u_short *prq_start;
1891 struct main_vc *vc;
1892 struct ext_vc *evc;
1893 u_short tmp16;
1894 u32 vcsize_sel;
1895
1896 iadev = INPH_IA_DEV(dev);
1897 spin_lock_init(&iadev->tx_lock);
1898
1899 IF_INIT(printk("Tx MASK REG: 0x%0x\n",
1900 readw(iadev->seg_reg+SEG_MASK_REG));)
1901
1902 /* Allocate 4k (boundary aligned) bytes */
1903 dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1904 &iadev->tx_dle_dma);
1905 if (!dle_addr) {
1906 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1907 goto err_out;
1908 }
1909 iadev->tx_dle_q.start = (struct dle*)dle_addr;
1910 iadev->tx_dle_q.read = iadev->tx_dle_q.start;
1911 iadev->tx_dle_q.write = iadev->tx_dle_q.start;
1912 iadev->tx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1913
1914 /* write the upper 20 bits of the start address to tx list address register */
1915 writel(iadev->tx_dle_dma & 0xfffff000,
1916 iadev->dma + IPHASE5575_TX_LIST_ADDR);
1917 writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
1918 writew(0, iadev->seg_reg+MODE_REG_0);
1919 writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
1920 iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1921 iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1922 iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1923
1924 /*
1925 Transmit side control memory map
1926 --------------------------------
1927 Buffer descr 0x0000 (128 - 4K)
1928 Commn queues 0x1000 Transmit comp, Packet ready(0x1400)
1929 (512 - 1K) each
1930 TCQ - 4K, PRQ - 5K
1931 CBR Table 0x1800 (as needed) - 6K
1932 UBR Table 0x3000 (1K - 4K) - 12K
1933 UBR Wait queue 0x4000 (1K - 4K) - 16K
1934 ABR sched 0x5000 and ABR wait queue (1K - 2K) each
1935 ABR Tbl - 20K, ABR Wq - 22K
1936 extended VC 0x6000 (1K - 8K) - 24K
1937 VC Table 0x8000 (1K - 32K) - 32K
1938
1939 Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
1940 and Wait q, which can be allotted later.
1941 */
1942
1943 /* Buffer Descriptor Table Base address */
1944 writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);
1945
1946 /* initialize each entry in the buffer descriptor table */
1947 buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
1948 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1949 buf_desc_ptr++;
1950 tx_pkt_start = TX_PACKET_RAM;
1951 for(i=1; i<=iadev->num_tx_desc; i++)
1952 {
1953 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1954 buf_desc_ptr->desc_mode = AAL5;
1955 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
1956 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
1957 buf_desc_ptr++;
1958 tx_pkt_start += iadev->tx_buf_sz;
1959 }
1960 iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
1961 if (!iadev->tx_buf) {
1962 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1963 goto err_free_dle;
1964 }
1965 for (i= 0; i< iadev->num_tx_desc; i++)
1966 {
1967 struct cpcs_trailer *cpcs;
1968
1969 cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1970 if(!cpcs) {
1971 printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
1972 goto err_free_tx_bufs;
1973 }
1974 iadev->tx_buf[i].cpcs = cpcs;
1975 iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
1976 cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
1977 }
1978 iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
1979 sizeof(struct desc_tbl_t), GFP_KERNEL);
1980 if (!iadev->desc_tbl) {
1981 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1982 goto err_free_all_tx_bufs;
1983 }
1984
1985 /* Communication Queues base address */
1986 i = TX_COMP_Q * iadev->memSize;
1987 writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);
1988
1989 /* Transmit Complete Queue */
1990 writew(i, iadev->seg_reg+TCQ_ST_ADR);
1991 writew(i, iadev->seg_reg+TCQ_RD_PTR);
1992 writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR);
1993 iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
1994 writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
1995 iadev->seg_reg+TCQ_ED_ADR);
1996 /* Fill the TCQ with all the free descriptors. */
1997 tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
1998 tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
1999 for(i=1; i<=iadev->num_tx_desc; i++)
2000 {
2001 *tcq_start = (u_short)i;
2002 tcq_start++;
2003 }
2004
2005 /* Packet Ready Queue */
2006 i = PKT_RDY_Q * iadev->memSize;
2007 writew(i, iadev->seg_reg+PRQ_ST_ADR);
2008 writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
2009 iadev->seg_reg+PRQ_ED_ADR);
2010 writew(i, iadev->seg_reg+PRQ_RD_PTR);
2011 writew(i, iadev->seg_reg+PRQ_WR_PTR);
2012
2013 /* Load local copy of PRQ and TCQ ptrs */
2014 iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2015 iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2016 iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2017
2018 iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2019 iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2020 iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2021
2022 /* Just for safety initializing the queue to have desc 1 always */
2023 /* Fill the PRQ with all the free descriptors. */
2024 prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
2025 prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
2026 for(i=1; i<=iadev->num_tx_desc; i++)
2027 {
2028 *prq_start = (u_short)0; /* desc 1 in all entries */
2029 prq_start++;
2030 }
2031 /* CBR Table */
2032 IF_INIT(printk("Start CBR Init\n");)
2033#if 1 /* for 1K VC board, CBR_PTR_BASE is 0 */
2034 writew(0,iadev->seg_reg+CBR_PTR_BASE);
2035#else /* Charlie's logic is wrong ? */
2036 tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2037 IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2038 writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2039#endif
2040
2041 IF_INIT(printk("value in register = 0x%x\n",
2042 readw(iadev->seg_reg+CBR_PTR_BASE));)
2043 tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2044 writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2045 IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2046 readw(iadev->seg_reg+CBR_TAB_BEG));)
2047 writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2048 tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2049 writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2050 IF_INIT(printk("iadev->seg_reg = 0x%x CBR_PTR_BASE = 0x%x\n",
2051 (u32)iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2052 IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2053 readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2054 readw(iadev->seg_reg+CBR_TAB_END+1));)
2055
2056 /* Initialize the CBR Schedualing Table */
2057 memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize,
2058 0, iadev->num_vc*6);
2059 iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2060 iadev->CbrEntryPt = 0;
2061 iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2062 iadev->NumEnabledCBR = 0;
2063
2064 /* UBR scheduling Table and wait queue */
2065 /* initialize all bytes of UBR scheduler table and wait queue to 0
2066 - SCHEDSZ is 1K (# of entries).
2067 - UBR Table size is 4K
2068 - UBR wait queue is 4K
2069 since the table and wait queues are contiguous, all the bytes
2070 can be initialized by one memeset.
2071 */
2072
2073 vcsize_sel = 0;
2074 i = 8*1024;
2075 while (i != iadev->num_vc) {
2076 i /= 2;
2077 vcsize_sel++;
2078 }
2079
2080 i = MAIN_VC_TABLE * iadev->memSize;
2081 writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2082 i = EXT_VC_TABLE * iadev->memSize;
2083 writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2084 i = UBR_SCHED_TABLE * iadev->memSize;
2085 writew((i & 0xffff) >> 11, iadev->seg_reg+UBR_SBPTR_BASE);
2086 i = UBR_WAIT_Q * iadev->memSize;
2087 writew((i >> 7) & 0xffff, iadev->seg_reg+UBRWQ_BASE);
2088 memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2089 0, iadev->num_vc*8);
2090 /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
2091 /* initialize all bytes of ABR scheduler table and wait queue to 0
2092 - SCHEDSZ is 1K (# of entries).
2093 - ABR Table size is 2K
2094 - ABR wait queue is 2K
2095 since the table and wait queues are contiguous, all the bytes
2096 can be intialized by one memeset.
2097 */
2098 i = ABR_SCHED_TABLE * iadev->memSize;
2099 writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2100 i = ABR_WAIT_Q * iadev->memSize;
2101 writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2102
2103 i = ABR_SCHED_TABLE*iadev->memSize;
2104 memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
2105 vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
2106 evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
2107 iadev->testTable = kmalloc(sizeof(long)*iadev->num_vc, GFP_KERNEL);
2108 if (!iadev->testTable) {
2109 printk("Get freepage failed\n");
2110 goto err_free_desc_tbl;
2111 }
2112 for(i=0; i<iadev->num_vc; i++)
2113 {
2114 memset((caddr_t)vc, 0, sizeof(*vc));
2115 memset((caddr_t)evc, 0, sizeof(*evc));
2116 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2117 GFP_KERNEL);
2118 if (!iadev->testTable[i])
2119 goto err_free_test_tables;
2120 iadev->testTable[i]->lastTime = 0;
2121 iadev->testTable[i]->fract = 0;
2122 iadev->testTable[i]->vc_status = VC_UBR;
2123 vc++;
2124 evc++;
2125 }
2126
2127 /* Other Initialization */
2128
2129 /* Max Rate Register */
2130 if (iadev->phy_type & FE_25MBIT_PHY) {
2131 writew(RATE25, iadev->seg_reg+MAXRATE);
2132 writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
2133 }
2134 else {
2135 writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2136 writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
2137 }
2138 /* Set Idle Header Reigisters to be sure */
2139 writew(0, iadev->seg_reg+IDLEHEADHI);
2140 writew(0, iadev->seg_reg+IDLEHEADLO);
2141
2142 /* Program ABR UBR Priority Register as PRI_ABR_UBR_EQUAL */
2143 writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);
2144
2145 iadev->close_pending = 0;
2146 init_waitqueue_head(&iadev->close_wait);
2147 init_waitqueue_head(&iadev->timeout_wait);
2148 skb_queue_head_init(&iadev->tx_dma_q);
2149 ia_init_rtn_q(&iadev->tx_return_q);
2150
2151 /* RM Cell Protocol ID and Message Type */
2152 writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
2153 skb_queue_head_init (&iadev->tx_backlog);
2154
2155 /* Mode Register 1 */
2156 writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);
2157
2158 /* Mode Register 0 */
2159 writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);
2160
2161 /* Interrupt Status Register - read to clear */
2162 readw(iadev->seg_reg+SEG_INTR_STATUS_REG);
2163
2164 /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
2165 writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2166 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2167 iadev->tx_pkt_cnt = 0;
2168 iadev->rate_limit = iadev->LineRate / 3;
2169
2170 return 0;
2171
2172err_free_test_tables:
2173 while (--i >= 0)
2174 kfree(iadev->testTable[i]);
2175 kfree(iadev->testTable);
2176err_free_desc_tbl:
2177 kfree(iadev->desc_tbl);
2178err_free_all_tx_bufs:
2179 i = iadev->num_tx_desc;
2180err_free_tx_bufs:
2181 while (--i >= 0) {
2182 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2183
2184 pci_unmap_single(iadev->pci, desc->dma_addr,
2185 sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2186 kfree(desc->cpcs);
2187 }
2188 kfree(iadev->tx_buf);
2189err_free_dle:
2190 pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2191 iadev->tx_dle_dma);
2192err_out:
2193 return -ENOMEM;
2194}
2195
7d12e780 2196static irqreturn_t ia_int(int irq, void *dev_id)
1da177e4
LT
2197{
2198 struct atm_dev *dev;
2199 IADEV *iadev;
2200 unsigned int status;
2201 int handled = 0;
2202
2203 dev = dev_id;
2204 iadev = INPH_IA_DEV(dev);
2205 while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
2206 {
2207 handled = 1;
2208 IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
2209 if (status & STAT_REASSINT)
2210 {
2211 /* do something */
2212 IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
2213 rx_intr(dev);
2214 }
2215 if (status & STAT_DLERINT)
2216 {
2217 /* Clear this bit by writing a 1 to it. */
2218 *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
2219 rx_dle_intr(dev);
2220 }
2221 if (status & STAT_SEGINT)
2222 {
2223 /* do something */
2224 IF_EVENT(printk("IA: tx_intr \n");)
2225 tx_intr(dev);
2226 }
2227 if (status & STAT_DLETINT)
2228 {
2229 *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;
2230 tx_dle_intr(dev);
2231 }
2232 if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
2233 {
2234 if (status & STAT_FEINT)
2235 IaFrontEndIntr(iadev);
2236 }
2237 }
2238 return IRQ_RETVAL(handled);
2239}
2240
2241
2242
2243/*----------------------------- entries --------------------------------*/
2244static int get_esi(struct atm_dev *dev)
2245{
2246 IADEV *iadev;
2247 int i;
2248 u32 mac1;
2249 u16 mac2;
2250
2251 iadev = INPH_IA_DEV(dev);
2252 mac1 = cpu_to_be32(le32_to_cpu(readl(
2253 iadev->reg+IPHASE5575_MAC1)));
2254 mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
2255 IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
2256 for (i=0; i<MAC1_LEN; i++)
2257 dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));
2258
2259 for (i=0; i<MAC2_LEN; i++)
2260 dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
2261 return 0;
2262}
2263
2264static int reset_sar(struct atm_dev *dev)
2265{
2266 IADEV *iadev;
2267 int i, error = 1;
2268 unsigned int pci[64];
2269
2270 iadev = INPH_IA_DEV(dev);
2271 for(i=0; i<64; i++)
2272 if ((error = pci_read_config_dword(iadev->pci,
2273 i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
2274 return error;
2275 writel(0, iadev->reg+IPHASE5575_EXT_RESET);
2276 for(i=0; i<64; i++)
2277 if ((error = pci_write_config_dword(iadev->pci,
2278 i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
2279 return error;
2280 udelay(5);
2281 return 0;
2282}
2283
2284
249c14b5 2285static int __devinit ia_init(struct atm_dev *dev)
1da177e4
LT
2286{
2287 IADEV *iadev;
2288 unsigned long real_base;
2289 void __iomem *base;
2290 unsigned short command;
1da177e4
LT
2291 int error, i;
2292
2293 /* The device has been identified and registered. Now we read
2294 necessary configuration info like memory base address,
2295 interrupt number etc */
2296
2297 IF_INIT(printk(">ia_init\n");)
2298 dev->ci_range.vpi_bits = 0;
2299 dev->ci_range.vci_bits = NR_VCI_LD;
2300
2301 iadev = INPH_IA_DEV(dev);
2302 real_base = pci_resource_start (iadev->pci, 0);
2303 iadev->irq = iadev->pci->irq;
2304
44c10138
AK
2305 error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
2306 if (error) {
1da177e4
LT
2307 printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
2308 dev->number,error);
2309 return -EINVAL;
2310 }
2311 IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
44c10138 2312 dev->number, iadev->pci->revision, real_base, iadev->irq);)
1da177e4
LT
2313
2314 /* find mapping size of board */
2315
2316 iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
2317
2318 if (iadev->pci_map_size == 0x100000){
2319 iadev->num_vc = 4096;
2320 dev->ci_range.vci_bits = NR_VCI_4K_LD;
2321 iadev->memSize = 4;
2322 }
2323 else if (iadev->pci_map_size == 0x40000) {
2324 iadev->num_vc = 1024;
2325 iadev->memSize = 1;
2326 }
2327 else {
2328 printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2329 return -EINVAL;
2330 }
2331 IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)
2332
2333 /* enable bus mastering */
2334 pci_set_master(iadev->pci);
2335
2336 /*
2337 * Delay at least 1us before doing any mem accesses (how 'bout 10?)
2338 */
2339 udelay(10);
2340
2341 /* mapping the physical address to a virtual address in address space */
2342 base = ioremap(real_base,iadev->pci_map_size); /* ioremap is not resolved ??? */
2343
2344 if (!base)
2345 {
2346 printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
2347 dev->number);
2348 return error;
2349 }
2350 IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
44c10138 2351 dev->number, iadev->pci->revision, base, iadev->irq);)
1da177e4
LT
2352
2353 /* filling the iphase dev structure */
2354 iadev->mem = iadev->pci_map_size /2;
2355 iadev->real_base = real_base;
2356 iadev->base = base;
2357
2358 /* Bus Interface Control Registers */
2359 iadev->reg = base + REG_BASE;
2360 /* Segmentation Control Registers */
2361 iadev->seg_reg = base + SEG_BASE;
2362 /* Reassembly Control Registers */
2363 iadev->reass_reg = base + REASS_BASE;
2364 /* Front end/ DMA control registers */
2365 iadev->phy = base + PHY_BASE;
2366 iadev->dma = base + PHY_BASE;
2367 /* RAM - Segmentation RAm and Reassembly RAM */
2368 iadev->ram = base + ACTUAL_RAM_BASE;
2369 iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;
2370 iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;
2371
2372 /* lets print out the above */
2373 IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n",
2374 iadev->reg,iadev->seg_reg,iadev->reass_reg,
2375 iadev->phy, iadev->ram, iadev->seg_ram,
2376 iadev->reass_ram);)
2377
2378 /* lets try reading the MAC address */
2379 error = get_esi(dev);
2380 if (error) {
2381 iounmap(iadev->base);
2382 return error;
2383 }
2384 printk("IA: ");
2385 for (i=0; i < ESI_LEN; i++)
2386 printk("%s%02X",i ? "-" : "",dev->esi[i]);
2387 printk("\n");
2388
2389 /* reset SAR */
2390 if (reset_sar(dev)) {
2391 iounmap(iadev->base);
2392 printk("IA: reset SAR fail, please try again\n");
2393 return 1;
2394 }
2395 return 0;
2396}
2397
2398static void ia_update_stats(IADEV *iadev) {
2399 if (!iadev->carrier_detect)
2400 return;
2401 iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2402 iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2403 iadev->drop_rxpkt += readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2404 iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2405 iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2406 iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2407 return;
2408}
2409
2410static void ia_led_timer(unsigned long arg) {
2411 unsigned long flags;
2412 static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2413 u_char i;
2414 static u32 ctrl_reg;
2415 for (i = 0; i < iadev_count; i++) {
2416 if (ia_dev[i]) {
2417 ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2418 if (blinking[i] == 0) {
2419 blinking[i]++;
2420 ctrl_reg &= (~CTRL_LED);
2421 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2422 ia_update_stats(ia_dev[i]);
2423 }
2424 else {
2425 blinking[i] = 0;
2426 ctrl_reg |= CTRL_LED;
2427 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2428 spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2429 if (ia_dev[i]->close_pending)
2430 wake_up(&ia_dev[i]->close_wait);
2431 ia_tx_poll(ia_dev[i]);
2432 spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2433 }
2434 }
2435 }
2436 mod_timer(&ia_timer, jiffies + HZ / 4);
2437 return;
2438}
2439
2440static void ia_phy_put(struct atm_dev *dev, unsigned char value,
2441 unsigned long addr)
2442{
2443 writel(value, INPH_IA_DEV(dev)->phy+addr);
2444}
2445
2446static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
2447{
2448 return readl(INPH_IA_DEV(dev)->phy+addr);
2449}
2450
2451static void ia_free_tx(IADEV *iadev)
2452{
2453 int i;
2454
2455 kfree(iadev->desc_tbl);
2456 for (i = 0; i < iadev->num_vc; i++)
2457 kfree(iadev->testTable[i]);
2458 kfree(iadev->testTable);
2459 for (i = 0; i < iadev->num_tx_desc; i++) {
2460 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2461
2462 pci_unmap_single(iadev->pci, desc->dma_addr,
2463 sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2464 kfree(desc->cpcs);
2465 }
2466 kfree(iadev->tx_buf);
2467 pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2468 iadev->tx_dle_dma);
2469}
2470
2471static void ia_free_rx(IADEV *iadev)
2472{
2473 kfree(iadev->rx_open);
2474 pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2475 iadev->rx_dle_dma);
2476}
2477
249c14b5 2478static int __devinit ia_start(struct atm_dev *dev)
1da177e4
LT
2479{
2480 IADEV *iadev;
2481 int error;
2482 unsigned char phy;
2483 u32 ctrl_reg;
2484 IF_EVENT(printk(">ia_start\n");)
2485 iadev = INPH_IA_DEV(dev);
dace1453 2486 if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
1da177e4
LT
2487 printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
2488 dev->number, iadev->irq);
2489 error = -EAGAIN;
2490 goto err_out;
2491 }
2492 /* @@@ should release IRQ on error */
2493 /* enabling memory + master */
2494 if ((error = pci_write_config_word(iadev->pci,
2495 PCI_COMMAND,
2496 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))
2497 {
2498 printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
2499 "master (0x%x)\n",dev->number, error);
2500 error = -EIO;
2501 goto err_free_irq;
2502 }
2503 udelay(10);
2504
2505 /* Maybe we should reset the front end, initialize Bus Interface Control
2506 Registers and see. */
2507
2508 IF_INIT(printk("Bus ctrl reg: %08x\n",
2509 readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
2510 ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2511 ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
2512 | CTRL_B8
2513 | CTRL_B16
2514 | CTRL_B32
2515 | CTRL_B48
2516 | CTRL_B64
2517 | CTRL_B128
2518 | CTRL_ERRMASK
2519 | CTRL_DLETMASK /* shud be removed l8r */
2520 | CTRL_DLERMASK
2521 | CTRL_SEGMASK
2522 | CTRL_REASSMASK
2523 | CTRL_FEMASK
2524 | CTRL_CSPREEMPT;
2525
2526 writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2527
2528 IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
2529 readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
2530 printk("Bus status reg after init: %08x\n",
2531 readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)
2532
2533 ia_hw_type(iadev);
2534 error = tx_init(dev);
2535 if (error)
2536 goto err_free_irq;
2537 error = rx_init(dev);
2538 if (error)
2539 goto err_free_tx;
2540
2541 ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2542 writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2543 IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
2544 readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
2545 phy = 0; /* resolve compiler complaint */
2546 IF_INIT (
2547 if ((phy=ia_phy_get(dev,0)) == 0x30)
2548 printk("IA: pm5346,rev.%d\n",phy&0x0f);
2549 else
2550 printk("IA: utopia,rev.%0x\n",phy);)
2551
2552 if (iadev->phy_type & FE_25MBIT_PHY)
2553 ia_mb25_init(iadev);
2554 else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2555 ia_suni_pm7345_init(iadev);
2556 else {
2557 error = suni_init(dev);
2558 if (error)
2559 goto err_free_rx;
d6c1d704
JBD
2560 if (dev->phy->start) {
2561 error = dev->phy->start(dev);
2562 if (error)
2563 goto err_free_rx;
2564 }
1da177e4
LT
2565 /* Get iadev->carrier_detect status */
2566 IaFrontEndIntr(iadev);
2567 }
2568 return 0;
2569
2570err_free_rx:
2571 ia_free_rx(iadev);
2572err_free_tx:
2573 ia_free_tx(iadev);
2574err_free_irq:
2575 free_irq(iadev->irq, dev);
2576err_out:
2577 return error;
2578}
2579
2580static void ia_close(struct atm_vcc *vcc)
2581{
2582 DEFINE_WAIT(wait);
2583 u16 *vc_table;
2584 IADEV *iadev;
2585 struct ia_vcc *ia_vcc;
2586 struct sk_buff *skb = NULL;
2587 struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2588 unsigned long closetime, flags;
2589
2590 iadev = INPH_IA_DEV(vcc->dev);
2591 ia_vcc = INPH_IA_VCC(vcc);
2592 if (!ia_vcc) return;
2593
2594 IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d vci = %d\n",
2595 ia_vcc->vc_desc_cnt,vcc->vci);)
2596 clear_bit(ATM_VF_READY,&vcc->flags);
2597 skb_queue_head_init (&tmp_tx_backlog);
2598 skb_queue_head_init (&tmp_vcc_backlog);
2599 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2600 iadev->close_pending++;
2601 prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
2602 schedule_timeout(50);
2603 finish_wait(&iadev->timeout_wait, &wait);
2604 spin_lock_irqsave(&iadev->tx_lock, flags);
2605 while((skb = skb_dequeue(&iadev->tx_backlog))) {
2606 if (ATM_SKB(skb)->vcc == vcc){
2607 if (vcc->pop) vcc->pop(vcc, skb);
2608 else dev_kfree_skb_any(skb);
2609 }
2610 else
2611 skb_queue_tail(&tmp_tx_backlog, skb);
2612 }
2613 while((skb = skb_dequeue(&tmp_tx_backlog)))
2614 skb_queue_tail(&iadev->tx_backlog, skb);
2615 IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);)
2616 closetime = 300000 / ia_vcc->pcr;
2617 if (closetime == 0)
2618 closetime = 1;
2619 spin_unlock_irqrestore(&iadev->tx_lock, flags);
2620 wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
2621 spin_lock_irqsave(&iadev->tx_lock, flags);
2622 iadev->close_pending--;
2623 iadev->testTable[vcc->vci]->lastTime = 0;
2624 iadev->testTable[vcc->vci]->fract = 0;
2625 iadev->testTable[vcc->vci]->vc_status = VC_UBR;
2626 if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2627 if (vcc->qos.txtp.min_pcr > 0)
2628 iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2629 }
2630 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2631 ia_vcc = INPH_IA_VCC(vcc);
2632 iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2633 ia_cbrVc_close (vcc);
2634 }
2635 spin_unlock_irqrestore(&iadev->tx_lock, flags);
2636 }
2637
2638 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2639 // reset reass table
2640 vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2641 vc_table += vcc->vci;
2642 *vc_table = NO_AAL5_PKT;
2643 // reset vc table
2644 vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2645 vc_table += vcc->vci;
2646 *vc_table = (vcc->vci << 6) | 15;
2647 if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2648 struct abr_vc_table __iomem *abr_vc_table =
2649 (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2650 abr_vc_table += vcc->vci;
2651 abr_vc_table->rdf = 0x0003;
2652 abr_vc_table->air = 0x5eb1;
2653 }
2654 // Drain the packets
2655 rx_dle_intr(vcc->dev);
2656 iadev->rx_open[vcc->vci] = NULL;
2657 }
2658 kfree(INPH_IA_VCC(vcc));
2659 ia_vcc = NULL;
2660 vcc->dev_data = NULL;
2661 clear_bit(ATM_VF_ADDR,&vcc->flags);
2662 return;
2663}
2664
2665static int ia_open(struct atm_vcc *vcc)
2666{
2667 IADEV *iadev;
2668 struct ia_vcc *ia_vcc;
2669 int error;
2670 if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
2671 {
2672 IF_EVENT(printk("ia: not partially allocated resources\n");)
2673 vcc->dev_data = NULL;
2674 }
2675 iadev = INPH_IA_DEV(vcc->dev);
2676 if (vcc->vci != ATM_VPI_UNSPEC && vcc->vpi != ATM_VCI_UNSPEC)
2677 {
2678 IF_EVENT(printk("iphase open: unspec part\n");)
2679 set_bit(ATM_VF_ADDR,&vcc->flags);
2680 }
2681 if (vcc->qos.aal != ATM_AAL5)
2682 return -EINVAL;
2683 IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
2684 vcc->dev->number, vcc->vpi, vcc->vci);)
2685
2686 /* Device dependent initialization */
2687 ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
2688 if (!ia_vcc) return -ENOMEM;
2689 vcc->dev_data = ia_vcc;
2690
2691 if ((error = open_rx(vcc)))
2692 {
2693 IF_EVENT(printk("iadev: error in open_rx, closing\n");)
2694 ia_close(vcc);
2695 return error;
2696 }
2697
2698 if ((error = open_tx(vcc)))
2699 {
2700 IF_EVENT(printk("iadev: error in open_tx, closing\n");)
2701 ia_close(vcc);
2702 return error;
2703 }
2704
2705 set_bit(ATM_VF_READY,&vcc->flags);
2706
2707#if 0
2708 {
2709 static u8 first = 1;
2710 if (first) {
2711 ia_timer.expires = jiffies + 3*HZ;
2712 add_timer(&ia_timer);
2713 first = 0;
2714 }
2715 }
2716#endif
2717 IF_EVENT(printk("ia open returning\n");)
2718 return 0;
2719}
2720
2721static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
2722{
2723 IF_EVENT(printk(">ia_change_qos\n");)
2724 return 0;
2725}
2726
2727static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2728{
2729 IA_CMDBUF ia_cmds;
2730 IADEV *iadev;
2731 int i, board;
2732 u16 __user *tmps;
2733 IF_EVENT(printk(">ia_ioctl\n");)
2734 if (cmd != IA_CMD) {
2735 if (!dev->phy->ioctl) return -EINVAL;
2736 return dev->phy->ioctl(dev,cmd,arg);
2737 }
2738 if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
2739 board = ia_cmds.status;
2740 if ((board < 0) || (board > iadev_count))
2741 board = 0;
2742 iadev = ia_dev[board];
2743 switch (ia_cmds.cmd) {
2744 case MEMDUMP:
2745 {
2746 switch (ia_cmds.sub_cmd) {
2747 case MEMDUMP_DEV:
2748 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2749 if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2750 return -EFAULT;
2751 ia_cmds.status = 0;
2752 break;
2753 case MEMDUMP_SEGREG:
2754 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2755 tmps = (u16 __user *)ia_cmds.buf;
2756 for(i=0; i<0x80; i+=2, tmps++)
2757 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2758 ia_cmds.status = 0;
2759 ia_cmds.len = 0x80;
2760 break;
2761 case MEMDUMP_REASSREG:
2762 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2763 tmps = (u16 __user *)ia_cmds.buf;
2764 for(i=0; i<0x80; i+=2, tmps++)
2765 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2766 ia_cmds.status = 0;
2767 ia_cmds.len = 0x80;
2768 break;
2769 case MEMDUMP_FFL:
2770 {
2771 ia_regs_t *regs_local;
2772 ffredn_t *ffL;
2773 rfredn_t *rfL;
2774
2775 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2776 regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2777 if (!regs_local) return -ENOMEM;
2778 ffL = &regs_local->ffredn;
2779 rfL = &regs_local->rfredn;
2780 /* Copy real rfred registers into the local copy */
2781 for (i=0; i<(sizeof (rfredn_t))/4; i++)
2782 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2783 /* Copy real ffred registers into the local copy */
2784 for (i=0; i<(sizeof (ffredn_t))/4; i++)
2785 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2786
2787 if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2788 kfree(regs_local);
2789 return -EFAULT;
2790 }
2791 kfree(regs_local);
2792 printk("Board %d registers dumped\n", board);
2793 ia_cmds.status = 0;
2794 }
2795 break;
2796 case READ_REG:
2797 {
2798 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2799 desc_dbg(iadev);
2800 ia_cmds.status = 0;
2801 }
2802 break;
2803 case 0x6:
2804 {
2805 ia_cmds.status = 0;
2806 printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2807 printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2808 }
2809 break;
2810 case 0x8:
2811 {
2812 struct k_sonet_stats *stats;
2813 stats = &PRIV(_ia_dev[board])->sonet_stats;
2814 printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2815 printk("line_bip : %d\n", atomic_read(&stats->line_bip));
2816 printk("path_bip : %d\n", atomic_read(&stats->path_bip));
2817 printk("line_febe : %d\n", atomic_read(&stats->line_febe));
2818 printk("path_febe : %d\n", atomic_read(&stats->path_febe));
2819 printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
2820 printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2821 printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
2822 printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
2823 }
2824 ia_cmds.status = 0;
2825 break;
2826 case 0x9:
2827 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2828 for (i = 1; i <= iadev->num_rx_desc; i++)
2829 free_desc(_ia_dev[board], i);
2830 writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
2831 iadev->reass_reg+REASS_MASK_REG);
2832 iadev->rxing = 1;
2833
2834 ia_cmds.status = 0;
2835 break;
2836
2837 case 0xb:
2838 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2839 IaFrontEndIntr(iadev);
2840 break;
2841 case 0xa:
2842 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2843 {
2844 ia_cmds.status = 0;
2845 IADebugFlag = ia_cmds.maddr;
2846 printk("New debug option loaded\n");
2847 }
2848 break;
2849 default:
2850 ia_cmds.status = 0;
2851 break;
2852 }
2853 }
2854 break;
2855 default:
2856 break;
2857
2858 }
2859 return 0;
2860}
2861
2862static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,
2863 void __user *optval, int optlen)
2864{
2865 IF_EVENT(printk(">ia_getsockopt\n");)
2866 return -EINVAL;
2867}
2868
2869static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,
2870 void __user *optval, int optlen)
2871{
2872 IF_EVENT(printk(">ia_setsockopt\n");)
2873 return -EINVAL;
2874}
2875
2876static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2877 IADEV *iadev;
2878 struct dle *wr_ptr;
2879 struct tx_buf_desc __iomem *buf_desc_ptr;
2880 int desc;
2881 int comp_code;
2882 int total_len;
2883 struct cpcs_trailer *trailer;
2884 struct ia_vcc *iavcc;
2885
2886 iadev = INPH_IA_DEV(vcc->dev);
2887 iavcc = INPH_IA_VCC(vcc);
2888 if (!iavcc->txing) {
2889 printk("discard packet on closed VC\n");
2890 if (vcc->pop)
2891 vcc->pop(vcc, skb);
2892 else
2893 dev_kfree_skb_any(skb);
2894 return 0;
2895 }
2896
2897 if (skb->len > iadev->tx_buf_sz - 8) {
2898 printk("Transmit size over tx buffer size\n");
2899 if (vcc->pop)
2900 vcc->pop(vcc, skb);
2901 else
2902 dev_kfree_skb_any(skb);
2903 return 0;
2904 }
2905 if ((u32)skb->data & 3) {
2906 printk("Misaligned SKB\n");
2907 if (vcc->pop)
2908 vcc->pop(vcc, skb);
2909 else
2910 dev_kfree_skb_any(skb);
2911 return 0;
2912 }
2913 /* Get a descriptor number from our free descriptor queue
2914 We get the descr number from the TCQ now, since I am using
2915 the TCQ as a free buffer queue. Initially TCQ will be
2916 initialized with all the descriptors and is hence, full.
2917 */
2918 desc = get_desc (iadev, iavcc);
2919 if (desc == 0xffff)
2920 return 1;
2921 comp_code = desc >> 13;
2922 desc &= 0x1fff;
2923
2924 if ((desc == 0) || (desc > iadev->num_tx_desc))
2925 {
2926 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
2927 atomic_inc(&vcc->stats->tx);
2928 if (vcc->pop)
2929 vcc->pop(vcc, skb);
2930 else
2931 dev_kfree_skb_any(skb);
2932 return 0; /* return SUCCESS */
2933 }
2934
2935 if (comp_code)
2936 {
2937 IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
2938 desc, comp_code);)
2939 }
2940
2941 /* remember the desc and vcc mapping */
2942 iavcc->vc_desc_cnt++;
2943 iadev->desc_tbl[desc-1].iavcc = iavcc;
2944 iadev->desc_tbl[desc-1].txskb = skb;
2945 IA_SKB_STATE(skb) = 0;
2946
2947 iadev->ffL.tcq_rd += 2;
2948 if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2949 iadev->ffL.tcq_rd = iadev->ffL.tcq_st;
2950 writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2951
2952 /* Put the descriptor number in the packet ready queue
2953 and put the updated write pointer in the DLE field
2954 */
2955 *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;
2956
2957 iadev->ffL.prq_wr += 2;
2958 if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2959 iadev->ffL.prq_wr = iadev->ffL.prq_st;
2960
2961 /* Figure out the exact length of the packet and padding required to
2962 make it aligned on a 48 byte boundary. */
2963 total_len = skb->len + sizeof(struct cpcs_trailer);
2964 total_len = ((total_len + 47) / 48) * 48;
2965 IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)
2966
2967 /* Put the packet in a tx buffer */
2968 trailer = iadev->tx_buf[desc-1].cpcs;
2969 IF_TX(printk("Sent: skb = 0x%x skb->data: 0x%x len: %d, desc: %d\n",
2970 (u32)skb, (u32)skb->data, skb->len, desc);)
2971 trailer->control = 0;
2972 /*big endian*/
2973 trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
2974 trailer->crc32 = 0; /* not needed - dummy bytes */
2975
2976 /* Display the packet */
2977 IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
2978 skb->len, tcnter++);
2979 xdump(skb->data, skb->len, "TX: ");
2980 printk("\n");)
2981
2982 /* Build the buffer descriptor */
2983 buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
2984 buf_desc_ptr += desc; /* points to the corresponding entry */
2985 buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
2986 /* Huh ? p.115 of users guide describes this as a read-only register */
2987 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2988 buf_desc_ptr->vc_index = vcc->vci;
2989 buf_desc_ptr->bytes = total_len;
2990
2991 if (vcc->qos.txtp.traffic_class == ATM_ABR)
2992 clear_lockup (vcc, iadev);
2993
2994 /* Build the DLE structure */
2995 wr_ptr = iadev->tx_dle_q.write;
2996 memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
2997 wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
2998 skb->len, PCI_DMA_TODEVICE);
2999 wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
3000 buf_desc_ptr->buf_start_lo;
3001 /* wr_ptr->bytes = swap(total_len); didn't seem to affect ?? */
3002 wr_ptr->bytes = skb->len;
3003
3004 /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3005 if ((wr_ptr->bytes >> 2) == 0xb)
3006 wr_ptr->bytes = 0x30;
3007
3008 wr_ptr->mode = TX_DLE_PSI;
3009 wr_ptr->prq_wr_ptr_data = 0;
3010
3011 /* end is not to be used for the DLE q */
3012 if (++wr_ptr == iadev->tx_dle_q.end)
3013 wr_ptr = iadev->tx_dle_q.start;
3014
3015 /* Build trailer dle */
3016 wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3017 wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
3018 buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3019
3020 wr_ptr->bytes = sizeof(struct cpcs_trailer);
3021 wr_ptr->mode = DMA_INT_ENABLE;
3022 wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3023
3024 /* end is not to be used for the DLE q */
3025 if (++wr_ptr == iadev->tx_dle_q.end)
3026 wr_ptr = iadev->tx_dle_q.start;
3027
3028 iadev->tx_dle_q.write = wr_ptr;
3029 ATM_DESC(skb) = vcc->vci;
3030 skb_queue_tail(&iadev->tx_dma_q, skb);
3031
3032 atomic_inc(&vcc->stats->tx);
3033 iadev->tx_pkt_cnt++;
3034 /* Increment transaction counter */
3035 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
3036
3037#if 0
3038 /* add flow control logic */
3039 if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3040 if (iavcc->vc_desc_cnt > 10) {
3041 vcc->tx_quota = vcc->tx_quota * 3 / 4;
3042 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3043 iavcc->flow_inc = -1;
3044 iavcc->saved_tx_quota = vcc->tx_quota;
3045 } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3046 // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3047 printk("Tx2: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3048 iavcc->flow_inc = 0;
3049 }
3050 }
3051#endif
3052 IF_TX(printk("ia send done\n");)
3053 return 0;
3054}
3055
3056static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3057{
3058 IADEV *iadev;
3059 struct ia_vcc *iavcc;
3060 unsigned long flags;
3061
3062 iadev = INPH_IA_DEV(vcc->dev);
3063 iavcc = INPH_IA_VCC(vcc);
3064 if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3065 {
3066 if (!skb)
3067 printk(KERN_CRIT "null skb in ia_send\n");
3068 else dev_kfree_skb_any(skb);
3069 return -EINVAL;
3070 }
3071 spin_lock_irqsave(&iadev->tx_lock, flags);
3072 if (!test_bit(ATM_VF_READY,&vcc->flags)){
3073 dev_kfree_skb_any(skb);
3074 spin_unlock_irqrestore(&iadev->tx_lock, flags);
3075 return -EINVAL;
3076 }
3077 ATM_SKB(skb)->vcc = vcc;
3078
3079 if (skb_peek(&iadev->tx_backlog)) {
3080 skb_queue_tail(&iadev->tx_backlog, skb);
3081 }
3082 else {
3083 if (ia_pkt_tx (vcc, skb)) {
3084 skb_queue_tail(&iadev->tx_backlog, skb);
3085 }
3086 }
3087 spin_unlock_irqrestore(&iadev->tx_lock, flags);
3088 return 0;
3089
3090}
3091
3092static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3093{
3094 int left = *pos, n;
3095 char *tmpPtr;
3096 IADEV *iadev = INPH_IA_DEV(dev);
3097 if(!left--) {
3098 if (iadev->phy_type == FE_25MBIT_PHY) {
3099 n = sprintf(page, " Board Type : Iphase5525-1KVC-128K\n");
3100 return n;
3101 }
3102 if (iadev->phy_type == FE_DS3_PHY)
3103 n = sprintf(page, " Board Type : Iphase-ATM-DS3");
3104 else if (iadev->phy_type == FE_E3_PHY)
3105 n = sprintf(page, " Board Type : Iphase-ATM-E3");
3106 else if (iadev->phy_type == FE_UTP_OPTION)
3107 n = sprintf(page, " Board Type : Iphase-ATM-UTP155");
3108 else
3109 n = sprintf(page, " Board Type : Iphase-ATM-OC3");
3110 tmpPtr = page + n;
3111 if (iadev->pci_map_size == 0x40000)
3112 n += sprintf(tmpPtr, "-1KVC-");
3113 else
3114 n += sprintf(tmpPtr, "-4KVC-");
3115 tmpPtr = page + n;
3116 if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3117 n += sprintf(tmpPtr, "1M \n");
3118 else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3119 n += sprintf(tmpPtr, "512K\n");
3120 else
3121 n += sprintf(tmpPtr, "128K\n");
3122 return n;
3123 }
3124 if (!left) {
3125 return sprintf(page, " Number of Tx Buffer: %u\n"
3126 " Size of Tx Buffer : %u\n"
3127 " Number of Rx Buffer: %u\n"
3128 " Size of Rx Buffer : %u\n"
3129 " Packets Receiverd : %u\n"
3130 " Packets Transmitted: %u\n"
3131 " Cells Received : %u\n"
3132 " Cells Transmitted : %u\n"
3133 " Board Dropped Cells: %u\n"
3134 " Board Dropped Pkts : %u\n",
3135 iadev->num_tx_desc, iadev->tx_buf_sz,
3136 iadev->num_rx_desc, iadev->rx_buf_sz,
3137 iadev->rx_pkt_cnt, iadev->tx_pkt_cnt,
3138 iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3139 iadev->drop_rxcell, iadev->drop_rxpkt);
3140 }
3141 return 0;
3142}
3143
/* ATM device operations exported to the ATM core for every board this
 * driver registers (passed to atm_dev_register() in ia_init_one()). */
static const struct atmdev_ops ops = {
 .open = ia_open,
 .close = ia_close,
 .ioctl = ia_ioctl,
 .getsockopt = ia_getsockopt,
 .setsockopt = ia_setsockopt,
 .send = ia_send,
 .phy_put = ia_phy_put,
 .phy_get = ia_phy_get,
 .change_qos = ia_change_qos,
 .proc_read = ia_proc_read,
 .owner = THIS_MODULE,
};
3157
3158static int __devinit ia_init_one(struct pci_dev *pdev,
3159 const struct pci_device_id *ent)
3160{
3161 struct atm_dev *dev;
3162 IADEV *iadev;
3163 unsigned long flags;
3164 int ret;
3165
f7141761 3166 iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
1da177e4
LT
3167 if (!iadev) {
3168 ret = -ENOMEM;
3169 goto err_out;
3170 }
f7141761 3171
1da177e4
LT
3172 iadev->pci = pdev;
3173
3174 IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3175 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3176 if (pci_enable_device(pdev)) {
3177 ret = -ENODEV;
3178 goto err_out_free_iadev;
3179 }
3180 dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
3181 if (!dev) {
3182 ret = -ENOMEM;
3183 goto err_out_disable_dev;
3184 }
3185 dev->dev_data = iadev;
3186 IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3187 IF_INIT(printk("dev_id = 0x%x iadev->LineRate = %d \n", (u32)dev,
3188 iadev->LineRate);)
3189
c0ed0b60
JBD
3190 pci_set_drvdata(pdev, dev);
3191
1da177e4
LT
3192 ia_dev[iadev_count] = iadev;
3193 _ia_dev[iadev_count] = dev;
3194 iadev_count++;
3195 spin_lock_init(&iadev->misc_lock);
3196 /* First fixes first. I don't want to think about this now. */
3197 spin_lock_irqsave(&iadev->misc_lock, flags);
3198 if (ia_init(dev) || ia_start(dev)) {
3199 IF_INIT(printk("IA register failed!\n");)
3200 iadev_count--;
3201 ia_dev[iadev_count] = NULL;
3202 _ia_dev[iadev_count] = NULL;
3203 spin_unlock_irqrestore(&iadev->misc_lock, flags);
3204 ret = -EINVAL;
3205 goto err_out_deregister_dev;
3206 }
3207 spin_unlock_irqrestore(&iadev->misc_lock, flags);
3208 IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3209
3210 iadev->next_board = ia_boards;
3211 ia_boards = dev;
3212
1da177e4
LT
3213 return 0;
3214
3215err_out_deregister_dev:
3216 atm_dev_deregister(dev);
3217err_out_disable_dev:
3218 pci_disable_device(pdev);
3219err_out_free_iadev:
3220 kfree(iadev);
3221err_out:
3222 return ret;
3223}
3224
/*
 * ia_remove_one - PCI remove callback; tears down one board.
 *
 * Ordering matters: PHY interrupts are masked and the PHY stopped before
 * the IRQ is released and the ATM device is deregistered, and only then
 * are the MMIO mapping, PCI device, DMA buffers and private state freed.
 */
static void __devexit ia_remove_one(struct pci_dev *pdev)
{
	struct atm_dev *dev = pci_get_drvdata(pdev);
	IADEV *iadev = INPH_IA_DEV(dev);

	/* Disable phy interrupts */
	ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
	  SUNI_RSOP_CIE);
	udelay(1);

	if (dev->phy && dev->phy->stop)
		dev->phy->stop(dev);

	/* De-register device */
	free_irq(iadev->irq, dev);
	iadev_count--;
	ia_dev[iadev_count] = NULL;
	_ia_dev[iadev_count] = NULL;
	IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
	atm_dev_deregister(dev);

	iounmap(iadev->base);
	pci_disable_device(pdev);

	/* Release the rx/tx buffer pools before freeing the private state. */
	ia_free_rx(iadev);
	ia_free_tx(iadev);

	kfree(iadev);
}
3254
3255static struct pci_device_id ia_pci_tbl[] = {
3256 { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3257 { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3258 { 0,}
3259};
3260MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3261
/* PCI driver glue: binds ia_init_one()/ia_remove_one() to the ID table. */
static struct pci_driver ia_driver = {
 .name = DEV_LABEL,
 .id_table = ia_pci_tbl,
 .probe = ia_init_one,
 .remove = __devexit_p(ia_remove_one),
};
3268
3269static int __init ia_module_init(void)
3270{
3271 int ret;
3272
3273 ret = pci_register_driver(&ia_driver);
3274 if (ret >= 0) {
3275 ia_timer.expires = jiffies + 3*HZ;
3276 add_timer(&ia_timer);
3277 } else
3278 printk(KERN_ERR DEV_LABEL ": no adapter found\n");
3279 return ret;
3280}
3281
3282static void __exit ia_module_exit(void)
3283{
3284 pci_unregister_driver(&ia_driver);
3285
3286 del_timer(&ia_timer);
3287}
3288
/* Hook the module entry/exit points into the kernel module machinery. */
module_init(ia_module_init);
module_exit(ia_module_exit);