1/*
2 *
3 * linux/drivers/s390/net/qeth_main.c ($Revision: 1.238 $)
4 *
5 * Linux on zSeries OSA Express and HiperSockets support
6 *
7 * Copyright 2000,2003 IBM Corporation
8 *
9 * Author(s): Original Code written by
10 * Utz Bacher (utz.bacher@de.ibm.com)
11 * Rewritten by
12 * Frank Pavlic (pavlic@de.ibm.com) and
13 * Thomas Spatzier <tspat@de.ibm.com>
14 *
15 * $Revision: 1.238 $ $Date: 2005/05/04 20:19:18 $
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2, or (at your option)
20 * any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 */
31
32
33#include <linux/config.h>
34#include <linux/module.h>
35#include <linux/moduleparam.h>
36#include <linux/string.h>
37#include <linux/errno.h>
38#include <linux/mm.h>
39#include <linux/ip.h>
40#include <linux/inetdevice.h>
41#include <linux/netdevice.h>
42#include <linux/sched.h>
43#include <linux/workqueue.h>
44#include <linux/kernel.h>
45#include <linux/slab.h>
46#include <linux/interrupt.h>
47#include <linux/tcp.h>
48#include <linux/icmp.h>
49#include <linux/skbuff.h>
50#include <linux/in.h>
51#include <linux/igmp.h>
52#include <linux/init.h>
53#include <linux/reboot.h>
54#include <linux/mii.h>
55#include <linux/rcupdate.h>
56#include <linux/ethtool.h>
57
58#include <net/arp.h>
59#include <net/ip.h>
60#include <net/route.h>
61
62#include <asm/ebcdic.h>
63#include <asm/io.h>
64#include <asm/qeth.h>
65#include <asm/timex.h>
66#include <asm/semaphore.h>
67#include <asm/uaccess.h>
68
69#include "qeth.h"
70#include "qeth_mpc.h"
71#include "qeth_fs.h"
72#include "qeth_eddp.h"
73#include "qeth_tso.h"
74
75#define VERSION_QETH_C "$Revision: 1.238 $"
76static const char *version = "qeth S/390 OSA-Express driver";
77
78/**
79 * Debug Facility Stuff
80 */
81static debug_info_t *qeth_dbf_setup = NULL;
82static debug_info_t *qeth_dbf_data = NULL;
83static debug_info_t *qeth_dbf_misc = NULL;
84static debug_info_t *qeth_dbf_control = NULL;
85debug_info_t *qeth_dbf_trace = NULL;
86static debug_info_t *qeth_dbf_sense = NULL;
87static debug_info_t *qeth_dbf_qerr = NULL;
88
89DEFINE_PER_CPU(char[256], qeth_dbf_txt_buf);
90
91/**
92 * some more definitions and declarations
93 */
94static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY;
95
96/* list of our cards */
97struct qeth_card_list_struct qeth_card_list;
98/* list of processes that want to be notified */
99spinlock_t qeth_notify_lock;
100struct list_head qeth_notify_list;
101
102static void qeth_send_control_data_cb(struct qeth_channel *,
103 struct qeth_cmd_buffer *);
104
105/**
106 * here we go with function implementation
107 */
108static void
109qeth_init_qdio_info(struct qeth_card *card);
110
111static int
112qeth_init_qdio_queues(struct qeth_card *card);
113
114static int
115qeth_alloc_qdio_buffers(struct qeth_card *card);
116
117static void
118qeth_free_qdio_buffers(struct qeth_card *);
119
120static void
121qeth_clear_qdio_buffers(struct qeth_card *);
122
123static void
124qeth_clear_ip_list(struct qeth_card *, int, int);
125
126static void
127qeth_clear_ipacmd_list(struct qeth_card *);
128
129static int
130qeth_qdio_clear_card(struct qeth_card *, int);
131
132static void
133qeth_clear_working_pool_list(struct qeth_card *);
134
135static void
136qeth_clear_cmd_buffers(struct qeth_channel *);
137
138static int
139qeth_stop(struct net_device *);
140
141static void
142qeth_clear_ipato_list(struct qeth_card *);
143
144static int
145qeth_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
146
147static void
148qeth_irq_tasklet(unsigned long);
149
150static int
151qeth_set_online(struct ccwgroup_device *);
152
153static int
154__qeth_set_online(struct ccwgroup_device *gdev, int recovery_mode);
155
156static struct qeth_ipaddr *
157qeth_get_addr_buffer(enum qeth_prot_versions);
158
159static void
160qeth_set_multicast_list(struct net_device *);
161
162static void
163qeth_notify_processes(void)
164{
165 /*notify all registered processes */
166 struct qeth_notify_list_struct *n_entry;
167
168 QETH_DBF_TEXT(trace,3,"procnoti");
169 spin_lock(&qeth_notify_lock);
170 list_for_each_entry(n_entry, &qeth_notify_list, list) {
171 send_sig(n_entry->signum, n_entry->task, 1);
172 }
173 spin_unlock(&qeth_notify_lock);
174
175}
176int
177qeth_notifier_unregister(struct task_struct *p)
178{
179 struct qeth_notify_list_struct *n_entry, *tmp;
180
181 QETH_DBF_TEXT(trace, 2, "notunreg");
182 spin_lock(&qeth_notify_lock);
183 list_for_each_entry_safe(n_entry, tmp, &qeth_notify_list, list) {
184 if (n_entry->task == p) {
185 list_del(&n_entry->list);
186 kfree(n_entry);
187 goto out;
188 }
189 }
190out:
191 spin_unlock(&qeth_notify_lock);
192 return 0;
193}
194int
195qeth_notifier_register(struct task_struct *p, int signum)
196{
197 struct qeth_notify_list_struct *n_entry;
198
199 /*check first if entry already exists*/
200 spin_lock(&qeth_notify_lock);
201 list_for_each_entry(n_entry, &qeth_notify_list, list) {
202 if (n_entry->task == p) {
203 n_entry->signum = signum;
204 spin_unlock(&qeth_notify_lock);
205 return 0;
206 }
207 }
208 spin_unlock(&qeth_notify_lock);
209
210 n_entry = (struct qeth_notify_list_struct *)
211 kmalloc(sizeof(struct qeth_notify_list_struct),GFP_KERNEL);
212 if (!n_entry)
213 return -ENOMEM;
214 n_entry->task = p;
215 n_entry->signum = signum;
216 spin_lock(&qeth_notify_lock);
217 list_add(&n_entry->list,&qeth_notify_list);
218 spin_unlock(&qeth_notify_lock);
219 return 0;
220}
221
222
223/**
224 * free channel command buffers
225 */
226static void
227qeth_clean_channel(struct qeth_channel *channel)
228{
229 int cnt;
230
231 QETH_DBF_TEXT(setup, 2, "freech");
232 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
233 kfree(channel->iob[cnt].data);
234}
235
236/**
237 * free card
238 */
239static void
240qeth_free_card(struct qeth_card *card)
241{
242
243 QETH_DBF_TEXT(setup, 2, "freecrd");
244 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
245 qeth_clean_channel(&card->read);
246 qeth_clean_channel(&card->write);
247 if (card->dev)
248 free_netdev(card->dev);
249 qeth_clear_ip_list(card, 0, 0);
250 qeth_clear_ipato_list(card);
251 kfree(card->ip_tbd_list);
252 qeth_free_qdio_buffers(card);
253 kfree(card);
254}
255
256/**
257 * alloc memory for command buffer per channel
258 */
259static int
260qeth_setup_channel(struct qeth_channel *channel)
261{
262 int cnt;
263
264 QETH_DBF_TEXT(setup, 2, "setupch");
265 for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
266 channel->iob[cnt].data = (char *)
267 kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
268 if (channel->iob[cnt].data == NULL)
269 break;
270 channel->iob[cnt].state = BUF_STATE_FREE;
271 channel->iob[cnt].channel = channel;
272 channel->iob[cnt].callback = qeth_send_control_data_cb;
273 channel->iob[cnt].rc = 0;
274 }
275 if (cnt < QETH_CMD_BUFFER_NO) {
276 while (cnt-- > 0)
277 kfree(channel->iob[cnt].data);
278 return -ENOMEM;
279 }
280 channel->buf_no = 0;
281 channel->io_buf_no = 0;
282 atomic_set(&channel->irq_pending, 0);
283 spin_lock_init(&channel->iob_lock);
284
285 init_waitqueue_head(&channel->wait_q);
286 channel->irq_tasklet.data = (unsigned long) channel;
287 channel->irq_tasklet.func = qeth_irq_tasklet;
288 return 0;
289}
290
291/**
292 * alloc memory for card structure
293 */
294static struct qeth_card *
295qeth_alloc_card(void)
296{
297 struct qeth_card *card;
298
299 QETH_DBF_TEXT(setup, 2, "alloccrd");
300 card = (struct qeth_card *) kmalloc(sizeof(struct qeth_card),
301 GFP_DMA|GFP_KERNEL);
302 if (!card)
303 return NULL;
304 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
305 memset(card, 0, sizeof(struct qeth_card));
306 if (qeth_setup_channel(&card->read)) {
307 kfree(card);
308 return NULL;
309 }
310 if (qeth_setup_channel(&card->write)) {
311 qeth_clean_channel(&card->read);
312 kfree(card);
313 return NULL;
314 }
315 return card;
316}
317
318static long
319__qeth_check_irb_error(struct ccw_device *cdev, struct irb *irb)
320{
321 if (!IS_ERR(irb))
322 return 0;
323
324 switch (PTR_ERR(irb)) {
325 case -EIO:
326 PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id);
327 QETH_DBF_TEXT(trace, 2, "ckirberr");
328 QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO);
329 break;
330 case -ETIMEDOUT:
331 PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id);
332 QETH_DBF_TEXT(trace, 2, "ckirberr");
333 QETH_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
334 break;
335 default:
336 PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
337 cdev->dev.bus_id);
338 QETH_DBF_TEXT(trace, 2, "ckirberr");
339 QETH_DBF_TEXT(trace, 2, " rc???");
340 }
341 return PTR_ERR(irb);
342}
343
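/*
 * Roughly: inspect channel status and unit-check sense data from the irb.
 * Returns nonzero when the condition calls for recovery (channel checks,
 * resetting-event or 0xaf/0xfe sense), 0 when the interrupt can be ignored
 * (e.g. command reject or all-zero sense).
 */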
344static int
345qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
346{
347 int dstat,cstat;
348 char *sense;
349
350 sense = (char *) irb->ecw;
351 cstat = irb->scsw.cstat;
352 dstat = irb->scsw.dstat;
353
354 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
355 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
356 SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
357 QETH_DBF_TEXT(trace,2, "CGENCHK");
358 PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ",
359 cdev->dev.bus_id, dstat, cstat);
360 HEXDUMP16(WARN, "irb: ", irb);
361 HEXDUMP16(WARN, "irb: ", ((char *) irb) + 32);
362 return 1;
363 }
364
365 if (dstat & DEV_STAT_UNIT_CHECK) {
366 if (sense[SENSE_RESETTING_EVENT_BYTE] &
367 SENSE_RESETTING_EVENT_FLAG) {
368 QETH_DBF_TEXT(trace,2,"REVIND");
369 return 1;
370 }
371 if (sense[SENSE_COMMAND_REJECT_BYTE] &
372 SENSE_COMMAND_REJECT_FLAG) {
373 QETH_DBF_TEXT(trace,2,"CMDREJi");
374 return 0;
375 }
376 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
377 QETH_DBF_TEXT(trace,2,"AFFE");
378 return 1;
379 }
380 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
381 QETH_DBF_TEXT(trace,2,"ZEROSEN");
382 return 0;
383 }
384 QETH_DBF_TEXT(trace,2,"DGENCHK");
385 return 1;
386 }
387 return 0;
388}
389static int qeth_issue_next_read(struct qeth_card *);
390
391/**
392 * interrupt handler
393 */
394static void
395qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
396{
397 int rc;
398 int cstat,dstat;
399 struct qeth_cmd_buffer *buffer;
400 struct qeth_channel *channel;
401 struct qeth_card *card;
402
403 QETH_DBF_TEXT(trace,5,"irq");
404
405 if (__qeth_check_irb_error(cdev, irb))
406 return;
407 cstat = irb->scsw.cstat;
408 dstat = irb->scsw.dstat;
409
410 card = CARD_FROM_CDEV(cdev);
411 if (!card)
412 return;
413
414 if (card->read.ccwdev == cdev){
415 channel = &card->read;
416 QETH_DBF_TEXT(trace,5,"read");
417 } else if (card->write.ccwdev == cdev) {
418 channel = &card->write;
419 QETH_DBF_TEXT(trace,5,"write");
420 } else {
421 channel = &card->data;
422 QETH_DBF_TEXT(trace,5,"data");
423 }
424 atomic_set(&channel->irq_pending, 0);
425
426 if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC))
427 channel->state = CH_STATE_STOPPED;
428
429 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC))
430 channel->state = CH_STATE_HALTED;
431
432 /*let's wake up immediately on data channel*/
433 if ((channel == &card->data) && (intparm != 0))
434 goto out;
435
436 if (intparm == QETH_CLEAR_CHANNEL_PARM) {
437 QETH_DBF_TEXT(trace, 6, "clrchpar");
438 /* we don't have to handle this further */
439 intparm = 0;
440 }
441 if (intparm == QETH_HALT_CHANNEL_PARM) {
442 QETH_DBF_TEXT(trace, 6, "hltchpar");
443 /* we don't have to handle this further */
444 intparm = 0;
445 }
446 if ((dstat & DEV_STAT_UNIT_EXCEP) ||
447 (dstat & DEV_STAT_UNIT_CHECK) ||
448 (cstat)) {
449 if (irb->esw.esw0.erw.cons) {
450 /* TODO: we should make this s390dbf */
451 PRINT_WARN("sense data available on channel %s.\n",
452 CHANNEL_ID(channel));
453 PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat);
454 HEXDUMP16(WARN,"irb: ",irb);
455 HEXDUMP16(WARN,"sense data: ",irb->ecw);
456 }
457 rc = qeth_get_problem(cdev,irb);
458 if (rc) {
459 qeth_schedule_recovery(card);
460 goto out;
461 }
462 }
463
464 if (intparm) {
465 buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
466 buffer->state = BUF_STATE_PROCESSED;
467 }
468 if (channel == &card->data)
469 return;
470
471 if (channel == &card->read &&
472 channel->state == CH_STATE_UP)
473 qeth_issue_next_read(card);
474
475 tasklet_schedule(&channel->irq_tasklet);
476 return;
477out:
478 wake_up(&card->wait_q);
479}
480
481/**
482 * tasklet function scheduled from irq handler
483 */
484static void
485qeth_irq_tasklet(unsigned long data)
486{
487 struct qeth_card *card;
488 struct qeth_channel *channel;
489 struct qeth_cmd_buffer *iob;
490 __u8 index;
491
492 QETH_DBF_TEXT(trace,5,"irqtlet");
493 channel = (struct qeth_channel *) data;
494 iob = channel->iob;
495 index = channel->buf_no;
496 card = CARD_FROM_CDEV(channel->ccwdev);
497 while (iob[index].state == BUF_STATE_PROCESSED) {
498 if (iob[index].callback !=NULL) {
499 iob[index].callback(channel,iob + index);
500 }
501 index = (index + 1) % QETH_CMD_BUFFER_NO;
502 }
503 channel->buf_no = index;
504 wake_up(&card->wait_q);
505}
506
507static int qeth_stop_card(struct qeth_card *, int);
508
509static int
510__qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode)
511{
512 struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
513 int rc = 0, rc2 = 0, rc3 = 0;
514 enum qeth_card_states recover_flag;
515
516 QETH_DBF_TEXT(setup, 3, "setoffl");
517 QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
518
519 recover_flag = card->state;
520 if (qeth_stop_card(card, recovery_mode) == -ERESTARTSYS){
521 PRINT_WARN("Stopping card %s interrupted by user!\n",
522 CARD_BUS_ID(card));
523 return -ERESTARTSYS;
524 }
525 rc = ccw_device_set_offline(CARD_DDEV(card));
526 rc2 = ccw_device_set_offline(CARD_WDEV(card));
527 rc3 = ccw_device_set_offline(CARD_RDEV(card));
528 if (!rc)
529 rc = (rc2) ? rc2 : rc3;
530 if (rc)
531 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
532 if (recover_flag == CARD_STATE_UP)
533 card->state = CARD_STATE_RECOVER;
534 qeth_notify_processes();
535 return 0;
536}
537
538static int
539qeth_set_offline(struct ccwgroup_device *cgdev)
540{
541 return __qeth_set_offline(cgdev, 0);
542}
543
544static int
545qeth_wait_for_threads(struct qeth_card *card, unsigned long threads);
546
547
548static void
549qeth_remove_device(struct ccwgroup_device *cgdev)
550{
551 struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
552 unsigned long flags;
553
554 QETH_DBF_TEXT(setup, 3, "rmdev");
555 QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
556
557 if (!card)
558 return;
559
560 if (qeth_wait_for_threads(card, 0xffffffff))
561 return;
562
563 if (cgdev->state == CCWGROUP_ONLINE){
564 card->use_hard_stop = 1;
565 qeth_set_offline(cgdev);
566 }
567 /* remove from our internal list */
568 write_lock_irqsave(&qeth_card_list.rwlock, flags);
569 list_del(&card->list);
570 write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
571 if (card->dev)
572 unregister_netdev(card->dev);
573 qeth_remove_device_attributes(&cgdev->dev);
574 qeth_free_card(card);
575 cgdev->dev.driver_data = NULL;
576 put_device(&cgdev->dev);
577}
578
579static int
580qeth_register_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
581static int
582qeth_deregister_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
583
584/**
585 * Add/remove address to/from card's ip list, i.e. try to add or remove
586 * reference to/from an IP address that is already registered on the card.
587 * Returns:
588 * 0 address was on card and its reference count has been adjusted,
589 * but is still > 0, so nothing has to be done
590 * also returns 0 if the address was not on the card and the todo was to delete
591 * the address -> there is also nothing to be done
592 * 1 address was not on card and the todo is to add it to the card's ip
593 * list
594 * -1 address was on card and its reference count has been decremented
595 * to <= 0 by the todo -> address must be removed from card
596 */
597static int
598__qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo,
599 struct qeth_ipaddr **__addr)
600{
601 struct qeth_ipaddr *addr;
602 int found = 0;
603
604 list_for_each_entry(addr, &card->ip_list, entry) {
605 if (card->options.layer2) {
606 if ((addr->type == todo->type) &&
607 (memcmp(&addr->mac, &todo->mac,
608 OSA_ADDR_LEN) == 0)) {
609 found = 1;
610 break;
611 }
612 continue;
613 }
614 if ((addr->proto == QETH_PROT_IPV4) &&
615 (todo->proto == QETH_PROT_IPV4) &&
616 (addr->type == todo->type) &&
617 (addr->u.a4.addr == todo->u.a4.addr) &&
618 (addr->u.a4.mask == todo->u.a4.mask)) {
619 found = 1;
620 break;
621 }
622 if ((addr->proto == QETH_PROT_IPV6) &&
623 (todo->proto == QETH_PROT_IPV6) &&
624 (addr->type == todo->type) &&
625 (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
626 (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
627 sizeof(struct in6_addr)) == 0)) {
628 found = 1;
629 break;
630 }
631 }
632 if (found) {
633 addr->users += todo->users;
634 if (addr->users <= 0){
635 *__addr = addr;
636 return -1;
637 } else {
638 /* for VIPA and RXIP limit refcount to 1 */
639 if (addr->type != QETH_IP_TYPE_NORMAL)
640 addr->users = 1;
641 return 0;
642 }
643 }
644 if (todo->users > 0) {
645 /* for VIPA and RXIP limit refcount to 1 */
646 if (todo->type != QETH_IP_TYPE_NORMAL)
647 todo->users = 1;
648 return 1;
649 } else
650 return 0;
651}
652
653static inline int
654__qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr,
655 int same_type)
656{
657 struct qeth_ipaddr *tmp;
658
659 list_for_each_entry(tmp, list, entry) {
660 if ((tmp->proto == QETH_PROT_IPV4) &&
661 (addr->proto == QETH_PROT_IPV4) &&
662 ((same_type && (tmp->type == addr->type)) ||
663 (!same_type && (tmp->type != addr->type)) ) &&
664 (tmp->u.a4.addr == addr->u.a4.addr) ){
665 return 1;
666 }
667 if ((tmp->proto == QETH_PROT_IPV6) &&
668 (addr->proto == QETH_PROT_IPV6) &&
669 ((same_type && (tmp->type == addr->type)) ||
670 (!same_type && (tmp->type != addr->type)) ) &&
671 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
672 sizeof(struct in6_addr)) == 0) ) {
673 return 1;
674 }
675 }
676 return 0;
677}
678
679/*
680 * Add an IP address to the todo list. If there is already an "add todo"
681 * entry in this list, we just increment its reference count.
682 * Returns 0 if we just incremented reference count.
683 */
684static int
685__qeth_insert_ip_todo(struct qeth_card *card, struct qeth_ipaddr *addr, int add)
686{
687 struct qeth_ipaddr *tmp, *t;
688 int found = 0;
689
690 list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
691 if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
692 (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
693 return 0;
694 if (card->options.layer2) {
695 if ((tmp->type == addr->type) &&
696 (tmp->is_multicast == addr->is_multicast) &&
697 (memcmp(&tmp->mac, &addr->mac,
698 OSA_ADDR_LEN) == 0)) {
699 found = 1;
700 break;
701 }
702 continue;
703 }
704 if ((tmp->proto == QETH_PROT_IPV4) &&
705 (addr->proto == QETH_PROT_IPV4) &&
706 (tmp->type == addr->type) &&
707 (tmp->is_multicast == addr->is_multicast) &&
708 (tmp->u.a4.addr == addr->u.a4.addr) &&
709 (tmp->u.a4.mask == addr->u.a4.mask)) {
710 found = 1;
711 break;
712 }
713 if ((tmp->proto == QETH_PROT_IPV6) &&
714 (addr->proto == QETH_PROT_IPV6) &&
715 (tmp->type == addr->type) &&
716 (tmp->is_multicast == addr->is_multicast) &&
717 (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) &&
718 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
719 sizeof(struct in6_addr)) == 0)) {
720 found = 1;
721 break;
722 }
723 }
724 if (found){
725 if (addr->users != 0)
726 tmp->users += addr->users;
727 else
728 tmp->users += add? 1:-1;
729 if (tmp->users == 0) {
730 list_del(&tmp->entry);
731 kfree(tmp);
732 }
733 return 0;
734 } else {
735 if (addr->type == QETH_IP_TYPE_DEL_ALL_MC)
736 list_add(&addr->entry, card->ip_tbd_list);
737 else {
738 if (addr->users == 0)
739 addr->users += add? 1:-1;
740 if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
741 qeth_is_addr_covered_by_ipato(card, addr)){
742 QETH_DBF_TEXT(trace, 2, "tkovaddr");
743 addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
744 }
745 list_add_tail(&addr->entry, card->ip_tbd_list);
746 }
747 return 1;
748 }
749}
750
751/**
752 * Remove IP address from list
753 */
754static int
755qeth_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
756{
757 unsigned long flags;
758 int rc = 0;
759
760 QETH_DBF_TEXT(trace, 4, "delip");
761
762 if (card->options.layer2)
763 QETH_DBF_HEX(trace, 4, &addr->mac, 6);
764 else if (addr->proto == QETH_PROT_IPV4)
765 QETH_DBF_HEX(trace, 4, &addr->u.a4.addr, 4);
766 else {
767 QETH_DBF_HEX(trace, 4, &addr->u.a6.addr, 8);
768 QETH_DBF_HEX(trace, 4, ((char *)&addr->u.a6.addr) + 8, 8);
769 }
770 spin_lock_irqsave(&card->ip_lock, flags);
771 rc = __qeth_insert_ip_todo(card, addr, 0);
772 spin_unlock_irqrestore(&card->ip_lock, flags);
773 return rc;
774}
775
776static int
777qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
778{
779 unsigned long flags;
780 int rc = 0;
781
782 QETH_DBF_TEXT(trace, 4, "addip");
783 if (card->options.layer2)
784 QETH_DBF_HEX(trace, 4, &addr->mac, 6);
785 else if (addr->proto == QETH_PROT_IPV4)
786 QETH_DBF_HEX(trace, 4, &addr->u.a4.addr, 4);
787 else {
788 QETH_DBF_HEX(trace, 4, &addr->u.a6.addr, 8);
789 QETH_DBF_HEX(trace, 4, ((char *)&addr->u.a6.addr) + 8, 8);
790 }
791 spin_lock_irqsave(&card->ip_lock, flags);
792 rc = __qeth_insert_ip_todo(card, addr, 1);
793 spin_unlock_irqrestore(&card->ip_lock, flags);
794 return rc;
795}
796
797static inline void
798__qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags)
799{
800 struct qeth_ipaddr *addr, *tmp;
801 int rc;
802again:
803 list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
804 if (addr->is_multicast) {
805 spin_unlock_irqrestore(&card->ip_lock, *flags);
806 rc = qeth_deregister_addr_entry(card, addr);
807 spin_lock_irqsave(&card->ip_lock, *flags);
808 if (!rc) {
809 list_del(&addr->entry);
810 kfree(addr);
811 goto again;
812 }
813 }
814 }
815}
816
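/*
 * Work through the to-be-done list: the current ip_tbd_list is detached and
 * replaced by a fresh one, then every todo entry is either merged into the
 * existing on-card reference, registered as a new address, or used to
 * deregister and remove an address. card->ip_lock is dropped around the
 * actual (de)registration calls.
 */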
817static void
818qeth_set_ip_addr_list(struct qeth_card *card)
819{
820 struct list_head *tbd_list;
821 struct qeth_ipaddr *todo, *addr;
822 unsigned long flags;
823 int rc;
824
825 QETH_DBF_TEXT(trace, 2, "sdiplist");
826 QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
827
828 spin_lock_irqsave(&card->ip_lock, flags);
829 tbd_list = card->ip_tbd_list;
830 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
831 if (!card->ip_tbd_list) {
832 QETH_DBF_TEXT(trace, 0, "silnomem");
833 card->ip_tbd_list = tbd_list;
834 spin_unlock_irqrestore(&card->ip_lock, flags);
835 return;
836 } else
837 INIT_LIST_HEAD(card->ip_tbd_list);
838
839 while (!list_empty(tbd_list)){
840 todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry);
841 list_del(&todo->entry);
842 if (todo->type == QETH_IP_TYPE_DEL_ALL_MC){
843 __qeth_delete_all_mc(card, &flags);
844 kfree(todo);
845 continue;
846 }
847 rc = __qeth_ref_ip_on_card(card, todo, &addr);
848 if (rc == 0) {
849 /* nothing to be done; only adjusted refcount */
850 kfree(todo);
851 } else if (rc == 1) {
852 /* new entry to be added to on-card list */
853 spin_unlock_irqrestore(&card->ip_lock, flags);
854 rc = qeth_register_addr_entry(card, todo);
855 spin_lock_irqsave(&card->ip_lock, flags);
856 if (!rc)
857 list_add_tail(&todo->entry, &card->ip_list);
858 else
859 kfree(todo);
860 } else if (rc == -1) {
861 /* on-card entry to be removed */
862 list_del_init(&addr->entry);
863 spin_unlock_irqrestore(&card->ip_lock, flags);
864 rc = qeth_deregister_addr_entry(card, addr);
865 spin_lock_irqsave(&card->ip_lock, flags);
866 if (!rc)
867 kfree(addr);
868 else
869 list_add_tail(&addr->entry, &card->ip_list);
870 kfree(todo);
871 }
872 }
873 spin_unlock_irqrestore(&card->ip_lock, flags);
874 kfree(tbd_list);
875}
876
877static void qeth_delete_mc_addresses(struct qeth_card *);
878static void qeth_add_multicast_ipv4(struct qeth_card *);
879static void qeth_layer2_add_multicast(struct qeth_card *);
880#ifdef CONFIG_QETH_IPV6
881static void qeth_add_multicast_ipv6(struct qeth_card *);
882#endif
883
884static inline int
885qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread)
886{
887 unsigned long flags;
888
889 spin_lock_irqsave(&card->thread_mask_lock, flags);
890 if ( !(card->thread_allowed_mask & thread) ||
891 (card->thread_start_mask & thread) ) {
892 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
893 return -EPERM;
894 }
895 card->thread_start_mask |= thread;
896 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
897 return 0;
898}
899
900static void
901qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
902{
903 unsigned long flags;
904
905 spin_lock_irqsave(&card->thread_mask_lock, flags);
906 card->thread_start_mask &= ~thread;
907 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
908 wake_up(&card->wait_q);
909}
910
911static void
912qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
913{
914 unsigned long flags;
915
916 spin_lock_irqsave(&card->thread_mask_lock, flags);
917 card->thread_running_mask &= ~thread;
918 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
919 wake_up(&card->wait_q);
920}
921
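/*
 * Thread bookkeeping: a card-level helper thread (IP registration, recovery)
 * may only run if its bit is set in thread_start_mask, allowed by
 * thread_allowed_mask and not yet present in thread_running_mask.
 * __qeth_do_run_thread returns 1 if the caller should run the thread now,
 * 0 if there is nothing to start, and -EPERM if the thread is not allowed.
 */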
922static inline int
923__qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
924{
925 unsigned long flags;
926 int rc = 0;
927
928 spin_lock_irqsave(&card->thread_mask_lock, flags);
929 if (card->thread_start_mask & thread){
930 if ((card->thread_allowed_mask & thread) &&
931 !(card->thread_running_mask & thread)){
932 rc = 1;
933 card->thread_start_mask &= ~thread;
934 card->thread_running_mask |= thread;
935 } else
936 rc = -EPERM;
937 }
938 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
939 return rc;
940}
941
942static int
943qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
944{
945 int rc = 0;
946
947 wait_event(card->wait_q,
948 (rc = __qeth_do_run_thread(card, thread)) >= 0);
949 return rc;
950}
951
952static int
953qeth_register_ip_addresses(void *ptr)
954{
955 struct qeth_card *card;
956
957 card = (struct qeth_card *) ptr;
958 daemonize("qeth_reg_ip");
959 QETH_DBF_TEXT(trace,4,"regipth1");
960 if (!qeth_do_run_thread(card, QETH_SET_IP_THREAD))
961 return 0;
962 QETH_DBF_TEXT(trace,4,"regipth2");
963 qeth_set_ip_addr_list(card);
964 qeth_clear_thread_running_bit(card, QETH_SET_IP_THREAD);
965 return 0;
966}
967
968static int
969qeth_recover(void *ptr)
970{
971 struct qeth_card *card;
972 int rc = 0;
973
974 card = (struct qeth_card *) ptr;
975 daemonize("qeth_recover");
976 QETH_DBF_TEXT(trace,2,"recover1");
977 QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
978 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
979 return 0;
980 QETH_DBF_TEXT(trace,2,"recover2");
981 PRINT_WARN("Recovery of device %s started ...\n",
982 CARD_BUS_ID(card));
983 card->use_hard_stop = 1;
984 __qeth_set_offline(card->gdev,1);
985 rc = __qeth_set_online(card->gdev,1);
986 if (!rc)
987 PRINT_INFO("Device %s successfully recovered!\n",
988 CARD_BUS_ID(card));
989 else
990 PRINT_INFO("Device %s could not be recovered!\n",
991 CARD_BUS_ID(card));
992 /* don't run another scheduled recovery */
993 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
994 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
995 return 0;
996}
997
998void
999qeth_schedule_recovery(struct qeth_card *card)
1000{
1001 QETH_DBF_TEXT(trace,2,"startrec");
1002
1003 if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
1004 schedule_work(&card->kernel_thread_starter);
1005}
1006
1007static int
1008qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
1009{
1010 unsigned long flags;
1011 int rc = 0;
1012
1013 spin_lock_irqsave(&card->thread_mask_lock, flags);
1014 QETH_DBF_TEXT_(trace, 4, " %02x%02x%02x",
1015 (u8) card->thread_start_mask,
1016 (u8) card->thread_allowed_mask,
1017 (u8) card->thread_running_mask);
1018 rc = (card->thread_start_mask & thread);
1019 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1020 return rc;
1021}
1022
1023static void
1024qeth_start_kernel_thread(struct qeth_card *card)
1025{
1026 QETH_DBF_TEXT(trace , 2, "strthrd");
1027
1028 if (card->read.state != CH_STATE_UP &&
1029 card->write.state != CH_STATE_UP)
1030 return;
1031
1032 if (qeth_do_start_thread(card, QETH_SET_IP_THREAD))
1033 kernel_thread(qeth_register_ip_addresses, (void *)card,SIGCHLD);
1034 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
1035 kernel_thread(qeth_recover, (void *) card, SIGCHLD);
1036}
1037
1038
1039static void
1040qeth_set_intial_options(struct qeth_card *card)
1041{
1042 card->options.route4.type = NO_ROUTER;
1043#ifdef CONFIG_QETH_IPV6
1044 card->options.route6.type = NO_ROUTER;
1045#endif /* QETH_IPV6 */
1046 card->options.checksum_type = QETH_CHECKSUM_DEFAULT;
1047 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
1048 card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
1049 card->options.fake_broadcast = 0;
1050 card->options.add_hhlen = DEFAULT_ADD_HHLEN;
1051 card->options.fake_ll = 0;
1052 if (card->info.type == QETH_CARD_TYPE_OSN)
1053 card->options.layer2 = 1;
1054 else
1055 card->options.layer2 = 0;
1056}
1057
1058/**
1059 * initialize channels, card and all state machines
1060 */
1061static int
1062qeth_setup_card(struct qeth_card *card)
1063{
1064
1065 QETH_DBF_TEXT(setup, 2, "setupcrd");
1066 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
1067
1068 card->read.state = CH_STATE_DOWN;
1069 card->write.state = CH_STATE_DOWN;
1070 card->data.state = CH_STATE_DOWN;
1071 card->state = CARD_STATE_DOWN;
1072 card->lan_online = 0;
1073 card->use_hard_stop = 0;
1074 card->dev = NULL;
1075#ifdef CONFIG_QETH_VLAN
1076 spin_lock_init(&card->vlanlock);
1077 card->vlangrp = NULL;
1078#endif
1079 spin_lock_init(&card->lock);
1080 spin_lock_init(&card->ip_lock);
1081 spin_lock_init(&card->thread_mask_lock);
1082 card->thread_start_mask = 0;
1083 card->thread_allowed_mask = 0;
1084 card->thread_running_mask = 0;
1085 INIT_WORK(&card->kernel_thread_starter,
1086 (void *)qeth_start_kernel_thread,card);
1087 INIT_LIST_HEAD(&card->ip_list);
1088 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
1089 if (!card->ip_tbd_list) {
1090 QETH_DBF_TEXT(setup, 0, "iptbdnom");
1091 return -ENOMEM;
1092 }
1093 INIT_LIST_HEAD(card->ip_tbd_list);
1094 INIT_LIST_HEAD(&card->cmd_waiter_list);
1095 init_waitqueue_head(&card->wait_q);
1096 /* initial options */
1097 qeth_set_intial_options(card);
1098 /* IP address takeover */
1099 INIT_LIST_HEAD(&card->ipato.entries);
1100 card->ipato.enabled = 0;
1101 card->ipato.invert4 = 0;
1102 card->ipato.invert6 = 0;
1103 /* init QDIO stuff */
1104 qeth_init_qdio_info(card);
1105 return 0;
1106}
1107
1108static int
1109is_1920_device (struct qeth_card *card)
1110{
1111 int single_queue = 0;
1112 struct ccw_device *ccwdev;
1113 struct channelPath_dsc {
1114 u8 flags;
1115 u8 lsn;
1116 u8 desc;
1117 u8 chpid;
1118 u8 swla;
1119 u8 zeroes;
1120 u8 chla;
1121 u8 chpp;
1122 } *chp_dsc;
1123
1124 QETH_DBF_TEXT(setup, 2, "chk_1920");
1125
1126 ccwdev = card->data.ccwdev;
1127 chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
1128 if (chp_dsc != NULL) {
1129 /* CHPP field bit 6 == 1 -> single queue */
1130 single_queue = ((chp_dsc->chpp & 0x02) == 0x02);
1131 kfree(chp_dsc);
1132 }
1133 QETH_DBF_TEXT_(setup, 2, "rc:%x", single_queue);
1134 return single_queue;
1135}
1136
1137static int
1138qeth_determine_card_type(struct qeth_card *card)
1139{
1140 int i = 0;
1141
1142 QETH_DBF_TEXT(setup, 2, "detcdtyp");
1143
1144 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
1145 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
1146 while (known_devices[i][4]) {
1147 if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) &&
1148 (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) {
1149 card->info.type = known_devices[i][4];
1150 card->qdio.no_out_queues = known_devices[i][8];
1151 card->info.is_multicast_different = known_devices[i][9];
1152 if (is_1920_device(card)) {
1153 PRINT_INFO("Priority Queueing not able "
1154 "due to hardware limitations!\n");
1155 card->qdio.no_out_queues = 1;
1156 card->qdio.default_out_queue = 0;
1157 }
1158 return 0;
1159 }
1160 i++;
1161 }
1162 card->info.type = QETH_CARD_TYPE_UNKNOWN;
1163 PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card));
1164 return -ENOENT;
1165}
1166
1167static int
1168qeth_probe_device(struct ccwgroup_device *gdev)
1169{
1170 struct qeth_card *card;
1171 struct device *dev;
1172 unsigned long flags;
1173 int rc;
1174
1175 QETH_DBF_TEXT(setup, 2, "probedev");
1176
1177 dev = &gdev->dev;
1178 if (!get_device(dev))
1179 return -ENODEV;
1180
1181 QETH_DBF_TEXT_(setup, 2, "%s", gdev->dev.bus_id);
1182
1183 card = qeth_alloc_card();
1184 if (!card) {
1185 put_device(dev);
1186 QETH_DBF_TEXT_(setup, 2, "1err%d", -ENOMEM);
1187 return -ENOMEM;
1188 }
1189 card->read.ccwdev = gdev->cdev[0];
1190 card->write.ccwdev = gdev->cdev[1];
1191 card->data.ccwdev = gdev->cdev[2];
1192 gdev->dev.driver_data = card;
1193 card->gdev = gdev;
1194 gdev->cdev[0]->handler = qeth_irq;
1195 gdev->cdev[1]->handler = qeth_irq;
1196 gdev->cdev[2]->handler = qeth_irq;
1197
1198 if ((rc = qeth_determine_card_type(card))){
1199 PRINT_WARN("%s: not a valid card type\n", __func__);
1200 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
1201 put_device(dev);
1202 qeth_free_card(card);
1203 return rc;
1204 }
1205 if ((rc = qeth_setup_card(card))){
1206 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
1207 put_device(dev);
1208 qeth_free_card(card);
1209 return rc;
1210 }
1211 rc = qeth_create_device_attributes(dev);
1212 if (rc) {
1213 put_device(dev);
1214 qeth_free_card(card);
1215 return rc;
1216 }
1217 /* insert into our internal list */
1218 write_lock_irqsave(&qeth_card_list.rwlock, flags);
1219 list_add_tail(&card->list, &qeth_card_list.list);
1220 write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
1221 return rc;
1222}
1223
1224
1225static int
1226qeth_get_unitaddr(struct qeth_card *card)
1227{
1228 int length;
1229 char *prcd;
1230 int rc;
1231
1232 QETH_DBF_TEXT(setup, 2, "getunit");
1233 rc = read_conf_data(CARD_DDEV(card), (void **) &prcd, &length);
1234 if (rc) {
1235 PRINT_ERR("read_conf_data for device %s returned %i\n",
1236 CARD_DDEV_ID(card), rc);
1237 return rc;
1238 }
1239 card->info.chpid = prcd[30];
1240 card->info.unit_addr2 = prcd[31];
1241 card->info.cula = prcd[63];
1242 card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
1243 (prcd[0x11] == _ascebc['M']));
1244 return 0;
1245}
1246
1247static void
1248qeth_init_tokens(struct qeth_card *card)
1249{
1250 card->token.issuer_rm_w = 0x00010103UL;
1251 card->token.cm_filter_w = 0x00010108UL;
1252 card->token.cm_connection_w = 0x0001010aUL;
1253 card->token.ulp_filter_w = 0x0001010bUL;
1254 card->token.ulp_connection_w = 0x0001010dUL;
1255}
1256
1257static inline __u16
1258raw_devno_from_bus_id(char *id)
1259{
1260 id += (strlen(id) - 4);
1261 return (__u16) simple_strtoul(id, &id, 16);
1262}
1263/**
1264 * setup channel
1265 */
1266static void
1267qeth_setup_ccw(struct qeth_channel *channel,unsigned char *iob, __u32 len)
1268{
1269 struct qeth_card *card;
1270
1271 QETH_DBF_TEXT(trace, 4, "setupccw");
1272 card = CARD_FROM_CDEV(channel->ccwdev);
1273 if (channel == &card->read)
1274 memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
1275 else
1276 memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
1277 channel->ccw.count = len;
1278 channel->ccw.cda = (__u32) __pa(iob);
1279}
1280
1281/**
1282 * get free buffer for ccws (IDX activation, lancmds,ipassists...)
1283 */
1284static struct qeth_cmd_buffer *
1285__qeth_get_buffer(struct qeth_channel *channel)
1286{
1287 __u8 index;
1288
1289 QETH_DBF_TEXT(trace, 6, "getbuff");
1290 index = channel->io_buf_no;
1291 do {
1292 if (channel->iob[index].state == BUF_STATE_FREE) {
1293 channel->iob[index].state = BUF_STATE_LOCKED;
1294 channel->io_buf_no = (channel->io_buf_no + 1) %
1295 QETH_CMD_BUFFER_NO;
1296 memset(channel->iob[index].data, 0, QETH_BUFSIZE);
1297 return channel->iob + index;
1298 }
1299 index = (index + 1) % QETH_CMD_BUFFER_NO;
1300 } while(index != channel->io_buf_no);
1301
1302 return NULL;
1303}
1304
1305/**
1306 * release command buffer
1307 */
1308static void
1309qeth_release_buffer(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1310{
1311 unsigned long flags;
1312
1313 QETH_DBF_TEXT(trace, 6, "relbuff");
1314 spin_lock_irqsave(&channel->iob_lock, flags);
1315 memset(iob->data, 0, QETH_BUFSIZE);
1316 iob->state = BUF_STATE_FREE;
1317 iob->callback = qeth_send_control_data_cb;
1318 iob->rc = 0;
1319 spin_unlock_irqrestore(&channel->iob_lock, flags);
1320}
1321
1322static struct qeth_cmd_buffer *
1323qeth_get_buffer(struct qeth_channel *channel)
1324{
1325 struct qeth_cmd_buffer *buffer = NULL;
1326 unsigned long flags;
1327
1328 spin_lock_irqsave(&channel->iob_lock, flags);
1329 buffer = __qeth_get_buffer(channel);
1330 spin_unlock_irqrestore(&channel->iob_lock, flags);
1331 return buffer;
1332}
1333
1334static struct qeth_cmd_buffer *
1335qeth_wait_for_buffer(struct qeth_channel *channel)
1336{
1337 struct qeth_cmd_buffer *buffer;
1338 wait_event(channel->wait_q,
1339 ((buffer = qeth_get_buffer(channel)) != NULL));
1340 return buffer;
1341}
1342
1343static void
1344qeth_clear_cmd_buffers(struct qeth_channel *channel)
1345{
1346 int cnt = 0;
1347
1348 for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++)
1349 qeth_release_buffer(channel,&channel->iob[cnt]);
1350 channel->buf_no = 0;
1351 channel->io_buf_no = 0;
1352}
1353
1354/**
1355 * start IDX for read and write channel
1356 */
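/*
 * IDX activation is a two-step handshake per channel: qeth_idx_activate_channel
 * writes the IDX_ACTIVATE request and waits for the channel to reach
 * CH_STATE_ACTIVATING; qeth_idx_activate_get_answer then issues a read and
 * waits for the reply callback to move the channel to CH_STATE_UP.
 */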
1357static int
1358qeth_idx_activate_get_answer(struct qeth_channel *channel,
1359 void (*idx_reply_cb)(struct qeth_channel *,
1360 struct qeth_cmd_buffer *))
1361{
1362 struct qeth_cmd_buffer *iob;
1363 unsigned long flags;
1364 int rc;
1365 struct qeth_card *card;
1366
1367 QETH_DBF_TEXT(setup, 2, "idxanswr");
1368 card = CARD_FROM_CDEV(channel->ccwdev);
1369 iob = qeth_get_buffer(channel);
1370 iob->callback = idx_reply_cb;
1371 memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
1372 channel->ccw.count = QETH_BUFSIZE;
1373 channel->ccw.cda = (__u32) __pa(iob->data);
1374
1375 wait_event(card->wait_q,
1376 atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
1377 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1378 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1379 rc = ccw_device_start(channel->ccwdev,
1380 &channel->ccw,(addr_t) iob, 0, 0);
1381 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1382
1383 if (rc) {
1384 PRINT_ERR("qeth: Error2 in activating channel rc=%d\n",rc);
1385 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
1386 atomic_set(&channel->irq_pending, 0);
1387 wake_up(&card->wait_q);
1388 return rc;
1389 }
1390 rc = wait_event_interruptible_timeout(card->wait_q,
1391 channel->state == CH_STATE_UP, QETH_TIMEOUT);
1392 if (rc == -ERESTARTSYS)
1393 return rc;
1394 if (channel->state != CH_STATE_UP){
1395 rc = -ETIME;
1396 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
1397 qeth_clear_cmd_buffers(channel);
1398 } else
1399 rc = 0;
1400 return rc;
1401}
1402
1403static int
1404qeth_idx_activate_channel(struct qeth_channel *channel,
1405 void (*idx_reply_cb)(struct qeth_channel *,
1406 struct qeth_cmd_buffer *))
1407{
1408 struct qeth_card *card;
1409 struct qeth_cmd_buffer *iob;
1410 unsigned long flags;
1411 __u16 temp;
1412 int rc;
1413
1414 card = CARD_FROM_CDEV(channel->ccwdev);
1415
1416 QETH_DBF_TEXT(setup, 2, "idxactch");
1417
1418 iob = qeth_get_buffer(channel);
1419 iob->callback = idx_reply_cb;
1420 memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
1421 channel->ccw.count = IDX_ACTIVATE_SIZE;
1422 channel->ccw.cda = (__u32) __pa(iob->data);
1423 if (channel == &card->write) {
1424 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
1425 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1426 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1427 card->seqno.trans_hdr++;
1428 } else {
1429 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
1430 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1431 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1432 }
1433 memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1434 &card->token.issuer_rm_w,QETH_MPC_TOKEN_LENGTH);
1435 memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
1436 &card->info.func_level,sizeof(__u16));
1437 temp = raw_devno_from_bus_id(CARD_DDEV_ID(card));
1438 memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp, 2);
1439 temp = (card->info.cula << 8) + card->info.unit_addr2;
1440 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
1441
1442 wait_event(card->wait_q,
1443 atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
1444 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1445 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1446 rc = ccw_device_start(channel->ccwdev,
1447 &channel->ccw,(addr_t) iob, 0, 0);
1448 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1449
1450 if (rc) {
1451 PRINT_ERR("qeth: Error1 in activating channel. rc=%d\n",rc);
1452 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
1453 atomic_set(&channel->irq_pending, 0);
1454 wake_up(&card->wait_q);
1455 return rc;
1456 }
1457 rc = wait_event_interruptible_timeout(card->wait_q,
1458 channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
1459 if (rc == -ERESTARTSYS)
1460 return rc;
1461 if (channel->state != CH_STATE_ACTIVATING) {
1462 PRINT_WARN("qeth: IDX activate timed out!\n");
1463 QETH_DBF_TEXT_(setup, 2, "2err%d", -ETIME);
1464 qeth_clear_cmd_buffers(channel);
1465 return -ETIME;
1466 }
1467 return qeth_idx_activate_get_answer(channel,idx_reply_cb);
1468}
1469
1470static int
1471qeth_peer_func_level(int level)
1472{
1473 if ((level & 0xff) == 8)
1474 return (level & 0xff) + 0x400;
1475 if (((level >> 8) & 3) == 1)
1476 return (level & 0xff) + 0x200;
1477 return level;
1478}
1479
1480static void
1481qeth_idx_write_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1482{
1483 struct qeth_card *card;
1484 __u16 temp;
1485
1486 QETH_DBF_TEXT(setup ,2, "idxwrcb");
1487
1488 if (channel->state == CH_STATE_DOWN) {
1489 channel->state = CH_STATE_ACTIVATING;
1490 goto out;
1491 }
1492 card = CARD_FROM_CDEV(channel->ccwdev);
1493
1494 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1495 PRINT_ERR("IDX_ACTIVATE on write channel device %s: negative "
1496 "reply\n", CARD_WDEV_ID(card));
1497 goto out;
1498 }
1499 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1500 if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
1501 PRINT_WARN("IDX_ACTIVATE on write channel device %s: "
1502 "function level mismatch "
1503 "(sent: 0x%x, received: 0x%x)\n",
1504 CARD_WDEV_ID(card), card->info.func_level, temp);
1505 goto out;
1506 }
1507 channel->state = CH_STATE_UP;
1508out:
1509 qeth_release_buffer(channel, iob);
1510}
1511
1512static int
1513qeth_check_idx_response(unsigned char *buffer)
1514{
1515 if (!buffer)
1516 return 0;
1517
1518 QETH_DBF_HEX(control, 2, buffer, QETH_DBF_CONTROL_LEN);
1519 if ((buffer[2] & 0xc0) == 0xc0) {
1520 PRINT_WARN("received an IDX TERMINATE "
1521 "with cause code 0x%02x%s\n",
1522 buffer[4],
1523 ((buffer[4] == 0x22) ?
1524 " -- try another portname" : ""));
1525 QETH_DBF_TEXT(trace, 2, "ckidxres");
1526 QETH_DBF_TEXT(trace, 2, " idxterm");
1527 QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO);
1528 return -EIO;
1529 }
1530 return 0;
1531}
1532
1533static void
1534qeth_idx_read_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1535{
1536 struct qeth_card *card;
1537 __u16 temp;
1538
1539 QETH_DBF_TEXT(setup , 2, "idxrdcb");
1540 if (channel->state == CH_STATE_DOWN) {
1541 channel->state = CH_STATE_ACTIVATING;
1542 goto out;
1543 }
1544
1545 card = CARD_FROM_CDEV(channel->ccwdev);
1546 if (qeth_check_idx_response(iob->data)) {
1547 goto out;
1548 }
1549 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1550 PRINT_ERR("IDX_ACTIVATE on read channel device %s: negative "
1551 "reply\n", CARD_RDEV_ID(card));
1552 goto out;
1553 }
1554
1555/**
1556 * temporary fix for microcode bug
1557 * to revert it, replace OR by AND
1558 */
1559 if ( (!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
1560 (card->info.type == QETH_CARD_TYPE_OSAE) )
1561 card->info.portname_required = 1;
1562
1563 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1564 if (temp != qeth_peer_func_level(card->info.func_level)) {
1565 PRINT_WARN("IDX_ACTIVATE on read channel device %s: function "
1566 "level mismatch (sent: 0x%x, received: 0x%x)\n",
1567 CARD_RDEV_ID(card), card->info.func_level, temp);
1568 goto out;
1569 }
1570 memcpy(&card->token.issuer_rm_r,
1571 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1572 QETH_MPC_TOKEN_LENGTH);
1573 memcpy(&card->info.mcl_level[0],
1574 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
1575 channel->state = CH_STATE_UP;
1576out:
1577 qeth_release_buffer(channel,iob);
1578}
1579
1580static int
1581qeth_issue_next_read(struct qeth_card *card)
1582{
1583 int rc;
1584 struct qeth_cmd_buffer *iob;
1585
1586 QETH_DBF_TEXT(trace,5,"issnxrd");
1587 if (card->read.state != CH_STATE_UP)
1588 return -EIO;
1589 iob = qeth_get_buffer(&card->read);
1590 if (!iob) {
1591 PRINT_WARN("issue_next_read failed: no iob available!\n");
1592 return -ENOMEM;
1593 }
1594 qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
1595 wait_event(card->wait_q,
1596 atomic_compare_and_swap(0,1,&card->read.irq_pending) == 0);
1597 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1598 rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
1599 (addr_t) iob, 0, 0);
1600 if (rc) {
1601 PRINT_ERR("Error in starting next read ccw! rc=%i\n", rc);
1602 atomic_set(&card->read.irq_pending, 0);
1603 qeth_schedule_recovery(card);
1604 wake_up(&card->wait_q);
1605 }
1606 return rc;
1607}
1608
1609static struct qeth_reply *
1610qeth_alloc_reply(struct qeth_card *card)
1611{
1612 struct qeth_reply *reply;
1613
1614 reply = kmalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
1615 if (reply){
1616 memset(reply, 0, sizeof(struct qeth_reply));
1617 atomic_set(&reply->refcnt, 1);
1618 reply->card = card;
1619 }
1620 return reply;
1621}
1622
1623static void
1624qeth_get_reply(struct qeth_reply *reply)
1625{
1626 WARN_ON(atomic_read(&reply->refcnt) <= 0);
1627 atomic_inc(&reply->refcnt);
1628}
1629
1630static void
1631qeth_put_reply(struct qeth_reply *reply)
1632{
1633 WARN_ON(atomic_read(&reply->refcnt) <= 0);
1634 if (atomic_dec_and_test(&reply->refcnt))
1635 kfree(reply);
1636}
1637
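/*
 * Timer callback armed in qeth_send_control_data: if the reply is still on
 * the card's cmd_waiter_list when the timer fires, it is taken off the list
 * and completed with -ETIME so that the waiting caller wakes up.
 */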
1638static void
1639qeth_cmd_timeout(unsigned long data)
1640{
1641 struct qeth_reply *reply, *list_reply, *r;
1642 unsigned long flags;
1643
1644 reply = (struct qeth_reply *) data;
1645 spin_lock_irqsave(&reply->card->lock, flags);
1646 list_for_each_entry_safe(list_reply, r,
1647 &reply->card->cmd_waiter_list, list) {
1648 if (reply == list_reply){
1649 qeth_get_reply(reply);
1650 list_del_init(&reply->list);
1651 spin_unlock_irqrestore(&reply->card->lock, flags);
1652 reply->rc = -ETIME;
1653 reply->received = 1;
1654 wake_up(&reply->wait_q);
1655 qeth_put_reply(reply);
1656 return;
1657 }
1658 }
1659 spin_unlock_irqrestore(&reply->card->lock, flags);
1660}
1661
1662static struct qeth_ipa_cmd *
1663qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
1664{
1665 struct qeth_ipa_cmd *cmd = NULL;
1666
1667 QETH_DBF_TEXT(trace,5,"chkipad");
1668 if (IS_IPA(iob->data)){
1669 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
1670 if (IS_IPA_REPLY(cmd))
1671 return cmd;
1672 else {
1673 switch (cmd->hdr.command) {
1674 case IPA_CMD_STOPLAN:
1675 PRINT_WARN("Link failure on %s (CHPID 0x%X) - "
1676 "there is a network problem or "
1677 "someone pulled the cable or "
1678 "disabled the port.\n",
1679 QETH_CARD_IFNAME(card),
1680 card->info.chpid);
1681 card->lan_online = 0;
1682 netif_carrier_off(card->dev);
1683 return NULL;
1684 case IPA_CMD_STARTLAN:
1685 PRINT_INFO("Link reestablished on %s "
1686 "(CHPID 0x%X). Scheduling "
1687 "IP address reset.\n",
1688 QETH_CARD_IFNAME(card),
1689 card->info.chpid);
1690 netif_carrier_on(card->dev);
1691 qeth_schedule_recovery(card);
1692 return NULL;
1693 case IPA_CMD_MODCCID:
1694 return cmd;
1695 case IPA_CMD_REGISTER_LOCAL_ADDR:
1696 QETH_DBF_TEXT(trace,3, "irla");
1697 break;
1698 case IPA_CMD_UNREGISTER_LOCAL_ADDR:
1699 QETH_DBF_TEXT(trace,3, "urla");
1700 break;
1701 default:
1702 PRINT_WARN("Received data is IPA "
1703 "but not a reply!\n");
1704 break;
1705 }
1706 }
1707 }
1708 return cmd;
1709}
1710
1711/**
1712 * wake all waiting ipa commands
1713 */
1714static void
1715qeth_clear_ipacmd_list(struct qeth_card *card)
1716{
1717 struct qeth_reply *reply, *r;
1718 unsigned long flags;
1719
1720 QETH_DBF_TEXT(trace, 4, "clipalst");
1721
1722 spin_lock_irqsave(&card->lock, flags);
1723 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
1724 qeth_get_reply(reply);
1725 reply->rc = -EIO;
1726 reply->received = 1;
1727 list_del_init(&reply->list);
1728 wake_up(&reply->wait_q);
1729 qeth_put_reply(reply);
1730 }
1731 spin_unlock_irqrestore(&card->lock, flags);
1732}
1733
1734static void
1735qeth_send_control_data_cb(struct qeth_channel *channel,
1736 struct qeth_cmd_buffer *iob)
1737{
1738 struct qeth_card *card;
1739 struct qeth_reply *reply, *r;
1740 struct qeth_ipa_cmd *cmd;
1741 unsigned long flags;
1742 int keep_reply;
1743
1744 QETH_DBF_TEXT(trace,4,"sndctlcb");
1745
1746 card = CARD_FROM_CDEV(channel->ccwdev);
1747 if (qeth_check_idx_response(iob->data)) {
1748 qeth_clear_ipacmd_list(card);
1749 qeth_schedule_recovery(card);
1750 goto out;
1751 }
1752
1753 cmd = qeth_check_ipa_data(card, iob);
1754 if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
1755 goto out;
1756 /*in case of OSN : check if cmd is set */
1757 if (card->info.type == QETH_CARD_TYPE_OSN &&
1758 cmd &&
1759 cmd->hdr.command != IPA_CMD_STARTLAN &&
1760 card->osn_info.assist_cb != NULL) {
1761 card->osn_info.assist_cb(card->dev, cmd);
1762 goto out;
1763 }
1764
1765 spin_lock_irqsave(&card->lock, flags);
1766 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
1767 if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
1768 ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
1769 qeth_get_reply(reply);
1770 list_del_init(&reply->list);
1771 spin_unlock_irqrestore(&card->lock, flags);
1772 keep_reply = 0;
1773 if (reply->callback != NULL) {
1774 if (cmd) {
1775 reply->offset = (__u16)((char*)cmd -
1776 (char *)iob->data);
1777 keep_reply = reply->callback(card,
1778 reply,
1779 (unsigned long)cmd);
1780 } else
1781 keep_reply = reply->callback(card,
1782 reply,
1783 (unsigned long)iob);
1784 }
1785 if (cmd)
1786 reply->rc = (u16) cmd->hdr.return_code;
1787 else if (iob->rc)
1788 reply->rc = iob->rc;
1789 if (keep_reply) {
1790 spin_lock_irqsave(&card->lock, flags);
1791 list_add_tail(&reply->list,
1792 &card->cmd_waiter_list);
1793 spin_unlock_irqrestore(&card->lock, flags);
1794 } else {
1795 reply->received = 1;
1796 wake_up(&reply->wait_q);
1797 }
1798 qeth_put_reply(reply);
1799 goto out;
1800 }
1801 }
1802 spin_unlock_irqrestore(&card->lock, flags);
1803out:
1804 memcpy(&card->seqno.pdu_hdr_ack,
1805 QETH_PDU_HEADER_SEQ_NO(iob->data),
1806 QETH_SEQ_NO_LENGTH);
1807 qeth_release_buffer(channel,iob);
1808}
1809
1810static inline void
1811qeth_prepare_control_data(struct qeth_card *card, int len,
1812struct qeth_cmd_buffer *iob)
1813{
1814 qeth_setup_ccw(&card->write,iob->data,len);
1815 iob->callback = qeth_release_buffer;
1816
1817 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1818 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1819 card->seqno.trans_hdr++;
1820 memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
1821 &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
1822 card->seqno.pdu_hdr++;
1823 memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
1824 &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
1825 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
1826}
1827
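/*
 * Send a control command on the write channel: allocate a qeth_reply, queue
 * it on cmd_waiter_list, start the CCW, and sleep until the reply callback
 * (or the command timer) marks the reply as received. The reply's return
 * code is handed back to the caller.
 */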
1828static int
1829qeth_send_control_data(struct qeth_card *card, int len,
1830 struct qeth_cmd_buffer *iob,
1831 int (*reply_cb)
1832 (struct qeth_card *, struct qeth_reply*, unsigned long),
1833 void *reply_param)
1834
1835{
1836 int rc;
1837 unsigned long flags;
1838 struct qeth_reply *reply = NULL;
1839 struct timer_list timer;
1840
1841 QETH_DBF_TEXT(trace, 2, "sendctl");
1842
1843 reply = qeth_alloc_reply(card);
1844 if (!reply) {
1845 PRINT_WARN("Could not alloc qeth_reply!\n");
1846 return -ENOMEM;
1847 }
1848 reply->callback = reply_cb;
1849 reply->param = reply_param;
1850 if (card->state == CARD_STATE_DOWN)
1851 reply->seqno = QETH_IDX_COMMAND_SEQNO;
1852 else
1853 reply->seqno = card->seqno.ipa++;
1854 init_timer(&timer);
1855 timer.function = qeth_cmd_timeout;
1856 timer.data = (unsigned long) reply;
1857 init_waitqueue_head(&reply->wait_q);
1858 spin_lock_irqsave(&card->lock, flags);
1859 list_add_tail(&reply->list, &card->cmd_waiter_list);
1860 spin_unlock_irqrestore(&card->lock, flags);
1861 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
1862 wait_event(card->wait_q,
1863 atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
1864 qeth_prepare_control_data(card, len, iob);
1865 if (IS_IPA(iob->data))
1866 timer.expires = jiffies + QETH_IPA_TIMEOUT;
1867 else
1868 timer.expires = jiffies + QETH_TIMEOUT;
1869 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1870 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1871 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
1872 (addr_t) iob, 0, 0);
1873 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
1874 if (rc){
1875 PRINT_WARN("qeth_send_control_data: "
1876 "ccw_device_start rc = %i\n", rc);
1877 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
1878 spin_lock_irqsave(&card->lock, flags);
1879 list_del_init(&reply->list);
1880 qeth_put_reply(reply);
1881 spin_unlock_irqrestore(&card->lock, flags);
1882 qeth_release_buffer(iob->channel, iob);
1883 atomic_set(&card->write.irq_pending, 0);
1884 wake_up(&card->wait_q);
1885 return rc;
1886 }
1887 add_timer(&timer);
1888 wait_event(reply->wait_q, reply->received);
1889 del_timer_sync(&timer);
1890 rc = reply->rc;
1891 qeth_put_reply(reply);
1892 return rc;
1893}
1894
1895static int
1896qeth_osn_send_control_data(struct qeth_card *card, int len,
1897 struct qeth_cmd_buffer *iob)
1898{
1899 unsigned long flags;
1900 int rc = 0;
1901
1902 QETH_DBF_TEXT(trace, 5, "osndctrd");
1903
1904 wait_event(card->wait_q,
1905 atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
1906 qeth_prepare_control_data(card, len, iob);
1907 QETH_DBF_TEXT(trace, 6, "osnoirqp");
1908 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1909 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
1910 (addr_t) iob, 0, 0);
1911 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
1912 if (rc){
1913 PRINT_WARN("qeth_osn_send_control_data: "
1914 "ccw_device_start rc = %i\n", rc);
1915 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
1916 qeth_release_buffer(iob->channel, iob);
1917 atomic_set(&card->write.irq_pending, 0);
1918 wake_up(&card->wait_q);
1919 }
1920 return rc;
1921}
1922
1923static inline void
1924qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
1925 char prot_type)
1926{
1927 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
1928 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data),&prot_type,1);
1929 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
1930 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
1931}
1932
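/*
 * Send an OSN IPA command: s1 is the total PDU length (header plus
 * payload), s2 the payload length; both are patched into the PDU
 * length fields before the buffer is handed to the write channel.
 */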
1933static int
1934qeth_osn_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
1935 int data_len)
1936{
1937 u16 s1, s2;
1938
1939	QETH_DBF_TEXT(trace, 4, "osndipa");
1940
1941 qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2);
1942 s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
1943 s2 = (u16)data_len;
1944 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
1945 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
1946 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
1947 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
1948 return qeth_osn_send_control_data(card, s1, iob);
1949}
1950
1da177e4
LT
1951static int
1952qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
1953 int (*reply_cb)
1954 (struct qeth_card *,struct qeth_reply*, unsigned long),
1955 void *reply_param)
1956{
1957 int rc;
1958 char prot_type;
1959
1960 QETH_DBF_TEXT(trace,4,"sendipa");
1961
1da177e4 1962 if (card->options.layer2)
500f83ab
UB
1963 if (card->info.type == QETH_CARD_TYPE_OSN)
1964 prot_type = QETH_PROT_OSN2;
1965 else
1966 prot_type = QETH_PROT_LAYER2;
1da177e4
LT
1967 else
1968 prot_type = QETH_PROT_TCPIP;
500f83ab 1969 qeth_prepare_ipa_cmd(card,iob,prot_type);
1da177e4
LT
1970 rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob,
1971 reply_cb, reply_param);
1972 return rc;
1973}
1974
1975
1976static int
1977qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1978 unsigned long data)
1979{
1980 struct qeth_cmd_buffer *iob;
1981
1982 QETH_DBF_TEXT(setup, 2, "cmenblcb");
1983
1984 iob = (struct qeth_cmd_buffer *) data;
1985 memcpy(&card->token.cm_filter_r,
1986 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
1987 QETH_MPC_TOKEN_LENGTH);
1988 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
1989 return 0;
1990}
1991
1992static int
1993qeth_cm_enable(struct qeth_card *card)
1994{
1995 int rc;
1996 struct qeth_cmd_buffer *iob;
1997
1998 QETH_DBF_TEXT(setup,2,"cmenable");
1999
2000 iob = qeth_wait_for_buffer(&card->write);
2001 memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
2002 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
2003 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2004 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
2005 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
2006
2007 rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
2008 qeth_cm_enable_cb, NULL);
2009 return rc;
2010}
2011
2012static int
2013qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2014 unsigned long data)
2015{
2016
2017 struct qeth_cmd_buffer *iob;
2018
2019 QETH_DBF_TEXT(setup, 2, "cmsetpcb");
2020
2021 iob = (struct qeth_cmd_buffer *) data;
2022 memcpy(&card->token.cm_connection_r,
2023 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
2024 QETH_MPC_TOKEN_LENGTH);
2025 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
2026 return 0;
2027}
2028
2029static int
2030qeth_cm_setup(struct qeth_card *card)
2031{
2032 int rc;
2033 struct qeth_cmd_buffer *iob;
2034
2035 QETH_DBF_TEXT(setup,2,"cmsetup");
2036
2037 iob = qeth_wait_for_buffer(&card->write);
2038 memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
2039 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
2040 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2041 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
2042 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
2043 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
2044 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2045 rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
2046 qeth_cm_setup_cb, NULL);
2047 return rc;
2048
2049}
2050
2051static int
2052qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2053 unsigned long data)
2054{
2055
2056 __u16 mtu, framesize;
2057 __u16 len;
2058 __u8 link_type;
2059 struct qeth_cmd_buffer *iob;
2060
2061 QETH_DBF_TEXT(setup, 2, "ulpenacb");
2062
2063 iob = (struct qeth_cmd_buffer *) data;
2064 memcpy(&card->token.ulp_filter_r,
2065 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
2066 QETH_MPC_TOKEN_LENGTH);
2067 if (qeth_get_mtu_out_of_mpc(card->info.type)) {
2068 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
2069 mtu = qeth_get_mtu_outof_framesize(framesize);
2070 if (!mtu) {
2071 iob->rc = -EINVAL;
2072 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
2073 return 0;
2074 }
2075 card->info.max_mtu = mtu;
2076 card->info.initial_mtu = mtu;
2077 card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
2078 } else {
2079 card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
2080 card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type);
2081 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
2082 }
2083
2084 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
2085 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
2086 memcpy(&link_type,
2087 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2088 card->info.link_type = link_type;
2089 } else
2090 card->info.link_type = 0;
2091 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
2092 return 0;
2093}
2094
2095static int
2096qeth_ulp_enable(struct qeth_card *card)
2097{
2098 int rc;
2099 char prot_type;
2100 struct qeth_cmd_buffer *iob;
2101
2102 /*FIXME: trace view callbacks*/
2103 QETH_DBF_TEXT(setup,2,"ulpenabl");
2104
2105 iob = qeth_wait_for_buffer(&card->write);
2106 memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
2107
2108 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
2109 (__u8) card->info.portno;
2110 if (card->options.layer2)
500f83ab
UB
2111 if (card->info.type == QETH_CARD_TYPE_OSN)
2112 prot_type = QETH_PROT_OSN2;
2113 else
2114 prot_type = QETH_PROT_LAYER2;
1da177e4
LT
2115 else
2116 prot_type = QETH_PROT_TCPIP;
2117
2118 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data),&prot_type,1);
2119 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2120 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2121 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2122 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2123 memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data),
2124 card->info.portname, 9);
2125 rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
2126 qeth_ulp_enable_cb, NULL);
2127 return rc;
2128
2129}
2130
2131static inline __u16
2132__raw_devno_from_bus_id(char *id)
2133{
2134 id += (strlen(id) - 4);
2135 return (__u16) simple_strtoul(id, &id, 16);
2136}
2137
2138static int
2139qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2140 unsigned long data)
2141{
2142 struct qeth_cmd_buffer *iob;
2143
2144 QETH_DBF_TEXT(setup, 2, "ulpstpcb");
2145
2146 iob = (struct qeth_cmd_buffer *) data;
2147 memcpy(&card->token.ulp_connection_r,
2148 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2149 QETH_MPC_TOKEN_LENGTH);
2150 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
2151 return 0;
2152}
2153
2154static int
2155qeth_ulp_setup(struct qeth_card *card)
2156{
2157 int rc;
2158 __u16 temp;
2159 struct qeth_cmd_buffer *iob;
2160
2161 QETH_DBF_TEXT(setup,2,"ulpsetup");
2162
2163 iob = qeth_wait_for_buffer(&card->write);
2164 memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
2165
2166 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2167 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2168 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2169 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2170 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2171 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2172
2173 temp = __raw_devno_from_bus_id(CARD_DDEV_ID(card));
2174 memcpy(QETH_ULP_SETUP_CUA(iob->data), &temp, 2);
2175 temp = (card->info.cula << 8) + card->info.unit_addr2;
2176 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2177 rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
2178 qeth_ulp_setup_cb, NULL);
2179 return rc;
2180}
2181
2182static inline int
2183qeth_check_for_inbound_error(struct qeth_qdio_buffer *buf,
2184 unsigned int qdio_error,
2185 unsigned int siga_error)
2186{
2187 int rc = 0;
2188
2189 if (qdio_error || siga_error) {
2190 QETH_DBF_TEXT(trace, 2, "qdinerr");
2191 QETH_DBF_TEXT(qerr, 2, "qdinerr");
2192 QETH_DBF_TEXT_(qerr, 2, " F15=%02X",
2193 buf->buffer->element[15].flags & 0xff);
2194 QETH_DBF_TEXT_(qerr, 2, " F14=%02X",
2195 buf->buffer->element[14].flags & 0xff);
2196 QETH_DBF_TEXT_(qerr, 2, " qerr=%X", qdio_error);
2197 QETH_DBF_TEXT_(qerr, 2, " serr=%X", siga_error);
2198 rc = 1;
2199 }
2200 return rc;
2201}
2202
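/*
 * Allocate an skb for an inbound packet, reserving headroom for an
 * extra qeth_hdr (OSN frames) or, with VLAN support built in, for a
 * VLAN tag.
 */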
2203static inline struct sk_buff *
500f83ab 2204qeth_get_skb(unsigned int length, struct qeth_hdr *hdr)
1da177e4
LT
2205{
2206 struct sk_buff* skb;
500f83ab
UB
2207 int add_len;
2208
2209 add_len = 0;
2210 if (hdr->hdr.osn.id == QETH_HEADER_TYPE_OSN)
2211 add_len = sizeof(struct qeth_hdr);
1da177e4 2212#ifdef CONFIG_QETH_VLAN
500f83ab
UB
2213 else
2214 add_len = VLAN_HLEN;
1da177e4 2215#endif
500f83ab
UB
2216 skb = dev_alloc_skb(length + add_len);
2217 if (skb && add_len)
2218 skb_reserve(skb, add_len);
1da177e4
LT
2219 return skb;
2220}
2221
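/*
 * Extract the next packet from a qdio inbound buffer: locate the
 * qeth_hdr (it must not cross an element boundary), then copy the
 * packet data, which may span several buffer elements, into a fresh
 * skb. Returns NULL when the buffer is exhausted or on error.
 */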
2222static inline struct sk_buff *
2223qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
2224 struct qdio_buffer_element **__element, int *__offset,
2225 struct qeth_hdr **hdr)
2226{
2227 struct qdio_buffer_element *element = *__element;
2228 int offset = *__offset;
2229 struct sk_buff *skb = NULL;
2230 int skb_len;
2231 void *data_ptr;
2232 int data_len;
2233
2234 QETH_DBF_TEXT(trace,6,"nextskb");
2235 /* qeth_hdr must not cross element boundaries */
2236 if (element->length < offset + sizeof(struct qeth_hdr)){
2237 if (qeth_is_last_sbale(element))
2238 return NULL;
2239 element++;
2240 offset = 0;
2241 if (element->length < sizeof(struct qeth_hdr))
2242 return NULL;
2243 }
2244 *hdr = element->addr + offset;
2245
2246 offset += sizeof(struct qeth_hdr);
2247 if (card->options.layer2)
500f83ab
UB
2248 if (card->info.type == QETH_CARD_TYPE_OSN)
2249 skb_len = (*hdr)->hdr.osn.pdu_length;
2250 else
2251 skb_len = (*hdr)->hdr.l2.pkt_length;
1da177e4
LT
2252 else
2253 skb_len = (*hdr)->hdr.l3.length;
2254
2255 if (!skb_len)
2256 return NULL;
2257 if (card->options.fake_ll){
e23dd9cd 2258 if(card->dev->type == ARPHRD_IEEE802_TR){
500f83ab 2259 if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_TR, *hdr)))
e23dd9cd
FP
2260 goto no_mem;
2261 skb_reserve(skb,QETH_FAKE_LL_LEN_TR);
2262 } else {
500f83ab 2263 if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_ETH, *hdr)))
e23dd9cd
FP
2264 goto no_mem;
2265 skb_reserve(skb,QETH_FAKE_LL_LEN_ETH);
2266 }
500f83ab 2267 } else if (!(skb = qeth_get_skb(skb_len, *hdr)))
1da177e4
LT
2268 goto no_mem;
2269 data_ptr = element->addr + offset;
2270 while (skb_len) {
2271 data_len = min(skb_len, (int)(element->length - offset));
2272 if (data_len)
2273 memcpy(skb_put(skb, data_len), data_ptr, data_len);
2274 skb_len -= data_len;
2275 if (skb_len){
2276 if (qeth_is_last_sbale(element)){
2277 QETH_DBF_TEXT(trace,4,"unexeob");
2278 QETH_DBF_TEXT_(trace,4,"%s",CARD_BUS_ID(card));
2279 QETH_DBF_TEXT(qerr,2,"unexeob");
2280 QETH_DBF_TEXT_(qerr,2,"%s",CARD_BUS_ID(card));
2281 QETH_DBF_HEX(misc,4,buffer,sizeof(*buffer));
2282 dev_kfree_skb_any(skb);
2283 card->stats.rx_errors++;
2284 return NULL;
2285 }
2286 element++;
2287 offset = 0;
2288 data_ptr = element->addr;
2289 } else {
2290 offset += data_len;
2291 }
2292 }
2293 *__element = element;
2294 *__offset = offset;
2295 return skb;
2296no_mem:
2297 if (net_ratelimit()){
2298 PRINT_WARN("No memory for packet received on %s.\n",
2299 QETH_CARD_IFNAME(card));
2300 QETH_DBF_TEXT(trace,2,"noskbmem");
2301 QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
2302 }
2303 card->stats.rx_dropped++;
2304 return NULL;
2305}
2306
ab611487 2307static inline __be16
1da177e4
LT
2308qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
2309{
2310 struct qeth_card *card;
2311 struct ethhdr *eth;
2312
2313 QETH_DBF_TEXT(trace,6,"typtrans");
2314
2315 card = (struct qeth_card *)dev->priv;
2316#ifdef CONFIG_TR
2317 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
2318 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
2319 return tr_type_trans(skb,dev);
2320#endif /* CONFIG_TR */
2321 skb->mac.raw = skb->data;
2322 skb_pull(skb, ETH_HLEN );
2323 eth = eth_hdr(skb);
2324
2325 if (*eth->h_dest & 1) {
2326 if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
2327 skb->pkt_type = PACKET_BROADCAST;
2328 else
2329 skb->pkt_type = PACKET_MULTICAST;
2330 } else if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
2331 skb->pkt_type = PACKET_OTHERHOST;
2332
2333 if (ntohs(eth->h_proto) >= 1536)
2334 return eth->h_proto;
2335 if (*(unsigned short *) (skb->data) == 0xFFFF)
2336 return htons(ETH_P_802_3);
2337 return htons(ETH_P_802_2);
2338}
2339
2340static inline void
e23dd9cd
FP
2341qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
2342 struct qeth_hdr *hdr)
2343{
2344 struct trh_hdr *fake_hdr;
2345 struct trllc *fake_llc;
2346 struct iphdr *ip_hdr;
2347
2348 QETH_DBF_TEXT(trace,5,"skbfktr");
2349 skb->mac.raw = skb->data - QETH_FAKE_LL_LEN_TR;
2350 /* this is a fake ethernet header */
2351 fake_hdr = (struct trh_hdr *) skb->mac.raw;
2352
2353 /* the destination MAC address */
2354 switch (skb->pkt_type){
2355 case PACKET_MULTICAST:
2356 switch (skb->protocol){
2357#ifdef CONFIG_QETH_IPV6
2358 case __constant_htons(ETH_P_IPV6):
2359 ndisc_mc_map((struct in6_addr *)
2360 skb->data + QETH_FAKE_LL_V6_ADDR_POS,
2361 fake_hdr->daddr, card->dev, 0);
2362 break;
2363#endif /* CONFIG_QETH_IPV6 */
2364 case __constant_htons(ETH_P_IP):
2365 ip_hdr = (struct iphdr *)skb->data;
2366 ip_tr_mc_map(ip_hdr->daddr, fake_hdr->daddr);
2367 break;
2368 default:
2369 memcpy(fake_hdr->daddr, card->dev->dev_addr, TR_ALEN);
2370 }
2371 break;
2372 case PACKET_BROADCAST:
2373 memset(fake_hdr->daddr, 0xff, TR_ALEN);
2374 break;
2375 default:
2376 memcpy(fake_hdr->daddr, card->dev->dev_addr, TR_ALEN);
2377 }
2378 /* the source MAC address */
2379 if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
2380 memcpy(fake_hdr->saddr, &hdr->hdr.l3.dest_addr[2], TR_ALEN);
2381 else
2382 memset(fake_hdr->saddr, 0, TR_ALEN);
2383 fake_hdr->rcf=0;
2384 fake_llc = (struct trllc*)&(fake_hdr->rcf);
2385 fake_llc->dsap = EXTENDED_SAP;
2386 fake_llc->ssap = EXTENDED_SAP;
2387 fake_llc->llc = UI_CMD;
2388 fake_llc->protid[0] = 0;
2389 fake_llc->protid[1] = 0;
2390 fake_llc->protid[2] = 0;
2391 fake_llc->ethertype = ETH_P_IP;
2392}
2393
2394static inline void
2395qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb,
1da177e4
LT
2396 struct qeth_hdr *hdr)
2397{
2398 struct ethhdr *fake_hdr;
2399 struct iphdr *ip_hdr;
2400
e23dd9cd
FP
2401 QETH_DBF_TEXT(trace,5,"skbfketh");
2402 skb->mac.raw = skb->data - QETH_FAKE_LL_LEN_ETH;
1da177e4
LT
2403 /* this is a fake ethernet header */
2404 fake_hdr = (struct ethhdr *) skb->mac.raw;
2405
2406 /* the destination MAC address */
2407 switch (skb->pkt_type){
2408 case PACKET_MULTICAST:
2409 switch (skb->protocol){
2410#ifdef CONFIG_QETH_IPV6
2411 case __constant_htons(ETH_P_IPV6):
2412 ndisc_mc_map((struct in6_addr *)
2413 skb->data + QETH_FAKE_LL_V6_ADDR_POS,
2414 fake_hdr->h_dest, card->dev, 0);
2415 break;
2416#endif /* CONFIG_QETH_IPV6 */
2417 case __constant_htons(ETH_P_IP):
2418 ip_hdr = (struct iphdr *)skb->data;
e23dd9cd 2419 ip_eth_mc_map(ip_hdr->daddr, fake_hdr->h_dest);
1da177e4
LT
2420 break;
2421 default:
2422 memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
2423 }
2424 break;
2425 case PACKET_BROADCAST:
2426 memset(fake_hdr->h_dest, 0xff, ETH_ALEN);
2427 break;
2428 default:
2429 memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
2430 }
2431 /* the source MAC address */
2432 if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
2433 memcpy(fake_hdr->h_source, &hdr->hdr.l3.dest_addr[2], ETH_ALEN);
2434 else
2435 memset(fake_hdr->h_source, 0, ETH_ALEN);
2436 /* the protocol */
2437 fake_hdr->h_proto = skb->protocol;
2438}
2439
e23dd9cd
FP
2440static inline void
2441qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb,
2442 struct qeth_hdr *hdr)
2443{
2444 if (card->dev->type == ARPHRD_IEEE802_TR)
2445 qeth_rebuild_skb_fake_ll_tr(card, skb, hdr);
2446 else
2447 qeth_rebuild_skb_fake_ll_eth(card, skb, hdr);
2448}
2449
1da177e4
LT
2450static inline void
2451qeth_rebuild_skb_vlan(struct qeth_card *card, struct sk_buff *skb,
2452 struct qeth_hdr *hdr)
2453{
2454#ifdef CONFIG_QETH_VLAN
2455 u16 *vlan_tag;
2456
2457 if (hdr->hdr.l3.ext_flags &
2458 (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
2459 vlan_tag = (u16 *) skb_push(skb, VLAN_HLEN);
2460 *vlan_tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)?
2461 hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
2462 *(vlan_tag + 1) = skb->protocol;
2463 skb->protocol = __constant_htons(ETH_P_8021Q);
2464 }
2465#endif /* CONFIG_QETH_VLAN */
2466}
2467
2468static inline __u16
2469qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2470 struct qeth_hdr *hdr)
2471{
2472 unsigned short vlan_id = 0;
2473#ifdef CONFIG_QETH_VLAN
2474 struct vlan_hdr *vhdr;
2475#endif
2476
2477 skb->pkt_type = PACKET_HOST;
2478 skb->protocol = qeth_type_trans(skb, skb->dev);
2479 if (card->options.checksum_type == NO_CHECKSUMMING)
2480 skb->ip_summed = CHECKSUM_UNNECESSARY;
2481 else
2482 skb->ip_summed = CHECKSUM_NONE;
2483#ifdef CONFIG_QETH_VLAN
2484 if (hdr->hdr.l2.flags[2] & (QETH_LAYER2_FLAG_VLAN)) {
2485 vhdr = (struct vlan_hdr *) skb->data;
2486 skb->protocol =
2487 __constant_htons(vhdr->h_vlan_encapsulated_proto);
2488 vlan_id = hdr->hdr.l2.vlan_id;
2489 skb_pull(skb, VLAN_HLEN);
2490 }
2491#endif
9123e0d7 2492 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
1da177e4
LT
2493 return vlan_id;
2494}
2495
2496static inline void
2497qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2498 struct qeth_hdr *hdr)
2499{
2500#ifdef CONFIG_QETH_IPV6
2501 if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
2502 skb->pkt_type = PACKET_HOST;
2503 skb->protocol = qeth_type_trans(skb, card->dev);
2504 return;
2505 }
2506#endif /* CONFIG_QETH_IPV6 */
2507 skb->protocol = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 :
2508 ETH_P_IP);
2509 switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK){
2510 case QETH_CAST_UNICAST:
2511 skb->pkt_type = PACKET_HOST;
2512 break;
2513 case QETH_CAST_MULTICAST:
2514 skb->pkt_type = PACKET_MULTICAST;
2515 card->stats.multicast++;
2516 break;
2517 case QETH_CAST_BROADCAST:
2518 skb->pkt_type = PACKET_BROADCAST;
2519 card->stats.multicast++;
2520 break;
2521 case QETH_CAST_ANYCAST:
2522 case QETH_CAST_NOCAST:
2523 default:
2524 skb->pkt_type = PACKET_HOST;
2525 }
2526 qeth_rebuild_skb_vlan(card, skb, hdr);
2527 if (card->options.fake_ll)
2528 qeth_rebuild_skb_fake_ll(card, skb, hdr);
2529 else
2530 skb->mac.raw = skb->data;
2531 skb->ip_summed = card->options.checksum_type;
2532 if (card->options.checksum_type == HW_CHECKSUMMING){
2533 if ( (hdr->hdr.l3.ext_flags &
2534 (QETH_HDR_EXT_CSUM_HDR_REQ |
2535 QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
2536 (QETH_HDR_EXT_CSUM_HDR_REQ |
2537 QETH_HDR_EXT_CSUM_TRANSP_REQ) )
2538 skb->ip_summed = CHECKSUM_UNNECESSARY;
2539 else
2540 skb->ip_summed = SW_CHECKSUMMING;
2541 }
2542}
2543
2544static inline void
2545qeth_process_inbound_buffer(struct qeth_card *card,
2546 struct qeth_qdio_buffer *buf, int index)
2547{
2548 struct qdio_buffer_element *element;
2549 struct sk_buff *skb;
2550 struct qeth_hdr *hdr;
2551 int offset;
2552 int rxrc;
2553 __u16 vlan_tag = 0;
2554
2555 /* get first element of current buffer */
2556 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
2557 offset = 0;
2558#ifdef CONFIG_QETH_PERF_STATS
2559 card->perf_stats.bufs_rec++;
2560#endif
2561 while((skb = qeth_get_next_skb(card, buf->buffer, &element,
2562 &offset, &hdr))) {
2563 skb->dev = card->dev;
2564 if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
2565 vlan_tag = qeth_layer2_rebuild_skb(card, skb, hdr);
500f83ab 2566 else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)
1da177e4 2567 qeth_rebuild_skb(card, skb, hdr);
500f83ab
UB
2568 else { /*in case of OSN*/
2569 skb_push(skb, sizeof(struct qeth_hdr));
2570 memcpy(skb->data, hdr, sizeof(struct qeth_hdr));
2571 }
1da177e4
LT
2572 /* is device UP ? */
2573 if (!(card->dev->flags & IFF_UP)){
2574 dev_kfree_skb_any(skb);
2575 continue;
2576 }
2577#ifdef CONFIG_QETH_VLAN
2578 if (vlan_tag)
2579 vlan_hwaccel_rx(skb, card->vlangrp, vlan_tag);
2580 else
2581#endif
500f83ab
UB
2582 if (card->info.type == QETH_CARD_TYPE_OSN)
2583 rxrc = card->osn_info.data_cb(skb);
2584 else
2585 rxrc = netif_rx(skb);
1da177e4
LT
2586 card->dev->last_rx = jiffies;
2587 card->stats.rx_packets++;
2588 card->stats.rx_bytes += skb->len;
2589 }
2590}
2591
2592static inline struct qeth_buffer_pool_entry *
2593qeth_get_buffer_pool_entry(struct qeth_card *card)
2594{
2595 struct qeth_buffer_pool_entry *entry;
2596
2597 QETH_DBF_TEXT(trace, 6, "gtbfplen");
2598 if (!list_empty(&card->qdio.in_buf_pool.entry_list)) {
2599 entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2600 struct qeth_buffer_pool_entry, list);
2601 list_del_init(&entry->list);
2602 return entry;
2603 }
2604 return NULL;
2605}
2606
2607static inline void
2608qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
2609{
2610 struct qeth_buffer_pool_entry *pool_entry;
2611 int i;
2612
2613 pool_entry = qeth_get_buffer_pool_entry(card);
2614 /*
2615 * since the buffer is accessed only from the input_tasklet
2616 * there shouldn't be a need to synchronize; also, since we use
2617 	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2618 * buffers
2619 */
2620 BUG_ON(!pool_entry);
2621
2622 buf->pool_entry = pool_entry;
2623 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
2624 buf->buffer->element[i].length = PAGE_SIZE;
2625 buf->buffer->element[i].addr = pool_entry->elements[i];
2626 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2627 buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY;
2628 else
2629 buf->buffer->element[i].flags = 0;
2630 }
2631 buf->state = QETH_QDIO_BUF_EMPTY;
2632}
2633
2634static inline void
2635qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
2636 struct qeth_qdio_out_buffer *buf)
2637{
2638 int i;
2639 struct sk_buff *skb;
2640
2641 /* is PCI flag set on buffer? */
2642 if (buf->buffer->element[0].flags & 0x40)
2643 atomic_dec(&queue->set_pci_flags_count);
2644
2645 while ((skb = skb_dequeue(&buf->skb_list))){
2646 atomic_dec(&skb->users);
2647 dev_kfree_skb_any(skb);
2648 }
2649 qeth_eddp_buf_release_contexts(buf);
2650 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i){
2651 buf->buffer->element[i].length = 0;
2652 buf->buffer->element[i].addr = NULL;
2653 buf->buffer->element[i].flags = 0;
2654 }
2655 buf->next_element_to_fill = 0;
2656 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
2657}
2658
2659static inline void
2660qeth_queue_input_buffer(struct qeth_card *card, int index)
2661{
2662 struct qeth_qdio_q *queue = card->qdio.in_q;
2663 int count;
2664 int i;
2665 int rc;
2666
2667 QETH_DBF_TEXT(trace,6,"queinbuf");
2668 count = (index < queue->next_buf_to_init)?
2669 card->qdio.in_buf_pool.buf_count -
2670 (queue->next_buf_to_init - index) :
2671 card->qdio.in_buf_pool.buf_count -
2672 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
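	/*
	 * count is the number of empty buffers that can be requeued while
	 * the buffer at 'index' is held back; e.g. with buf_count=16,
	 * next_buf_to_init=15 and index=5 this yields 16 - (15 - 5) = 6.
	 */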
2673 /* only requeue at a certain threshold to avoid SIGAs */
2674 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)){
2675 for (i = queue->next_buf_to_init;
2676 i < queue->next_buf_to_init + count; ++i)
2677 qeth_init_input_buffer(card,
2678 &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]);
2679 /*
2680 		 * according to the old code, requeueing all 128 buffers at once
2681 		 * should be avoided in order to benefit from PCI avoidance.
2682 * this function keeps at least one buffer (the buffer at
2683 * 'index') un-requeued -> this buffer is the first buffer that
2684 * will be requeued the next time
2685 */
2686#ifdef CONFIG_QETH_PERF_STATS
2687 card->perf_stats.inbound_do_qdio_cnt++;
2688 card->perf_stats.inbound_do_qdio_start_time = qeth_get_micros();
2689#endif
2690 rc = do_QDIO(CARD_DDEV(card),
2691 QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
2692 0, queue->next_buf_to_init, count, NULL);
2693#ifdef CONFIG_QETH_PERF_STATS
2694 card->perf_stats.inbound_do_qdio_time += qeth_get_micros() -
2695 card->perf_stats.inbound_do_qdio_start_time;
2696#endif
2697 if (rc){
2698 PRINT_WARN("qeth_queue_input_buffer's do_QDIO "
2699 			   "returned %i (device %s).\n",
2700 rc, CARD_DDEV_ID(card));
2701 QETH_DBF_TEXT(trace,2,"qinberr");
2702 QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
2703 }
2704 queue->next_buf_to_init = (queue->next_buf_to_init + count) %
2705 QDIO_MAX_BUFFERS_PER_Q;
2706 }
2707}
2708
2709static inline void
2710qeth_put_buffer_pool_entry(struct qeth_card *card,
2711 struct qeth_buffer_pool_entry *entry)
2712{
2713 QETH_DBF_TEXT(trace, 6, "ptbfplen");
2714 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
2715}
2716
2717static void
2718qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status,
2719 unsigned int qdio_err, unsigned int siga_err,
2720 unsigned int queue, int first_element, int count,
2721 unsigned long card_ptr)
2722{
2723 struct net_device *net_dev;
2724 struct qeth_card *card;
2725 struct qeth_qdio_buffer *buffer;
2726 int index;
2727 int i;
2728
2729 QETH_DBF_TEXT(trace, 6, "qdinput");
2730 card = (struct qeth_card *) card_ptr;
2731 net_dev = card->dev;
2732#ifdef CONFIG_QETH_PERF_STATS
2733 card->perf_stats.inbound_cnt++;
2734 card->perf_stats.inbound_start_time = qeth_get_micros();
2735#endif
2736 if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
2737 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
2738 QETH_DBF_TEXT(trace, 1,"qdinchk");
2739 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2740 QETH_DBF_TEXT_(trace,1,"%04X%04X",first_element,count);
2741 QETH_DBF_TEXT_(trace,1,"%04X%04X", queue, status);
2742 qeth_schedule_recovery(card);
2743 return;
2744 }
2745 }
2746 for (i = first_element; i < (first_element + count); ++i) {
2747 index = i % QDIO_MAX_BUFFERS_PER_Q;
2748 buffer = &card->qdio.in_q->bufs[index];
2749 if (!((status == QDIO_STATUS_LOOK_FOR_ERROR) &&
2750 qeth_check_for_inbound_error(buffer, qdio_err, siga_err)))
2751 qeth_process_inbound_buffer(card, buffer, index);
2752 /* clear buffer and give back to hardware */
2753 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
2754 qeth_queue_input_buffer(card, index);
2755 }
2756#ifdef CONFIG_QETH_PERF_STATS
2757 card->perf_stats.inbound_time += qeth_get_micros() -
2758 card->perf_stats.inbound_start_time;
2759#endif
2760}
2761
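/*
 * Map the SIGA condition code and qdio error of a sent buffer to a
 * QETH_SEND_ERROR_* value; cc 2 with the B bit set and cc 3 make the
 * caller kick off a recovery.
 */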
2762static inline int
2763qeth_handle_send_error(struct qeth_card *card,
2764 struct qeth_qdio_out_buffer *buffer,
2765 int qdio_err, int siga_err)
2766{
2767 int sbalf15 = buffer->buffer->element[15].flags & 0xff;
2768 int cc = siga_err & 3;
2769
2770 QETH_DBF_TEXT(trace, 6, "hdsnderr");
2771 switch (cc) {
2772 case 0:
2773 if (qdio_err){
2774 QETH_DBF_TEXT(trace, 1,"lnkfail");
2775 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2776 QETH_DBF_TEXT_(trace,1,"%04x %02x",
2777 (u16)qdio_err, (u8)sbalf15);
2778 return QETH_SEND_ERROR_LINK_FAILURE;
2779 }
2780 return QETH_SEND_ERROR_NONE;
2781 case 2:
2782 if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) {
2783 QETH_DBF_TEXT(trace, 1, "SIGAcc2B");
2784 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2785 return QETH_SEND_ERROR_KICK_IT;
2786 }
2787 if ((sbalf15 >= 15) && (sbalf15 <= 31))
2788 return QETH_SEND_ERROR_RETRY;
2789 return QETH_SEND_ERROR_LINK_FAILURE;
2790 /* look at qdio_error and sbalf 15 */
2791 case 1:
2792 QETH_DBF_TEXT(trace, 1, "SIGAcc1");
2793 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2794 return QETH_SEND_ERROR_LINK_FAILURE;
2795 case 3:
2796 QETH_DBF_TEXT(trace, 1, "SIGAcc3");
2797 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2798 return QETH_SEND_ERROR_KICK_IT;
2799 }
2800 return QETH_SEND_ERROR_LINK_FAILURE;
2801}
2802
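/*
 * Hand 'count' primed buffers, starting at 'index', to the device via
 * do_QDIO. On non-IQD cards the PCI request bit (0x40) may be set in a
 * buffer's first element so that a later PCI interrupt can drain
 * leftover packed buffers.
 */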
2803void
2804qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2805 int index, int count)
2806{
2807 struct qeth_qdio_out_buffer *buf;
2808 int rc;
2809 int i;
2810
2811 QETH_DBF_TEXT(trace, 6, "flushbuf");
2812
2813 for (i = index; i < index + count; ++i) {
2814 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2815 buf->buffer->element[buf->next_element_to_fill - 1].flags |=
2816 SBAL_FLAGS_LAST_ENTRY;
2817
2818 if (queue->card->info.type == QETH_CARD_TYPE_IQD)
2819 continue;
2820
2821 if (!queue->do_pack){
2822 if ((atomic_read(&queue->used_buffers) >=
2823 (QETH_HIGH_WATERMARK_PACK -
2824 QETH_WATERMARK_PACK_FUZZ)) &&
2825 !atomic_read(&queue->set_pci_flags_count)){
2826 /* it's likely that we'll go to packing
2827 * mode soon */
2828 atomic_inc(&queue->set_pci_flags_count);
2829 buf->buffer->element[0].flags |= 0x40;
2830 }
2831 } else {
2832 if (!atomic_read(&queue->set_pci_flags_count)){
2833 /*
2834 * there's no outstanding PCI any more, so we
2835 			 * have to request a PCI to be sure that the PCI
2836 			 * will wake up at some time in the future; then we
2837 * can flush packed buffers that might still be
2838 * hanging around, which can happen if no
2839 * further send was requested by the stack
2840 */
2841 atomic_inc(&queue->set_pci_flags_count);
2842 buf->buffer->element[0].flags |= 0x40;
2843 }
2844 }
2845 }
2846
2847 queue->card->dev->trans_start = jiffies;
2848#ifdef CONFIG_QETH_PERF_STATS
2849 queue->card->perf_stats.outbound_do_qdio_cnt++;
2850 queue->card->perf_stats.outbound_do_qdio_start_time = qeth_get_micros();
2851#endif
2852 if (under_int)
2853 rc = do_QDIO(CARD_DDEV(queue->card),
2854 QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_UNDER_INTERRUPT,
2855 queue->queue_no, index, count, NULL);
2856 else
2857 rc = do_QDIO(CARD_DDEV(queue->card), QDIO_FLAG_SYNC_OUTPUT,
2858 queue->queue_no, index, count, NULL);
2859#ifdef CONFIG_QETH_PERF_STATS
2860 queue->card->perf_stats.outbound_do_qdio_time += qeth_get_micros() -
2861 queue->card->perf_stats.outbound_do_qdio_start_time;
2862#endif
2863 if (rc){
1da177e4
LT
2864 QETH_DBF_TEXT(trace, 2, "flushbuf");
2865 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
f3d242e8 2866 QETH_DBF_TEXT_(trace, 2, "%s", CARD_DDEV_ID(queue->card));
1da177e4
LT
2867 queue->card->stats.tx_errors += count;
2868 /* this must not happen under normal circumstances. if it
2869 * happens something is really wrong -> recover */
2870 qeth_schedule_recovery(queue->card);
2871 return;
2872 }
2873 atomic_add(count, &queue->used_buffers);
2874#ifdef CONFIG_QETH_PERF_STATS
2875 queue->card->perf_stats.bufs_sent += count;
2876#endif
2877}
2878
2879/*
2880 * Switches to packing state if the number of used buffers on a queue
2881 * reaches a certain limit.
2882 */
2883static inline void
2884qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2885{
2886 if (!queue->do_pack) {
2887 if (atomic_read(&queue->used_buffers)
2888 >= QETH_HIGH_WATERMARK_PACK){
2889 /* switch non-PACKING -> PACKING */
2890 QETH_DBF_TEXT(trace, 6, "np->pack");
2891#ifdef CONFIG_QETH_PERF_STATS
2892 queue->card->perf_stats.sc_dp_p++;
2893#endif
2894 queue->do_pack = 1;
2895 }
2896 }
2897}
2898
2899/*
2900 * Switches from packing to non-packing mode. If there is a packing
2901 * buffer on the queue this buffer will be prepared to be flushed.
2902 * In that case 1 is returned to inform the caller. If no buffer
2903 * has to be flushed, zero is returned.
2904 */
2905static inline int
2906qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
2907{
2908 struct qeth_qdio_out_buffer *buffer;
2909 int flush_count = 0;
2910
2911 if (queue->do_pack) {
2912 if (atomic_read(&queue->used_buffers)
2913 <= QETH_LOW_WATERMARK_PACK) {
2914 /* switch PACKING -> non-PACKING */
2915 QETH_DBF_TEXT(trace, 6, "pack->np");
2916#ifdef CONFIG_QETH_PERF_STATS
2917 queue->card->perf_stats.sc_p_dp++;
2918#endif
2919 queue->do_pack = 0;
2920 /* flush packing buffers */
2921 buffer = &queue->bufs[queue->next_buf_to_fill];
2922 if ((atomic_read(&buffer->state) ==
2923 QETH_QDIO_BUF_EMPTY) &&
2924 (buffer->next_element_to_fill > 0)) {
2925 atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED);
2926 flush_count++;
2927 queue->next_buf_to_fill =
2928 (queue->next_buf_to_fill + 1) %
2929 QDIO_MAX_BUFFERS_PER_Q;
2930 }
2931 }
2932 }
2933 return flush_count;
2934}
2935
2936/*
2937 * Called to flush a packing buffer if no more pci flags are on the queue.
2938 * Checks if there is a packing buffer and prepares it to be flushed.
2939 * In that case returns 1, otherwise zero.
2940 */
2941static inline int
2942qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2943{
2944 struct qeth_qdio_out_buffer *buffer;
2945
2946 buffer = &queue->bufs[queue->next_buf_to_fill];
2947 if((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
2948 (buffer->next_element_to_fill > 0)){
2949 /* it's a packing buffer */
2950 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
2951 queue->next_buf_to_fill =
2952 (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
2953 return 1;
2954 }
2955 return 0;
2956}
2957
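/*
 * Run from the outbound qdio handler after buffers have been released:
 * if the queue can be locked here, leave packing mode and/or flush a
 * leftover packing buffer that would otherwise linger without a PCI.
 */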
2958static inline void
2959qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
2960{
2961 int index;
2962 int flush_cnt = 0;
2963 int q_was_packing = 0;
2964
2965 /*
2966 	 * check if we have to switch to non-packing mode or if
2967 * we have to get a pci flag out on the queue
2968 */
2969 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
2970 !atomic_read(&queue->set_pci_flags_count)){
2971 if (atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
2972 QETH_OUT_Q_UNLOCKED) {
2973 /*
2974 * If we get in here, there was no action in
2975 * do_send_packet. So, we check if there is a
2976 * packing buffer to be flushed here.
2977 */
2978 netif_stop_queue(queue->card->dev);
2979 index = queue->next_buf_to_fill;
2980 q_was_packing = queue->do_pack;
2981 flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
2982 if (!flush_cnt &&
2983 !atomic_read(&queue->set_pci_flags_count))
2984 flush_cnt +=
2985 qeth_flush_buffers_on_no_pci(queue);
2986#ifdef CONFIG_QETH_PERF_STATS
2987 if (q_was_packing)
2988 queue->card->perf_stats.bufs_sent_pack +=
2989 flush_cnt;
2990#endif
2991 if (flush_cnt)
2992 qeth_flush_buffers(queue, 1, index, flush_cnt);
2993 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
2994 }
2995 }
2996}
2997
2998static void
2999qeth_qdio_output_handler(struct ccw_device * ccwdev, unsigned int status,
3000 unsigned int qdio_error, unsigned int siga_error,
3001 unsigned int __queue, int first_element, int count,
3002 unsigned long card_ptr)
3003{
3004 struct qeth_card *card = (struct qeth_card *) card_ptr;
3005 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
3006 struct qeth_qdio_out_buffer *buffer;
3007 int i;
3008
3009 QETH_DBF_TEXT(trace, 6, "qdouhdl");
3010 if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
3011 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
f3d242e8
FP
3012 QETH_DBF_TEXT(trace, 2, "achkcond");
3013 QETH_DBF_TEXT_(trace, 2, "%s", CARD_BUS_ID(card));
1da177e4
LT
3014 QETH_DBF_TEXT_(trace, 2, "%08x", status);
3015 netif_stop_queue(card->dev);
3016 qeth_schedule_recovery(card);
3017 return;
3018 }
3019 }
3020#ifdef CONFIG_QETH_PERF_STATS
3021 card->perf_stats.outbound_handler_cnt++;
3022 card->perf_stats.outbound_handler_start_time = qeth_get_micros();
3023#endif
3024 for(i = first_element; i < (first_element + count); ++i){
3025 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
3026 /*we only handle the KICK_IT error by doing a recovery */
3027 if (qeth_handle_send_error(card, buffer, qdio_error, siga_error)
3028 == QETH_SEND_ERROR_KICK_IT){
3029 netif_stop_queue(card->dev);
3030 qeth_schedule_recovery(card);
3031 return;
3032 }
3033 qeth_clear_output_buffer(queue, buffer);
3034 }
3035 atomic_sub(count, &queue->used_buffers);
3036 /* check if we need to do something on this outbound queue */
3037 if (card->info.type != QETH_CARD_TYPE_IQD)
3038 qeth_check_outbound_queue(queue);
3039
3040 netif_wake_queue(queue->card->dev);
3041#ifdef CONFIG_QETH_PERF_STATS
3042 card->perf_stats.outbound_handler_time += qeth_get_micros() -
3043 card->perf_stats.outbound_handler_start_time;
3044#endif
3045}
3046
3047static void
3048qeth_create_qib_param_field(struct qeth_card *card, char *param_field)
3049{
3050
3051 param_field[0] = _ascebc['P'];
3052 param_field[1] = _ascebc['C'];
3053 param_field[2] = _ascebc['I'];
3054 param_field[3] = _ascebc['T'];
3055 *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
3056 *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
3057 *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
3058}
3059
3060static void
3061qeth_create_qib_param_field_blkt(struct qeth_card *card, char *param_field)
3062{
3063 param_field[16] = _ascebc['B'];
3064 param_field[17] = _ascebc['L'];
3065 param_field[18] = _ascebc['K'];
3066 param_field[19] = _ascebc['T'];
3067 *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
3068 *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
3069 *((unsigned int *) (&param_field[28])) = card->info.blkt.inter_packet_jumbo;
3070}
3071
3072static void
3073qeth_initialize_working_pool_list(struct qeth_card *card)
3074{
3075 struct qeth_buffer_pool_entry *entry;
3076
3077 QETH_DBF_TEXT(trace,5,"inwrklst");
3078
3079 list_for_each_entry(entry,
3080 &card->qdio.init_pool.entry_list, init_list) {
3081 qeth_put_buffer_pool_entry(card,entry);
3082 }
3083}
3084
3085static void
3086qeth_clear_working_pool_list(struct qeth_card *card)
3087{
3088 struct qeth_buffer_pool_entry *pool_entry, *tmp;
3089
3090 QETH_DBF_TEXT(trace,5,"clwrklst");
3091 list_for_each_entry_safe(pool_entry, tmp,
3092 &card->qdio.in_buf_pool.entry_list, list){
3093 list_del(&pool_entry->list);
3094 }
3095}
3096
3097static void
3098qeth_free_buffer_pool(struct qeth_card *card)
3099{
3100 struct qeth_buffer_pool_entry *pool_entry, *tmp;
3101 int i=0;
3102 QETH_DBF_TEXT(trace,5,"freepool");
3103 list_for_each_entry_safe(pool_entry, tmp,
3104 &card->qdio.init_pool.entry_list, init_list){
3105 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
3106 free_page((unsigned long)pool_entry->elements[i]);
3107 list_del(&pool_entry->init_list);
3108 kfree(pool_entry);
3109 }
3110}
3111
3112static int
3113qeth_alloc_buffer_pool(struct qeth_card *card)
3114{
3115 struct qeth_buffer_pool_entry *pool_entry;
3116 void *ptr;
3117 int i, j;
3118
3119 QETH_DBF_TEXT(trace,5,"alocpool");
3120 for (i = 0; i < card->qdio.init_pool.buf_count; ++i){
3121 pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
3122 if (!pool_entry){
3123 qeth_free_buffer_pool(card);
3124 return -ENOMEM;
3125 }
3126 for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){
9123e0d7 3127 ptr = (void *) __get_free_page(GFP_KERNEL|GFP_DMA);
1da177e4
LT
3128 if (!ptr) {
3129 while (j > 0)
3130 free_page((unsigned long)
3131 pool_entry->elements[--j]);
3132 kfree(pool_entry);
3133 qeth_free_buffer_pool(card);
3134 return -ENOMEM;
3135 }
3136 pool_entry->elements[j] = ptr;
3137 }
3138 list_add(&pool_entry->init_list,
3139 &card->qdio.init_pool.entry_list);
3140 }
3141 return 0;
3142}
3143
3144int
3145qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
3146{
3147 QETH_DBF_TEXT(trace, 2, "realcbp");
3148
3149 if ((card->state != CARD_STATE_DOWN) &&
3150 (card->state != CARD_STATE_RECOVER))
3151 return -EPERM;
3152
3153 	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
3154 qeth_clear_working_pool_list(card);
3155 qeth_free_buffer_pool(card);
3156 card->qdio.in_buf_pool.buf_count = bufcnt;
3157 card->qdio.init_pool.buf_count = bufcnt;
3158 return qeth_alloc_buffer_pool(card);
3159}
3160
3161static int
3162qeth_alloc_qdio_buffers(struct qeth_card *card)
3163{
3164 int i, j;
3165
3166 QETH_DBF_TEXT(setup, 2, "allcqdbf");
3167
3168 if (card->qdio.state == QETH_QDIO_ALLOCATED)
3169 return 0;
3170
9123e0d7
UB
3171 card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
3172 GFP_KERNEL|GFP_DMA);
1da177e4
LT
3173 if (!card->qdio.in_q)
3174 		return -ENOMEM;
3175 QETH_DBF_TEXT(setup, 2, "inq");
3176 QETH_DBF_HEX(setup, 2, &card->qdio.in_q, sizeof(void *));
3177 memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
3178 /* give inbound qeth_qdio_buffers their qdio_buffers */
3179 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3180 card->qdio.in_q->bufs[i].buffer =
3181 &card->qdio.in_q->qdio_bufs[i];
3182 /* inbound buffer pool */
3183 if (qeth_alloc_buffer_pool(card)){
3184 kfree(card->qdio.in_q);
3185 return -ENOMEM;
3186 }
3187 /* outbound */
3188 card->qdio.out_qs =
3189 kmalloc(card->qdio.no_out_queues *
3190 sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
3191 if (!card->qdio.out_qs){
3192 qeth_free_buffer_pool(card);
3193 return -ENOMEM;
3194 }
3195 for (i = 0; i < card->qdio.no_out_queues; ++i){
3196 card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
9123e0d7 3197 GFP_KERNEL|GFP_DMA);
1da177e4
LT
3198 if (!card->qdio.out_qs[i]){
3199 while (i > 0)
3200 kfree(card->qdio.out_qs[--i]);
3201 kfree(card->qdio.out_qs);
3202 return -ENOMEM;
3203 }
3204 QETH_DBF_TEXT_(setup, 2, "outq %i", i);
3205 QETH_DBF_HEX(setup, 2, &card->qdio.out_qs[i], sizeof(void *));
3206 memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
3207 card->qdio.out_qs[i]->queue_no = i;
3208 /* give outbound qeth_qdio_buffers their qdio_buffers */
3209 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
3210 card->qdio.out_qs[i]->bufs[j].buffer =
3211 &card->qdio.out_qs[i]->qdio_bufs[j];
3212 skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
3213 skb_list);
3214 INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
3215 }
3216 }
3217 card->qdio.state = QETH_QDIO_ALLOCATED;
3218 return 0;
3219}
3220
3221static void
3222qeth_free_qdio_buffers(struct qeth_card *card)
3223{
3224 int i, j;
3225
3226 QETH_DBF_TEXT(trace, 2, "freeqdbf");
3227 if (card->qdio.state == QETH_QDIO_UNINITIALIZED)
3228 return;
3229 kfree(card->qdio.in_q);
3230 /* inbound buffer pool */
3231 qeth_free_buffer_pool(card);
3232 /* free outbound qdio_qs */
3233 for (i = 0; i < card->qdio.no_out_queues; ++i){
3234 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
3235 qeth_clear_output_buffer(card->qdio.out_qs[i],
3236 &card->qdio.out_qs[i]->bufs[j]);
3237 kfree(card->qdio.out_qs[i]);
3238 }
3239 kfree(card->qdio.out_qs);
3240 card->qdio.state = QETH_QDIO_UNINITIALIZED;
3241}
3242
3243static void
3244qeth_clear_qdio_buffers(struct qeth_card *card)
3245{
3246 int i, j;
3247
3248 QETH_DBF_TEXT(trace, 2, "clearqdbf");
3249 /* clear outbound buffers to free skbs */
3250 for (i = 0; i < card->qdio.no_out_queues; ++i)
3251 if (card->qdio.out_qs[i]){
3252 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
3253 qeth_clear_output_buffer(card->qdio.out_qs[i],
3254 &card->qdio.out_qs[i]->bufs[j]);
3255 }
3256}
3257
3258static void
3259qeth_init_qdio_info(struct qeth_card *card)
3260{
3261 QETH_DBF_TEXT(setup, 4, "intqdinf");
3262 card->qdio.state = QETH_QDIO_UNINITIALIZED;
3263 /* inbound */
3264 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
3265 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
3266 card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
3267 INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
3268 INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
3269 /* outbound */
1da177e4
LT
3270}
3271
3272static int
3273qeth_init_qdio_queues(struct qeth_card *card)
3274{
3275 int i, j;
3276 int rc;
3277
3278 QETH_DBF_TEXT(setup, 2, "initqdqs");
3279
3280 /* inbound queue */
3281 memset(card->qdio.in_q->qdio_bufs, 0,
3282 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
3283 qeth_initialize_working_pool_list(card);
3284 /*give only as many buffers to hardware as we have buffer pool entries*/
3285 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
3286 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
3287 card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1;
3288 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
3289 card->qdio.in_buf_pool.buf_count - 1, NULL);
3290 if (rc) {
3291 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3292 return rc;
3293 }
3294 rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
3295 if (rc) {
3296 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3297 return rc;
3298 }
3299 /* outbound queue */
3300 for (i = 0; i < card->qdio.no_out_queues; ++i){
3301 memset(card->qdio.out_qs[i]->qdio_bufs, 0,
3302 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
3303 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
3304 qeth_clear_output_buffer(card->qdio.out_qs[i],
3305 &card->qdio.out_qs[i]->bufs[j]);
3306 }
3307 card->qdio.out_qs[i]->card = card;
3308 card->qdio.out_qs[i]->next_buf_to_fill = 0;
3309 card->qdio.out_qs[i]->do_pack = 0;
3310 atomic_set(&card->qdio.out_qs[i]->used_buffers,0);
3311 atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
3312 atomic_set(&card->qdio.out_qs[i]->state,
3313 QETH_OUT_Q_UNLOCKED);
3314 }
3315 return 0;
3316}
3317
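/*
 * Set up the QDIO queues: build the QIB parameter field (PCIT/BLKT),
 * collect the SBAL addresses of the inbound and outbound buffers and
 * pass everything to qdio_initialize().
 */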
3318static int
3319qeth_qdio_establish(struct qeth_card *card)
3320{
3321 struct qdio_initialize init_data;
3322 char *qib_param_field;
3323 struct qdio_buffer **in_sbal_ptrs;
3324 struct qdio_buffer **out_sbal_ptrs;
3325 int i, j, k;
3326 int rc;
3327
3328 QETH_DBF_TEXT(setup, 2, "qdioest");
3329
3330 qib_param_field = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
3331 GFP_KERNEL);
3332 if (!qib_param_field)
3333 return -ENOMEM;
3334
3335 memset(qib_param_field, 0, QDIO_MAX_BUFFERS_PER_Q * sizeof(char));
3336
3337 qeth_create_qib_param_field(card, qib_param_field);
3338 qeth_create_qib_param_field_blkt(card, qib_param_field);
3339
3340 in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
3341 GFP_KERNEL);
3342 if (!in_sbal_ptrs) {
3343 kfree(qib_param_field);
3344 return -ENOMEM;
3345 }
3346 for(i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3347 in_sbal_ptrs[i] = (struct qdio_buffer *)
3348 virt_to_phys(card->qdio.in_q->bufs[i].buffer);
3349
3350 out_sbal_ptrs =
3351 kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
3352 sizeof(void *), GFP_KERNEL);
3353 if (!out_sbal_ptrs) {
3354 kfree(in_sbal_ptrs);
3355 kfree(qib_param_field);
3356 return -ENOMEM;
3357 }
3358 for(i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
3359 for(j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k){
3360 out_sbal_ptrs[k] = (struct qdio_buffer *)
3361 virt_to_phys(card->qdio.out_qs[i]->
3362 bufs[j].buffer);
3363 }
3364
3365 memset(&init_data, 0, sizeof(struct qdio_initialize));
3366 init_data.cdev = CARD_DDEV(card);
3367 init_data.q_format = qeth_get_qdio_q_format(card);
3368 init_data.qib_param_field_format = 0;
3369 init_data.qib_param_field = qib_param_field;
3370 init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD;
3371 init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD;
3372 init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD;
3373 init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD;
3374 init_data.no_input_qs = 1;
3375 init_data.no_output_qs = card->qdio.no_out_queues;
3376 init_data.input_handler = (qdio_handler_t *)
3377 qeth_qdio_input_handler;
3378 init_data.output_handler = (qdio_handler_t *)
3379 qeth_qdio_output_handler;
3380 init_data.int_parm = (unsigned long) card;
3381 init_data.flags = QDIO_INBOUND_0COPY_SBALS |
3382 QDIO_OUTBOUND_0COPY_SBALS |
3383 QDIO_USE_OUTBOUND_PCIS;
3384 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
3385 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
3386
3387 if (!(rc = qdio_initialize(&init_data)))
3388 card->qdio.state = QETH_QDIO_ESTABLISHED;
3389
3390 kfree(out_sbal_ptrs);
3391 kfree(in_sbal_ptrs);
3392 kfree(qib_param_field);
3393 return rc;
3394}
3395
3396static int
3397qeth_qdio_activate(struct qeth_card *card)
3398{
3399 QETH_DBF_TEXT(setup,3,"qdioact");
3400 return qdio_activate(CARD_DDEV(card), 0);
3401}
3402
3403static int
3404qeth_clear_channel(struct qeth_channel *channel)
3405{
3406 unsigned long flags;
3407 struct qeth_card *card;
3408 int rc;
3409
3410 QETH_DBF_TEXT(trace,3,"clearch");
3411 card = CARD_FROM_CDEV(channel->ccwdev);
3412 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
3413 rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
3414 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
3415
3416 if (rc)
3417 return rc;
3418 rc = wait_event_interruptible_timeout(card->wait_q,
3419 channel->state==CH_STATE_STOPPED, QETH_TIMEOUT);
3420 if (rc == -ERESTARTSYS)
3421 return rc;
3422 if (channel->state != CH_STATE_STOPPED)
3423 return -ETIME;
3424 channel->state = CH_STATE_DOWN;
3425 return 0;
3426}
3427
3428static int
3429qeth_halt_channel(struct qeth_channel *channel)
3430{
3431 unsigned long flags;
3432 struct qeth_card *card;
3433 int rc;
3434
3435 QETH_DBF_TEXT(trace,3,"haltch");
3436 card = CARD_FROM_CDEV(channel->ccwdev);
3437 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
3438 rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
3439 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
3440
3441 if (rc)
3442 return rc;
3443 rc = wait_event_interruptible_timeout(card->wait_q,
3444 channel->state==CH_STATE_HALTED, QETH_TIMEOUT);
3445 if (rc == -ERESTARTSYS)
3446 return rc;
3447 if (channel->state != CH_STATE_HALTED)
3448 return -ETIME;
3449 return 0;
3450}
3451
3452static int
3453qeth_halt_channels(struct qeth_card *card)
3454{
f3d242e8 3455	int rc1 = 0, rc2 = 0, rc3 = 0;
1da177e4
LT
3456
3457 QETH_DBF_TEXT(trace,3,"haltchs");
f3d242e8
FP
3458 rc1 = qeth_halt_channel(&card->read);
3459 rc2 = qeth_halt_channel(&card->write);
3460 rc3 = qeth_halt_channel(&card->data);
3461 if (rc1)
3462 return rc1;
3463 if (rc2)
3464 return rc2;
3465 return rc3;
1da177e4
LT
3466}
3467static int
3468qeth_clear_channels(struct qeth_card *card)
3469{
f3d242e8 3470	int rc1 = 0, rc2 = 0, rc3 = 0;
1da177e4
LT
3471
3472 QETH_DBF_TEXT(trace,3,"clearchs");
f3d242e8
FP
3473 rc1 = qeth_clear_channel(&card->read);
3474 rc2 = qeth_clear_channel(&card->write);
3475 rc3 = qeth_clear_channel(&card->data);
3476 if (rc1)
3477 return rc1;
3478 if (rc2)
3479 return rc2;
3480 return rc3;
1da177e4
LT
3481}
3482
3483static int
3484qeth_clear_halt_card(struct qeth_card *card, int halt)
3485{
3486 int rc = 0;
3487
3488 QETH_DBF_TEXT(trace,3,"clhacrd");
3489 QETH_DBF_HEX(trace, 3, &card, sizeof(void *));
3490
3491 if (halt)
3492 rc = qeth_halt_channels(card);
3493 if (rc)
3494 return rc;
3495 return qeth_clear_channels(card);
3496}
3497
3498static int
3499qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
3500{
3501 int rc = 0;
3502
3503 QETH_DBF_TEXT(trace,3,"qdioclr");
3504 if (card->qdio.state == QETH_QDIO_ESTABLISHED){
3505 if ((rc = qdio_cleanup(CARD_DDEV(card),
3506 (card->info.type == QETH_CARD_TYPE_IQD) ?
3507 QDIO_FLAG_CLEANUP_USING_HALT :
3508 QDIO_FLAG_CLEANUP_USING_CLEAR)))
3509 QETH_DBF_TEXT_(trace, 3, "1err%d", rc);
3510 card->qdio.state = QETH_QDIO_ALLOCATED;
3511 }
3512 if ((rc = qeth_clear_halt_card(card, use_halt)))
3513 QETH_DBF_TEXT_(trace, 3, "2err%d", rc);
3514 card->state = CARD_STATE_DOWN;
3515 return rc;
3516}
3517
3518static int
3519qeth_dm_act(struct qeth_card *card)
3520{
3521 int rc;
3522 struct qeth_cmd_buffer *iob;
3523
3524 QETH_DBF_TEXT(setup,2,"dmact");
3525
3526 iob = qeth_wait_for_buffer(&card->write);
3527 memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
3528
3529 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
3530 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
3531 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
3532 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3533 rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
3534 return rc;
3535}
3536
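/*
 * MPC bring-up sequence: start the first read on the control channel,
 * then CM enable/setup, ULP enable/setup, allocate and establish the
 * QDIO queues, activate them and finally issue DM_ACT. Any failure
 * clears the card again via qeth_qdio_clear_card().
 */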
3537static int
3538qeth_mpc_initialize(struct qeth_card *card)
3539{
3540 int rc;
3541
3542 QETH_DBF_TEXT(setup,2,"mpcinit");
3543
3544 if ((rc = qeth_issue_next_read(card))){
3545 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3546 return rc;
3547 }
3548 if ((rc = qeth_cm_enable(card))){
3549 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
f3d242e8 3550 goto out_qdio;
1da177e4
LT
3551 }
3552 if ((rc = qeth_cm_setup(card))){
3553 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
f3d242e8 3554 goto out_qdio;
1da177e4
LT
3555 }
3556 if ((rc = qeth_ulp_enable(card))){
3557 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
f3d242e8 3558 goto out_qdio;
1da177e4
LT
3559 }
3560 if ((rc = qeth_ulp_setup(card))){
3561 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
f3d242e8 3562 goto out_qdio;
1da177e4
LT
3563 }
3564 if ((rc = qeth_alloc_qdio_buffers(card))){
3565 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
f3d242e8 3566 goto out_qdio;
1da177e4
LT
3567 }
3568 if ((rc = qeth_qdio_establish(card))){
3569 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
3570 qeth_free_qdio_buffers(card);
3571 goto out_qdio;
3572 }
3573 if ((rc = qeth_qdio_activate(card))){
3574 QETH_DBF_TEXT_(setup, 2, "7err%d", rc);
3575 goto out_qdio;
3576 }
3577 if ((rc = qeth_dm_act(card))){
3578 QETH_DBF_TEXT_(setup, 2, "8err%d", rc);
3579 goto out_qdio;
3580 }
3581
3582 return 0;
3583out_qdio:
500f83ab 3584 qeth_qdio_clear_card(card, card->info.type!=QETH_CARD_TYPE_IQD);
1da177e4
LT
3585 return rc;
3586}
3587
3588static struct net_device *
3589qeth_get_netdevice(enum qeth_card_types type, enum qeth_link_types linktype)
3590{
3591 struct net_device *dev = NULL;
3592
3593 switch (type) {
3594 case QETH_CARD_TYPE_OSAE:
3595 switch (linktype) {
3596 case QETH_LINK_TYPE_LANE_TR:
3597 case QETH_LINK_TYPE_HSTR:
3598#ifdef CONFIG_TR
3599 dev = alloc_trdev(0);
3600#endif /* CONFIG_TR */
3601 break;
3602 default:
3603 dev = alloc_etherdev(0);
3604 }
3605 break;
3606 case QETH_CARD_TYPE_IQD:
3607 dev = alloc_netdev(0, "hsi%d", ether_setup);
3608 break;
500f83ab
UB
3609 case QETH_CARD_TYPE_OSN:
3610 dev = alloc_netdev(0, "osn%d", ether_setup);
3611 break;
1da177e4
LT
3612 default:
3613 dev = alloc_etherdev(0);
3614 }
3615 return dev;
3616}
3617
3618/*hard_header fake function; used in case fake_ll is set */
3619static int
3620qeth_fake_header(struct sk_buff *skb, struct net_device *dev,
3621 unsigned short type, void *daddr, void *saddr,
3622 unsigned len)
3623{
e23dd9cd
FP
3624 if(dev->type == ARPHRD_IEEE802_TR){
3625 struct trh_hdr *hdr;
3626 hdr = (struct trh_hdr *)skb_push(skb, QETH_FAKE_LL_LEN_TR);
3627 memcpy(hdr->saddr, dev->dev_addr, TR_ALEN);
3628 memcpy(hdr->daddr, "FAKELL", TR_ALEN);
3629 return QETH_FAKE_LL_LEN_TR;
3630
3631 } else {
3632 struct ethhdr *hdr;
3633 hdr = (struct ethhdr *)skb_push(skb, QETH_FAKE_LL_LEN_ETH);
3634 memcpy(hdr->h_source, dev->dev_addr, ETH_ALEN);
3635 memcpy(hdr->h_dest, "FAKELL", ETH_ALEN);
3636 if (type != ETH_P_802_3)
3637 hdr->h_proto = htons(type);
3638 else
3639 hdr->h_proto = htons(len);
3640 return QETH_FAKE_LL_LEN_ETH;
1da177e4 3641
e23dd9cd 3642 }
1da177e4
LT
3643}
3644
3645static inline int
3646qeth_send_packet(struct qeth_card *, struct sk_buff *);
3647
3648static int
3649qeth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
3650{
3651 int rc;
3652 struct qeth_card *card;
3653
3654 QETH_DBF_TEXT(trace, 6, "hrdstxmi");
3655 card = (struct qeth_card *)dev->priv;
3656 if (skb==NULL) {
3657 card->stats.tx_dropped++;
3658 card->stats.tx_errors++;
3659 /* return OK; otherwise ksoftirqd goes to 100% */
3660 return NETDEV_TX_OK;
3661 }
3662 if ((card->state != CARD_STATE_UP) || !card->lan_online) {
3663 card->stats.tx_dropped++;
3664 card->stats.tx_errors++;
3665 card->stats.tx_carrier_errors++;
3666 dev_kfree_skb_any(skb);
3667 /* return OK; otherwise ksoftirqd goes to 100% */
3668 return NETDEV_TX_OK;
3669 }
3670#ifdef CONFIG_QETH_PERF_STATS
3671 card->perf_stats.outbound_cnt++;
3672 card->perf_stats.outbound_start_time = qeth_get_micros();
3673#endif
3674 netif_stop_queue(dev);
3675 if ((rc = qeth_send_packet(card, skb))) {
3676 if (rc == -EBUSY) {
3677 return NETDEV_TX_BUSY;
3678 } else {
3679 card->stats.tx_errors++;
3680 card->stats.tx_dropped++;
3681 dev_kfree_skb_any(skb);
3682 /*set to OK; otherwise ksoftirqd goes to 100% */
3683 rc = NETDEV_TX_OK;
3684 }
3685 }
3686 netif_wake_queue(dev);
3687#ifdef CONFIG_QETH_PERF_STATS
3688 card->perf_stats.outbound_time += qeth_get_micros() -
3689 card->perf_stats.outbound_start_time;
3690#endif
3691 return rc;
3692}
3693
3694static int
3695qeth_verify_vlan_dev(struct net_device *dev, struct qeth_card *card)
3696{
3697 int rc = 0;
3698#ifdef CONFIG_QETH_VLAN
3699 struct vlan_group *vg;
3700 int i;
3701
3702 if (!(vg = card->vlangrp))
3703 return rc;
3704
3705 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){
3706 if (vg->vlan_devices[i] == dev){
3707 rc = QETH_VLAN_CARD;
3708 break;
3709 }
3710 }
3711#endif
3712 return rc;
3713}
3714
3715static int
3716qeth_verify_dev(struct net_device *dev)
3717{
3718 struct qeth_card *card;
3719 unsigned long flags;
3720 int rc = 0;
3721
3722 read_lock_irqsave(&qeth_card_list.rwlock, flags);
3723 list_for_each_entry(card, &qeth_card_list.list, list){
3724 if (card->dev == dev){
3725 rc = QETH_REAL_CARD;
3726 break;
3727 }
3728 rc = qeth_verify_vlan_dev(dev, card);
3729 if (rc)
3730 break;
3731 }
3732 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
3733
3734 return rc;
3735}
3736
3737static struct qeth_card *
3738qeth_get_card_from_dev(struct net_device *dev)
3739{
3740 struct qeth_card *card = NULL;
3741 int rc;
3742
3743 rc = qeth_verify_dev(dev);
3744 if (rc == QETH_REAL_CARD)
3745 card = (struct qeth_card *)dev->priv;
3746 else if (rc == QETH_VLAN_CARD)
3747 card = (struct qeth_card *)
3748 VLAN_DEV_INFO(dev)->real_dev->priv;
3749
3750 QETH_DBF_TEXT_(trace, 4, "%d", rc);
3751 return card ;
3752}
3753
3754static void
3755qeth_tx_timeout(struct net_device *dev)
3756{
3757 struct qeth_card *card;
3758
3759 card = (struct qeth_card *) dev->priv;
3760 card->stats.tx_errors++;
3761 qeth_schedule_recovery(card);
3762}
3763
3764static int
3765qeth_open(struct net_device *dev)
3766{
3767 struct qeth_card *card;
3768
3769 QETH_DBF_TEXT(trace, 4, "qethopen");
3770
3771 card = (struct qeth_card *) dev->priv;
3772
3773 if (card->state != CARD_STATE_SOFTSETUP)
3774 return -ENODEV;
3775
500f83ab
UB
3776 if ( (card->info.type != QETH_CARD_TYPE_OSN) &&
3777 (card->options.layer2) &&
1da177e4
LT
3778 (!card->info.layer2_mac_registered)) {
3779 QETH_DBF_TEXT(trace,4,"nomacadr");
3780 return -EPERM;
3781 }
3782 card->dev->flags |= IFF_UP;
3783 netif_start_queue(dev);
3784 card->data.state = CH_STATE_UP;
3785 card->state = CARD_STATE_UP;
3786
3787 if (!card->lan_online){
3788 if (netif_carrier_ok(dev))
3789 netif_carrier_off(dev);
3790 }
3791 return 0;
3792}
3793
3794static int
3795qeth_stop(struct net_device *dev)
3796{
3797 struct qeth_card *card;
3798
3799 QETH_DBF_TEXT(trace, 4, "qethstop");
3800
3801 card = (struct qeth_card *) dev->priv;
3802
3803 netif_stop_queue(dev);
3804 card->dev->flags &= ~IFF_UP;
3805 if (card->state == CARD_STATE_UP)
3806 card->state = CARD_STATE_SOFTSETUP;
3807 return 0;
3808}
3809
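/*
 * determine the cast type (unicast / multicast / broadcast / anycast) of
 * an outgoing skb: prefer the neighbour entry attached to the dst, then
 * fall back to the IP header and finally to the destination MAC address
 */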
3810static inline int
3811qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
3812{
3813 int cast_type = RTN_UNSPEC;
3814
500f83ab
UB
3815 if (card->info.type == QETH_CARD_TYPE_OSN)
3816 return cast_type;
3817
1da177e4
LT
3818 if (skb->dst && skb->dst->neighbour){
3819 cast_type = skb->dst->neighbour->type;
3820 if ((cast_type == RTN_BROADCAST) ||
3821 (cast_type == RTN_MULTICAST) ||
3822 (cast_type == RTN_ANYCAST))
3823 return cast_type;
3824 else
3825 return RTN_UNSPEC;
3826 }
3827 /* try something else */
3828 if (skb->protocol == ETH_P_IPV6)
3829 return (skb->nh.raw[24] == 0xff) ? RTN_MULTICAST : 0;
3830 else if (skb->protocol == ETH_P_IP)
3831 return ((skb->nh.raw[16] & 0xf0) == 0xe0) ? RTN_MULTICAST : 0;
3832 /* ... */
3833 if (!memcmp(skb->data, skb->dev->broadcast, 6))
3834 return RTN_BROADCAST;
3835 else {
3836 u16 hdr_mac;
3837
3838 hdr_mac = *((u16 *)skb->data);
3839 /* tr multicast? */
3840 switch (card->info.link_type) {
3841 case QETH_LINK_TYPE_HSTR:
3842 case QETH_LINK_TYPE_LANE_TR:
3843 if ((hdr_mac == QETH_TR_MAC_NC) ||
3844 (hdr_mac == QETH_TR_MAC_C))
3845 return RTN_MULTICAST;
3846 /* eth or so multicast? */
3847 default:
3848 if ((hdr_mac == QETH_ETH_MAC_V4) ||
3849 (hdr_mac == QETH_ETH_MAC_V6))
3850 return RTN_MULTICAST;
3851 }
3852 }
3853 return cast_type;
3854}
3855
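/*
 * select the outbound queue for an skb: on four-queue devices the IPv4
 * TOS bits (or the IP precedence field) select one of queues 0-3,
 * otherwise the card's default outbound queue is used
 */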
3856static inline int
3857qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3858 int ipv, int cast_type)
3859{
3860 if (!ipv && (card->info.type == QETH_CARD_TYPE_OSAE))
3861 return card->qdio.default_out_queue;
3862 switch (card->qdio.no_out_queues) {
3863 case 4:
3864 if (cast_type && card->info.is_multicast_different)
3865 return card->info.is_multicast_different &
3866 (card->qdio.no_out_queues - 1);
3867 if (card->qdio.do_prio_queueing && (ipv == 4)) {
3868 if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_TOS){
3869 if (skb->nh.iph->tos & IP_TOS_NOTIMPORTANT)
3870 return 3;
3871 if (skb->nh.iph->tos & IP_TOS_HIGHRELIABILITY)
3872 return 2;
3873 if (skb->nh.iph->tos & IP_TOS_HIGHTHROUGHPUT)
3874 return 1;
3875 if (skb->nh.iph->tos & IP_TOS_LOWDELAY)
3876 return 0;
3877 }
3878 if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_PREC)
3879 return 3 - (skb->nh.iph->tos >> 6);
3880 } else if (card->qdio.do_prio_queueing && (ipv == 6)) {
3881 /* TODO: IPv6!!! */
3882 }
3883 return card->qdio.default_out_queue;
3884 case 1: /* fallthrough for single-out-queue 1920-device */
3885 default:
3886 return card->qdio.default_out_queue;
3887 }
3888}
3889
3890static inline int
3891qeth_get_ip_version(struct sk_buff *skb)
3892{
3893 switch (skb->protocol) {
3894 case ETH_P_IPV6:
3895 return 6;
3896 case ETH_P_IP:
3897 return 4;
3898 default:
3899 return 0;
3900 }
3901}
3902
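/*
 * make room for the qeth_hdr in front of the skb data; for VLAN-tagged
 * frames sent in layer 2 or IPv6 mode the 802.1Q tag is re-inserted
 * into the frame data first
 */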
3903static inline int
3904qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb,
3905 struct qeth_hdr **hdr, int ipv)
3906{
500f83ab 3907 int rc = 0;
1da177e4
LT
3908#ifdef CONFIG_QETH_VLAN
3909 u16 *tag;
3910#endif
3911
3912 QETH_DBF_TEXT(trace, 6, "prepskb");
500f83ab
UB
3913 if (card->info.type == QETH_CARD_TYPE_OSN) {
3914 *hdr = (struct qeth_hdr *)(*skb)->data;
3915 return rc;
3916 }
9cb90de8
FP
3917 rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr));
3918 if (rc)
3919 return rc;
1da177e4
LT
3920#ifdef CONFIG_QETH_VLAN
3921 if (card->vlangrp && vlan_tx_tag_present(*skb) &&
3922 ((ipv == 6) || card->options.layer2) ) {
3923 /*
3924 	 * Move the mac addresses (6 bytes dest, 6 bytes src)
3925 * to the beginning of the new header. We are using three
3926 * memcpys instead of one memmove to save cycles.
3927 */
3928 skb_push(*skb, VLAN_HLEN);
3929 memcpy((*skb)->data, (*skb)->data + 4, 4);
3930 memcpy((*skb)->data + 4, (*skb)->data + 8, 4);
3931 memcpy((*skb)->data + 8, (*skb)->data + 12, 4);
3932 tag = (u16 *)((*skb)->data + 12);
3933 /*
3934 * first two bytes = ETH_P_8021Q (0x8100)
3935 * second two bytes = VLANID
3936 */
3937 *tag = __constant_htons(ETH_P_8021Q);
3938 *(tag + 1) = htons(vlan_tx_tag_get(*skb));
3939 }
3940#endif
3941 *hdr = (struct qeth_hdr *)
3942 qeth_push_skb(card, skb, sizeof(struct qeth_hdr));
3943 	if (*hdr == NULL)
3944 return -EINVAL;
3945 return 0;
3946}
3947
3948static inline u8
3949qeth_get_qeth_hdr_flags4(int cast_type)
3950{
3951 if (cast_type == RTN_MULTICAST)
3952 return QETH_CAST_MULTICAST;
3953 if (cast_type == RTN_BROADCAST)
3954 return QETH_CAST_BROADCAST;
3955 return QETH_CAST_UNICAST;
3956}
3957
3958static inline u8
3959qeth_get_qeth_hdr_flags6(int cast_type)
3960{
3961 u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
3962 if (cast_type == RTN_MULTICAST)
3963 return ct | QETH_CAST_MULTICAST;
3964 if (cast_type == RTN_ANYCAST)
3965 return ct | QETH_CAST_ANYCAST;
3966 if (cast_type == RTN_BROADCAST)
3967 return ct | QETH_CAST_BROADCAST;
3968 return ct | QETH_CAST_UNICAST;
3969}
3970
3971static inline void
3972qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr,
3973 struct sk_buff *skb)
3974{
3975 __u16 hdr_mac;
3976
3977 if (!memcmp(skb->data+QETH_HEADER_SIZE,
3978 skb->dev->broadcast,6)) { /* broadcast? */
3979 *(__u32 *)hdr->hdr.l2.flags |=
3980 QETH_LAYER2_FLAG_BROADCAST << 8;
3981 return;
3982 }
3983 hdr_mac=*((__u16*)skb->data);
3984 /* tr multicast? */
3985 switch (card->info.link_type) {
3986 case QETH_LINK_TYPE_HSTR:
3987 case QETH_LINK_TYPE_LANE_TR:
3988 if ((hdr_mac == QETH_TR_MAC_NC) ||
3989 (hdr_mac == QETH_TR_MAC_C) )
3990 *(__u32 *)hdr->hdr.l2.flags |=
3991 QETH_LAYER2_FLAG_MULTICAST << 8;
3992 else
3993 *(__u32 *)hdr->hdr.l2.flags |=
3994 QETH_LAYER2_FLAG_UNICAST << 8;
3995 break;
3996 /* eth or so multicast? */
3997 default:
3998 if ( (hdr_mac==QETH_ETH_MAC_V4) ||
3999 (hdr_mac==QETH_ETH_MAC_V6) )
4000 *(__u32 *)hdr->hdr.l2.flags |=
4001 QETH_LAYER2_FLAG_MULTICAST << 8;
4002 else
4003 *(__u32 *)hdr->hdr.l2.flags |=
4004 QETH_LAYER2_FLAG_UNICAST << 8;
4005 }
4006}
4007
4008static inline void
4009qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
4010 struct sk_buff *skb, int cast_type)
4011{
4012 memset(hdr, 0, sizeof(struct qeth_hdr));
4013 hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
4014
4015 /* set byte 0 to "0x02" and byte 3 to casting flags */
4016 if (cast_type==RTN_MULTICAST)
4017 *(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_MULTICAST << 8;
4018 else if (cast_type==RTN_BROADCAST)
4019 *(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_BROADCAST << 8;
4020 else
4021 qeth_layer2_get_packet_type(card, hdr, skb);
4022
4023 hdr->hdr.l2.pkt_length = skb->len-QETH_HEADER_SIZE;
4024#ifdef CONFIG_QETH_VLAN
4025 /* VSWITCH relies on the VLAN
4026 * information to be present in
4027 * the QDIO header */
4028 if ((card->vlangrp != NULL) &&
4029 vlan_tx_tag_present(skb)) {
4030 *(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_VLAN << 8;
4031 hdr->hdr.l2.vlan_id = vlan_tx_tag_get(skb);
4032 }
4033#endif
4034}
4035
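/*
 * build the qeth_hdr for an outgoing skb: layer 2 headers carry cast
 * flags and VLAN id, layer 3 headers carry the destination address taken
 * from the neighbour entry or the IP header (or passthru flags otherwise)
 */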
4036void
4037qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
4038 struct sk_buff *skb, int ipv, int cast_type)
4039{
4040 QETH_DBF_TEXT(trace, 6, "fillhdr");
4041
4042 memset(hdr, 0, sizeof(struct qeth_hdr));
4043 if (card->options.layer2) {
4044 qeth_layer2_fill_header(card, hdr, skb, cast_type);
4045 return;
4046 }
4047 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
4048 hdr->hdr.l3.ext_flags = 0;
4049#ifdef CONFIG_QETH_VLAN
4050 /*
4051 	 * record the VLAN tag here, before this location is overwritten with the next hop ip;
4052 * v6 uses passthrough, v4 sets the tag in the QDIO header.
4053 */
4054 if (card->vlangrp && vlan_tx_tag_present(skb)) {
4055 hdr->hdr.l3.ext_flags = (ipv == 4) ?
4056 QETH_HDR_EXT_VLAN_FRAME :
4057 QETH_HDR_EXT_INCLUDE_VLAN_TAG;
4058 hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
4059 }
4060#endif /* CONFIG_QETH_VLAN */
4061 hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
4062 if (ipv == 4) { /* IPv4 */
4063 hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags4(cast_type);
4064 memset(hdr->hdr.l3.dest_addr, 0, 12);
4065 if ((skb->dst) && (skb->dst->neighbour)) {
4066 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
4067 *((u32 *) skb->dst->neighbour->primary_key);
4068 } else {
4069 /* fill in destination address used in ip header */
4070 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) = skb->nh.iph->daddr;
4071 }
4072 } else if (ipv == 6) { /* IPv6 or passthru */
4073 hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags6(cast_type);
4074 if ((skb->dst) && (skb->dst->neighbour)) {
4075 memcpy(hdr->hdr.l3.dest_addr,
4076 skb->dst->neighbour->primary_key, 16);
4077 } else {
4078 /* fill in destination address used in ip header */
4079 memcpy(hdr->hdr.l3.dest_addr, &skb->nh.ipv6h->daddr, 16);
4080 }
4081 } else { /* passthrough */
e23dd9cd 4082 if((skb->dev->type == ARPHRD_IEEE802_TR) &&
9a455819
FP
4083 !memcmp(skb->data + sizeof(struct qeth_hdr) +
4084 sizeof(__u16), skb->dev->broadcast, 6)) {
4085 hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
4086 QETH_HDR_PASSTHRU;
e23dd9cd 4087 } else if (!memcmp(skb->data + sizeof(struct qeth_hdr),
1da177e4 4088 skb->dev->broadcast, 6)) { /* broadcast? */
e23dd9cd
FP
4089 hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
4090 QETH_HDR_PASSTHRU;
1da177e4
LT
4091 } else {
4092 hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
4093 QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
4094 QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
4095 }
4096 }
4097}
4098
1da177e4
LT
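/*
 * map the linear skb data into SBAL elements; an element must not cross
 * a page boundary, so the data is split at page boundaries and the first
 * and last pieces are flagged accordingly (unless a TSO header already
 * occupies the first element)
 */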
4099static inline void
4100__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
05e08a2a 4101 int is_tso, int *next_element_to_fill)
1da177e4
LT
4102{
4103 int length = skb->len;
4104 int length_here;
4105 int element;
4106 char *data;
05e08a2a 4107 int first_lap ;
1da177e4
LT
4108
4109 element = *next_element_to_fill;
4110 data = skb->data;
05e08a2a
FP
4111 first_lap = (is_tso == 0 ? 1 : 0);
4112
1da177e4
LT
4113 while (length > 0) {
4114 /* length_here is the remaining amount of data in this page */
4115 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
4116 if (length < length_here)
4117 length_here = length;
05e08a2a 4118
1da177e4
LT
4119 buffer->element[element].addr = data;
4120 buffer->element[element].length = length_here;
4121 length -= length_here;
05e08a2a 4122 if (!length) {
1da177e4
LT
4123 if (first_lap)
4124 buffer->element[element].flags = 0;
4125 else
4126 buffer->element[element].flags =
4127 SBAL_FLAGS_LAST_FRAG;
4128 } else {
4129 if (first_lap)
4130 buffer->element[element].flags =
4131 SBAL_FLAGS_FIRST_FRAG;
4132 else
4133 buffer->element[element].flags =
4134 SBAL_FLAGS_MIDDLE_FRAG;
4135 }
4136 data += length_here;
4137 element++;
4138 first_lap = 0;
4139 }
4140 *next_element_to_fill = element;
4141}
4142
4143static inline int
4144qeth_fill_buffer(struct qeth_qdio_out_q *queue,
4145 struct qeth_qdio_out_buffer *buf,
4146 struct sk_buff *skb)
4147{
4148 struct qdio_buffer *buffer;
05e08a2a
FP
4149 struct qeth_hdr_tso *hdr;
4150 int flush_cnt = 0, hdr_len, large_send = 0;
1da177e4
LT
4151
4152 QETH_DBF_TEXT(trace, 6, "qdfillbf");
05e08a2a 4153
1da177e4
LT
4154 buffer = buf->buffer;
4155 atomic_inc(&skb->users);
4156 skb_queue_tail(&buf->skb_list, skb);
05e08a2a
FP
4157
4158 hdr = (struct qeth_hdr_tso *) skb->data;
4159 	/* check whether this is a TSO packet first ... */
4160 if (hdr->hdr.hdr.l3.id == QETH_HEADER_TYPE_TSO) {
4161 int element = buf->next_element_to_fill;
4162
4163 hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
4164 /*fill first buffer entry only with header information */
4165 buffer->element[element].addr = skb->data;
4166 buffer->element[element].length = hdr_len;
4167 buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
4168 buf->next_element_to_fill++;
4169 skb->data += hdr_len;
4170 skb->len -= hdr_len;
4171 large_send = 1;
4172 }
1da177e4 4173 if (skb_shinfo(skb)->nr_frags == 0)
05e08a2a 4174 __qeth_fill_buffer(skb, buffer, large_send,
1da177e4
LT
4175 (int *)&buf->next_element_to_fill);
4176 else
05e08a2a 4177 __qeth_fill_buffer_frag(skb, buffer, large_send,
1da177e4
LT
4178 (int *)&buf->next_element_to_fill);
4179
4180 if (!queue->do_pack) {
4181 QETH_DBF_TEXT(trace, 6, "fillbfnp");
4182 /* set state to PRIMED -> will be flushed */
4183 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
4184 flush_cnt = 1;
4185 } else {
4186 QETH_DBF_TEXT(trace, 6, "fillbfpa");
4187#ifdef CONFIG_QETH_PERF_STATS
4188 queue->card->perf_stats.skbs_sent_pack++;
4189#endif
4190 if (buf->next_element_to_fill >=
4191 QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
4192 /*
4193 			 * packed buffer is full -> set state PRIMED
4194 * -> will be flushed
4195 */
4196 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
4197 flush_cnt = 1;
4198 }
4199 }
4200 return flush_cnt;
4201}
4202
4203static inline int
4204qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4205 struct sk_buff *skb, struct qeth_hdr *hdr,
4206 int elements_needed,
4207 struct qeth_eddp_context *ctx)
4208{
4209 struct qeth_qdio_out_buffer *buffer;
4210 int buffers_needed = 0;
4211 int flush_cnt = 0;
4212 int index;
4213
4214 QETH_DBF_TEXT(trace, 6, "dosndpfa");
4215
4216 /* spin until we get the queue ... */
4217 while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
4218 QETH_OUT_Q_LOCKED,
4219 &queue->state));
4220 /* ... now we've got the queue */
4221 index = queue->next_buf_to_fill;
4222 buffer = &queue->bufs[queue->next_buf_to_fill];
4223 /*
4224 * check if buffer is empty to make sure that we do not 'overtake'
4225 * ourselves and try to fill a buffer that is already primed
4226 */
4227 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
4228 card->stats.tx_dropped++;
4229 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4230 return -EBUSY;
4231 }
4232 if (ctx == NULL)
4233 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
4234 QDIO_MAX_BUFFERS_PER_Q;
4235 else {
4236 buffers_needed = qeth_eddp_check_buffers_for_context(queue,ctx);
4237 if (buffers_needed < 0) {
4238 card->stats.tx_dropped++;
4239 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4240 return -EBUSY;
4241 }
4242 queue->next_buf_to_fill =
4243 (queue->next_buf_to_fill + buffers_needed) %
4244 QDIO_MAX_BUFFERS_PER_Q;
4245 }
4246 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4247 if (ctx == NULL) {
4248 qeth_fill_buffer(queue, buffer, skb);
4249 qeth_flush_buffers(queue, 0, index, 1);
4250 } else {
4251 flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
4252 WARN_ON(buffers_needed != flush_cnt);
4253 qeth_flush_buffers(queue, 0, index, flush_cnt);
4254 }
4255 return 0;
4256}
4257
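/*
 * queue an skb on an outbound queue with packing support: the queue is
 * serialized via its atomic state, buffers are switched to packing mode
 * if needed and flushed once they are primed or a PCI flag is required
 */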
4258static inline int
4259qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4260 struct sk_buff *skb, struct qeth_hdr *hdr,
4261 int elements_needed, struct qeth_eddp_context *ctx)
4262{
4263 struct qeth_qdio_out_buffer *buffer;
4264 int start_index;
4265 int flush_count = 0;
4266 int do_pack = 0;
4267 int tmp;
4268 int rc = 0;
4269
4270 QETH_DBF_TEXT(trace, 6, "dosndpkt");
4271
4272 /* spin until we get the queue ... */
4273 while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
4274 QETH_OUT_Q_LOCKED,
4275 &queue->state));
4276 start_index = queue->next_buf_to_fill;
4277 buffer = &queue->bufs[queue->next_buf_to_fill];
4278 /*
4279 * check if buffer is empty to make sure that we do not 'overtake'
4280 * ourselves and try to fill a buffer that is already primed
4281 */
4282 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
4283 card->stats.tx_dropped++;
4284 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4285 return -EBUSY;
4286 }
4287 /* check if we need to switch packing state of this queue */
4288 qeth_switch_to_packing_if_needed(queue);
4289 if (queue->do_pack){
4290 do_pack = 1;
4291 if (ctx == NULL) {
4292 /* does packet fit in current buffer? */
4293 if((QETH_MAX_BUFFER_ELEMENTS(card) -
4294 buffer->next_element_to_fill) < elements_needed){
4295 /* ... no -> set state PRIMED */
4296 atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED);
4297 flush_count++;
4298 queue->next_buf_to_fill =
4299 (queue->next_buf_to_fill + 1) %
4300 QDIO_MAX_BUFFERS_PER_Q;
4301 buffer = &queue->bufs[queue->next_buf_to_fill];
4302 /* we did a step forward, so check buffer state
4303 * again */
4304 if (atomic_read(&buffer->state) !=
4305 QETH_QDIO_BUF_EMPTY){
4306 card->stats.tx_dropped++;
4307 qeth_flush_buffers(queue, 0, start_index, flush_count);
4308 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4309 return -EBUSY;
4310 }
4311 }
4312 } else {
4313 /* check if we have enough elements (including following
4314 * free buffers) to handle eddp context */
4315 if (qeth_eddp_check_buffers_for_context(queue,ctx) < 0){
4316 			PRINT_ERR("eddp tx_dropped 1\n");
4317 card->stats.tx_dropped++;
4318 rc = -EBUSY;
4319 goto out;
4320 }
4321 }
4322 }
4323 if (ctx == NULL)
4324 tmp = qeth_fill_buffer(queue, buffer, skb);
4325 else {
4326 tmp = qeth_eddp_fill_buffer(queue,ctx,queue->next_buf_to_fill);
4327 if (tmp < 0) {
4328 			PRINT_ERR("eddp tx_dropped 2\n");
4329 card->stats.tx_dropped++;
4330 rc = - EBUSY;
4331 goto out;
4332 }
4333 }
4334 queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
4335 QDIO_MAX_BUFFERS_PER_Q;
4336 flush_count += tmp;
4337out:
4338 if (flush_count)
4339 qeth_flush_buffers(queue, 0, start_index, flush_count);
d805d7c6
FP
4340 else if (!atomic_read(&queue->set_pci_flags_count))
4341 atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
1da177e4
LT
4342 /*
4343 * queue->state will go from LOCKED -> UNLOCKED or from
4344 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
4345 * (switch packing state or flush buffer to get another pci flag out).
4346 * In that case we will enter this loop
4347 */
4348 while (atomic_dec_return(&queue->state)){
4349 flush_count = 0;
4350 start_index = queue->next_buf_to_fill;
4351 /* check if we can go back to non-packing state */
4352 flush_count += qeth_switch_to_nonpacking_if_needed(queue);
4353 /*
4354 * check if we need to flush a packing buffer to get a pci
4355 * flag out on the queue
4356 */
4357 if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
4358 flush_count += qeth_flush_buffers_on_no_pci(queue);
4359 if (flush_count)
4360 qeth_flush_buffers(queue, 0, start_index, flush_count);
4361 }
4362 /* at this point the queue is UNLOCKED again */
4363#ifdef CONFIG_QETH_PERF_STATS
4364 if (do_pack)
4365 queue->card->perf_stats.bufs_sent_pack += flush_count;
4366#endif /* CONFIG_QETH_PERF_STATS */
4367
4368 return rc;
4369}
4370
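/*
 * calculate how many SBAL elements the skb will occupy (its fragments,
 * or the linear data split at page boundaries); returns 0 if the packet
 * would exceed the card's per-buffer element limit
 */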
05e08a2a 4371static inline int
9cb90de8
FP
4372qeth_get_elements_no(struct qeth_card *card, void *hdr,
4373 struct sk_buff *skb, int elems)
05e08a2a
FP
4374{
4375 int elements_needed = 0;
4376
4377 if (skb_shinfo(skb)->nr_frags > 0) {
4378 elements_needed = (skb_shinfo(skb)->nr_frags + 1);
4379 }
4380 if (elements_needed == 0 )
4381 elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
4382 + skb->len) >> PAGE_SHIFT);
9cb90de8 4383 if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)){
05e08a2a 4384 PRINT_ERR("qeth_do_send_packet: invalid size of "
9cb90de8
FP
4385 "IP packet (Number=%d / Length=%d). Discarded.\n",
4386 (elements_needed+elems), skb->len);
05e08a2a
FP
4387 return 0;
4388 }
4389 return elements_needed;
4390}
4391
1da177e4
LT
4392static inline int
4393qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
4394{
4395 int ipv = 0;
4396 int cast_type;
4397 struct qeth_qdio_out_q *queue;
f3d242e8 4398 struct qeth_hdr *hdr = NULL;
1da177e4
LT
4399 int elements_needed = 0;
4400 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
4401 struct qeth_eddp_context *ctx = NULL;
4402 int rc;
4403
4404 QETH_DBF_TEXT(trace, 6, "sendpkt");
4405
4406 if (!card->options.layer2) {
4407 ipv = qeth_get_ip_version(skb);
4408 if ((card->dev->hard_header == qeth_fake_header) && ipv) {
4409 if ((skb = qeth_pskb_unshare(skb,GFP_ATOMIC)) == NULL) {
4410 card->stats.tx_dropped++;
4411 dev_kfree_skb_irq(skb);
4412 return 0;
4413 }
e23dd9cd
FP
4414 if(card->dev->type == ARPHRD_IEEE802_TR){
4415 skb_pull(skb, QETH_FAKE_LL_LEN_TR);
4416 } else {
4417 skb_pull(skb, QETH_FAKE_LL_LEN_ETH);
4418 }
1da177e4
LT
4419 }
4420 }
500f83ab
UB
4421 if ((card->info.type == QETH_CARD_TYPE_OSN) &&
4422 (skb->protocol == htons(ETH_P_IPV6))) {
4423 dev_kfree_skb_any(skb);
4424 return 0;
4425 }
1da177e4 4426 cast_type = qeth_get_cast_type(card, skb);
500f83ab
UB
4427 if ((cast_type == RTN_BROADCAST) &&
4428 (card->info.broadcast_capable == 0)){
1da177e4
LT
4429 card->stats.tx_dropped++;
4430 card->stats.tx_errors++;
4431 dev_kfree_skb_any(skb);
4432 return NETDEV_TX_OK;
4433 }
4434 queue = card->qdio.out_qs
4435 [qeth_get_priority_queue(card, skb, ipv, cast_type)];
4436
4437 if (skb_shinfo(skb)->tso_size)
4438 large_send = card->options.large_send;
4439
1da177e4
LT
4440 	/* are we able to do TSO? If so, prepare and send it from here */
4441 if ((large_send == QETH_LARGE_SEND_TSO) &&
4442 (cast_type == RTN_UNSPEC)) {
05e08a2a
FP
4443 rc = qeth_tso_prepare_packet(card, skb, ipv, cast_type);
4444 if (rc) {
4445 card->stats.tx_dropped++;
4446 card->stats.tx_errors++;
4447 dev_kfree_skb_any(skb);
4448 return NETDEV_TX_OK;
4449 }
4450 elements_needed++;
4451 } else {
4452 if ((rc = qeth_prepare_skb(card, &skb, &hdr, ipv))) {
4453 QETH_DBF_TEXT_(trace, 4, "pskbe%d", rc);
4454 return rc;
4455 }
500f83ab
UB
4456 if (card->info.type != QETH_CARD_TYPE_OSN)
4457 qeth_fill_header(card, hdr, skb, ipv, cast_type);
1da177e4
LT
4458 }
4459
1da177e4
LT
4460 if (large_send == QETH_LARGE_SEND_EDDP) {
4461 ctx = qeth_eddp_create_context(card, skb, hdr);
4462 if (ctx == NULL) {
4463 PRINT_WARN("could not create eddp context\n");
4464 return -EINVAL;
4465 }
4466 } else {
9cb90de8
FP
4467 int elems = qeth_get_elements_no(card,(void*) hdr, skb,
4468 elements_needed);
4469 if (!elems)
1da177e4 4470 return -EINVAL;
9cb90de8 4471 elements_needed += elems;
1da177e4
LT
4472 }
4473
4474 if (card->info.type != QETH_CARD_TYPE_IQD)
4475 rc = qeth_do_send_packet(card, queue, skb, hdr,
4476 elements_needed, ctx);
4477 else
4478 rc = qeth_do_send_packet_fast(card, queue, skb, hdr,
4479 elements_needed, ctx);
1da177e4
LT
4480 if (!rc){
4481 card->stats.tx_packets++;
4482 card->stats.tx_bytes += skb->len;
4483#ifdef CONFIG_QETH_PERF_STATS
05e08a2a
FP
4484 if (skb_shinfo(skb)->tso_size &&
4485 !(large_send == QETH_LARGE_SEND_NO)) {
1da177e4
LT
4486 card->perf_stats.large_send_bytes += skb->len;
4487 card->perf_stats.large_send_cnt++;
4488 }
4489 if (skb_shinfo(skb)->nr_frags > 0){
4490 card->perf_stats.sg_skbs_sent++;
4491 /* nr_frags + skb->data */
4492 card->perf_stats.sg_frags_sent +=
4493 skb_shinfo(skb)->nr_frags + 1;
4494 }
4495#endif /* CONFIG_QETH_PERF_STATS */
4496 }
4497 if (ctx != NULL) {
4498 /* drop creator's reference */
4499 qeth_eddp_put_context(ctx);
4500 /* free skb; it's not referenced by a buffer */
4501 if (rc == 0)
4502 dev_kfree_skb_any(skb);
4503
4504 }
4505 return rc;
4506}
4507
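/*
 * emulate MII register reads for the SIOCGMIIREG ioctl; the OSA hardware
 * has no real PHY, so plausible fixed values derived from the link type
 * and card statistics are returned
 */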
4508static int
4509qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4510{
4511 struct qeth_card *card = (struct qeth_card *) dev->priv;
4512 int rc = 0;
4513
4514 switch(regnum){
4515 case MII_BMCR: /* Basic mode control register */
4516 rc = BMCR_FULLDPLX;
4517 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)&&
500f83ab 4518 (card->info.link_type != QETH_LINK_TYPE_OSN) &&
1da177e4
LT
4519 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
4520 rc |= BMCR_SPEED100;
4521 break;
4522 case MII_BMSR: /* Basic mode status register */
4523 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4524 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4525 BMSR_100BASE4;
4526 break;
4527 case MII_PHYSID1: /* PHYS ID 1 */
4528 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4529 dev->dev_addr[2];
4530 rc = (rc >> 5) & 0xFFFF;
4531 break;
4532 case MII_PHYSID2: /* PHYS ID 2 */
4533 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4534 break;
4535 case MII_ADVERTISE: /* Advertisement control reg */
4536 rc = ADVERTISE_ALL;
4537 break;
4538 case MII_LPA: /* Link partner ability reg */
4539 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4540 LPA_100BASE4 | LPA_LPACK;
4541 break;
4542 case MII_EXPANSION: /* Expansion register */
4543 break;
4544 case MII_DCOUNTER: /* disconnect counter */
4545 break;
4546 case MII_FCSCOUNTER: /* false carrier counter */
4547 break;
4548 case MII_NWAYTEST: /* N-way auto-neg test register */
4549 break;
4550 case MII_RERRCOUNTER: /* rx error counter */
4551 rc = card->stats.rx_errors;
4552 break;
4553 case MII_SREVISION: /* silicon revision */
4554 break;
4555 case MII_RESV1: /* reserved 1 */
4556 break;
4557 case MII_LBRERROR: /* loopback, rx, bypass error */
4558 break;
4559 case MII_PHYADDR: /* physical address */
4560 break;
4561 case MII_RESV2: /* reserved 2 */
4562 break;
4563 case MII_TPISTATUS: /* TPI status for 10mbps */
4564 break;
4565 case MII_NCONFIG: /* network interface config */
4566 break;
4567 default:
4568 rc = 0;
4569 break;
4570 }
4571 return rc;
4572}
4573
4574static void
4575qeth_mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
4576{
4577 switch(regnum){
4578 case MII_BMCR: /* Basic mode control register */
4579 case MII_BMSR: /* Basic mode status register */
4580 case MII_PHYSID1: /* PHYS ID 1 */
4581 case MII_PHYSID2: /* PHYS ID 2 */
4582 case MII_ADVERTISE: /* Advertisement control reg */
4583 case MII_LPA: /* Link partner ability reg */
4584 case MII_EXPANSION: /* Expansion register */
4585 case MII_DCOUNTER: /* disconnect counter */
4586 case MII_FCSCOUNTER: /* false carrier counter */
4587 case MII_NWAYTEST: /* N-way auto-neg test register */
4588 case MII_RERRCOUNTER: /* rx error counter */
4589 case MII_SREVISION: /* silicon revision */
4590 case MII_RESV1: /* reserved 1 */
4591 case MII_LBRERROR: /* loopback, rx, bypass error */
4592 case MII_PHYADDR: /* physical address */
4593 case MII_RESV2: /* reserved 2 */
4594 case MII_TPISTATUS: /* TPI status for 10mbps */
4595 case MII_NCONFIG: /* network interface config */
4596 default:
4597 break;
4598 }
4599}
4600
4601static inline const char *
4602qeth_arp_get_error_cause(int *rc)
4603{
4604 switch (*rc) {
4605 case QETH_IPA_ARP_RC_FAILED:
4606 *rc = -EIO;
4607 return "operation failed";
4608 case QETH_IPA_ARP_RC_NOTSUPP:
4609 *rc = -EOPNOTSUPP;
4610 return "operation not supported";
4611 case QETH_IPA_ARP_RC_OUT_OF_RANGE:
4612 *rc = -EINVAL;
4613 return "argument out of range";
4614 case QETH_IPA_ARP_RC_Q_NOTSUPP:
4615 *rc = -EOPNOTSUPP;
4616 return "query operation not supported";
4617 case QETH_IPA_ARP_RC_Q_NO_DATA:
4618 *rc = -ENOENT;
4619 return "no query data available";
4620 default:
4621 return "unknown error";
4622 }
4623}
4624
4625static int
4626qeth_send_simple_setassparms(struct qeth_card *, enum qeth_ipa_funcs,
4627 __u16, long);
4628
4629static int
4630qeth_arp_set_no_entries(struct qeth_card *card, int no_entries)
4631{
4632 int tmp;
4633 int rc;
4634
4635 QETH_DBF_TEXT(trace,3,"arpstnoe");
4636
f3d242e8
FP
4637 /*
4638 * currently GuestLAN only supports the ARP assist function
4639 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
4640 * thus we say EOPNOTSUPP for this ARP function
4641 */
1da177e4
LT
4642 if (card->info.guestlan)
4643 return -EOPNOTSUPP;
4644 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4645 PRINT_WARN("ARP processing not supported "
4646 "on %s!\n", QETH_CARD_IFNAME(card));
4647 return -EOPNOTSUPP;
4648 }
4649 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
4650 IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
4651 no_entries);
4652 if (rc) {
4653 tmp = rc;
4654 PRINT_WARN("Could not set number of ARP entries on %s: "
4655 "%s (0x%x/%d)\n",
4656 QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
4657 tmp, tmp);
4658 }
4659 return rc;
4660}
4661
4662static inline void
4663qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
4664 struct qeth_arp_query_data *qdata,
4665 int entry_size, int uentry_size)
4666{
4667 char *entry_ptr;
4668 char *uentry_ptr;
4669 int i;
4670
4671 entry_ptr = (char *)&qdata->data;
4672 uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset);
4673 for (i = 0; i < qdata->no_entries; ++i){
4674 /* strip off 32 bytes "media specific information" */
4675 memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32);
4676 entry_ptr += entry_size;
4677 uentry_ptr += uentry_size;
4678 }
4679}
4680
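/*
 * callback for IPA_CMD_ASS_ARP_QUERY_INFO replies: copy the returned ARP
 * entries (optionally stripped of the media specific information) into
 * the user space buffer and indicate whether more replies are expected
 */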
4681static int
4682qeth_arp_query_cb(struct qeth_card *card, struct qeth_reply *reply,
4683 unsigned long data)
4684{
4685 struct qeth_ipa_cmd *cmd;
4686 struct qeth_arp_query_data *qdata;
4687 struct qeth_arp_query_info *qinfo;
4688 int entry_size;
4689 int uentry_size;
4690 int i;
4691
4692 QETH_DBF_TEXT(trace,4,"arpquecb");
4693
4694 qinfo = (struct qeth_arp_query_info *) reply->param;
4695 cmd = (struct qeth_ipa_cmd *) data;
4696 if (cmd->hdr.return_code) {
4697 QETH_DBF_TEXT_(trace,4,"qaer1%i", cmd->hdr.return_code);
4698 return 0;
4699 }
4700 if (cmd->data.setassparms.hdr.return_code) {
4701 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
4702 QETH_DBF_TEXT_(trace,4,"qaer2%i", cmd->hdr.return_code);
4703 return 0;
4704 }
4705 qdata = &cmd->data.setassparms.data.query_arp;
4706 switch(qdata->reply_bits){
4707 case 5:
4708 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5);
4709 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4710 uentry_size = sizeof(struct qeth_arp_qi_entry5_short);
4711 break;
4712 case 7:
4713 /* fall through to default */
4714 default:
4715 /* tr is the same as eth -> entry7 */
4716 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7);
4717 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4718 uentry_size = sizeof(struct qeth_arp_qi_entry7_short);
4719 break;
4720 }
4721 /* check if there is enough room in userspace */
4722 if ((qinfo->udata_len - qinfo->udata_offset) <
4723 qdata->no_entries * uentry_size){
4724 QETH_DBF_TEXT_(trace, 4, "qaer3%i", -ENOMEM);
4725 cmd->hdr.return_code = -ENOMEM;
4726 PRINT_WARN("query ARP user space buffer is too small for "
4727 "the returned number of ARP entries. "
4728 "Aborting query!\n");
4729 goto out_error;
4730 }
4731 QETH_DBF_TEXT_(trace, 4, "anore%i",
4732 cmd->data.setassparms.hdr.number_of_replies);
4733 QETH_DBF_TEXT_(trace, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
4734 QETH_DBF_TEXT_(trace, 4, "anoen%i", qdata->no_entries);
4735
4736 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
4737 /* strip off "media specific information" */
4738 qeth_copy_arp_entries_stripped(qinfo, qdata, entry_size,
4739 uentry_size);
4740 } else
4741 /*copy entries to user buffer*/
4742 memcpy(qinfo->udata + qinfo->udata_offset,
4743 (char *)&qdata->data, qdata->no_entries*uentry_size);
4744
4745 qinfo->no_entries += qdata->no_entries;
4746 qinfo->udata_offset += (qdata->no_entries*uentry_size);
4747 /* check if all replies received ... */
4748 if (cmd->data.setassparms.hdr.seq_no <
4749 cmd->data.setassparms.hdr.number_of_replies)
4750 return 1;
4751 memcpy(qinfo->udata, &qinfo->no_entries, 4);
4752 /* keep STRIP_ENTRIES flag so the user program can distinguish
4753 * stripped entries from normal ones */
4754 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4755 qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
4756 memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET,&qdata->reply_bits,2);
4757 return 0;
4758out_error:
4759 i = 0;
4760 memcpy(qinfo->udata, &i, 4);
4761 return 0;
4762}
4763
4764static int
4765qeth_send_ipa_arp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
4766 int len, int (*reply_cb)(struct qeth_card *,
4767 struct qeth_reply *,
4768 unsigned long),
4769 void *reply_param)
4770{
4771 QETH_DBF_TEXT(trace,4,"sendarp");
4772
4773 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
4774 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
4775 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
4776 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
4777 reply_cb, reply_param);
4778}
4779
4780static int
4781qeth_send_ipa_snmp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
4782 int len, int (*reply_cb)(struct qeth_card *,
4783 struct qeth_reply *,
4784 unsigned long),
4785 void *reply_param)
4786{
4787 u16 s1, s2;
4788
4789 QETH_DBF_TEXT(trace,4,"sendsnmp");
4790
4791 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
4792 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
4793 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
4794 /* adjust PDU length fields in IPA_PDU_HEADER */
4795 s1 = (u32) IPA_PDU_HEADER_SIZE + len;
4796 s2 = (u32) len;
4797 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
4798 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
4799 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
4800 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
4801 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
4802 reply_cb, reply_param);
4803}
4804
4805static struct qeth_cmd_buffer *
4806qeth_get_setassparms_cmd(struct qeth_card *, enum qeth_ipa_funcs,
4807 __u16, __u16, enum qeth_prot_versions);
4808static int
4809qeth_arp_query(struct qeth_card *card, char *udata)
4810{
4811 struct qeth_cmd_buffer *iob;
4812 struct qeth_arp_query_info qinfo = {0, };
4813 int tmp;
4814 int rc;
4815
4816 QETH_DBF_TEXT(trace,3,"arpquery");
4817
1da177e4
LT
4818 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
4819 IPA_ARP_PROCESSING)) {
4820 PRINT_WARN("ARP processing not supported "
4821 "on %s!\n", QETH_CARD_IFNAME(card));
4822 return -EOPNOTSUPP;
4823 }
4824 /* get size of userspace buffer and mask_bits -> 6 bytes */
4825 if (copy_from_user(&qinfo, udata, 6))
4826 return -EFAULT;
4827 if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL)))
4828 return -ENOMEM;
4829 memset(qinfo.udata, 0, qinfo.udata_len);
4830 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
4831 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
4832 IPA_CMD_ASS_ARP_QUERY_INFO,
4833 sizeof(int),QETH_PROT_IPV4);
4834
4835 rc = qeth_send_ipa_arp_cmd(card, iob,
4836 QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
4837 qeth_arp_query_cb, (void *)&qinfo);
4838 if (rc) {
4839 tmp = rc;
4840 PRINT_WARN("Error while querying ARP cache on %s: %s "
4841 "(0x%x/%d)\n",
4842 QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
4843 tmp, tmp);
4844 copy_to_user(udata, qinfo.udata, 4);
4845 } else {
4846 copy_to_user(udata, qinfo.udata, qinfo.udata_len);
4847 }
4848 kfree(qinfo.udata);
4849 return rc;
4850}
4851
4852/**
4853 * SNMP command callback
4854 */
4855static int
4856qeth_snmp_command_cb(struct qeth_card *card, struct qeth_reply *reply,
4857 unsigned long sdata)
4858{
4859 struct qeth_ipa_cmd *cmd;
4860 struct qeth_arp_query_info *qinfo;
4861 struct qeth_snmp_cmd *snmp;
4862 unsigned char *data;
4863 __u16 data_len;
4864
4865 QETH_DBF_TEXT(trace,3,"snpcmdcb");
4866
4867 cmd = (struct qeth_ipa_cmd *) sdata;
4868 data = (unsigned char *)((char *)cmd - reply->offset);
4869 qinfo = (struct qeth_arp_query_info *) reply->param;
4870 snmp = &cmd->data.setadapterparms.data.snmp;
4871
4872 if (cmd->hdr.return_code) {
4873 QETH_DBF_TEXT_(trace,4,"scer1%i", cmd->hdr.return_code);
4874 return 0;
4875 }
4876 if (cmd->data.setadapterparms.hdr.return_code) {
4877 cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
4878 QETH_DBF_TEXT_(trace,4,"scer2%i", cmd->hdr.return_code);
4879 return 0;
4880 }
4881 data_len = *((__u16*)QETH_IPA_PDU_LEN_PDU1(data));
4882 if (cmd->data.setadapterparms.hdr.seq_no == 1)
4883 data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
4884 else
4885 data_len -= (__u16)((char*)&snmp->request - (char *)cmd);
4886
4887 /* check if there is enough room in userspace */
4888 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4889 QETH_DBF_TEXT_(trace, 4, "scer3%i", -ENOMEM);
4890 cmd->hdr.return_code = -ENOMEM;
4891 return 0;
4892 }
4893 QETH_DBF_TEXT_(trace, 4, "snore%i",
4894 cmd->data.setadapterparms.hdr.used_total);
4895 QETH_DBF_TEXT_(trace, 4, "sseqn%i", cmd->data.setadapterparms.hdr.seq_no);
4896 /*copy entries to user buffer*/
4897 if (cmd->data.setadapterparms.hdr.seq_no == 1) {
4898 memcpy(qinfo->udata + qinfo->udata_offset,
4899 (char *)snmp,
4900 data_len + offsetof(struct qeth_snmp_cmd,data));
4901 qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
4902 } else {
4903 memcpy(qinfo->udata + qinfo->udata_offset,
4904 (char *)&snmp->request, data_len);
4905 }
4906 qinfo->udata_offset += data_len;
4907 /* check if all replies received ... */
4908 QETH_DBF_TEXT_(trace, 4, "srtot%i",
4909 cmd->data.setadapterparms.hdr.used_total);
4910 QETH_DBF_TEXT_(trace, 4, "srseq%i",
4911 cmd->data.setadapterparms.hdr.seq_no);
4912 if (cmd->data.setadapterparms.hdr.seq_no <
4913 cmd->data.setadapterparms.hdr.used_total)
4914 return 1;
4915 return 0;
4916}
4917
4918static struct qeth_cmd_buffer *
4919qeth_get_ipacmd_buffer(struct qeth_card *, enum qeth_ipa_cmds,
4920 enum qeth_prot_versions );
4921
4922static struct qeth_cmd_buffer *
4923qeth_get_adapter_cmd(struct qeth_card *card, __u32 command, __u32 cmdlen)
4924{
4925 struct qeth_cmd_buffer *iob;
4926 struct qeth_ipa_cmd *cmd;
4927
4928 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETADAPTERPARMS,
4929 QETH_PROT_IPV4);
4930 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4931 cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
4932 cmd->data.setadapterparms.hdr.command_code = command;
4933 cmd->data.setadapterparms.hdr.used_total = 1;
4934 cmd->data.setadapterparms.hdr.seq_no = 1;
4935
4936 return iob;
4937}
4938
4939/**
4940  * send SNMP commands to an OSA-E card
4941 */
4942static int
4943qeth_snmp_command(struct qeth_card *card, char *udata)
4944{
4945 struct qeth_cmd_buffer *iob;
4946 struct qeth_ipa_cmd *cmd;
4947 struct qeth_snmp_ureq *ureq;
4948 int req_len;
4949 struct qeth_arp_query_info qinfo = {0, };
4950 int rc = 0;
4951
4952 QETH_DBF_TEXT(trace,3,"snmpcmd");
4953
4954 if (card->info.guestlan)
4955 return -EOPNOTSUPP;
4956
4957 if ((!qeth_adp_supported(card,IPA_SETADP_SET_SNMP_CONTROL)) &&
4958 (!card->options.layer2) ) {
4959 PRINT_WARN("SNMP Query MIBS not supported "
4960 "on %s!\n", QETH_CARD_IFNAME(card));
4961 return -EOPNOTSUPP;
4962 }
4963 /* skip 4 bytes (data_len struct member) to get req_len */
4964 if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
4965 return -EFAULT;
4966 ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
4967 if (!ureq) {
4968 QETH_DBF_TEXT(trace, 2, "snmpnome");
4969 return -ENOMEM;
4970 }
4971 if (copy_from_user(ureq, udata,
4972 req_len+sizeof(struct qeth_snmp_ureq_hdr))){
4973 kfree(ureq);
4974 return -EFAULT;
4975 }
4976 qinfo.udata_len = ureq->hdr.data_len;
4977 if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL))){
4978 kfree(ureq);
4979 return -ENOMEM;
4980 }
4981 memset(qinfo.udata, 0, qinfo.udata_len);
4982 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4983
4984 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
4985 QETH_SNMP_SETADP_CMDLENGTH + req_len);
4986 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4987 memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
4988 rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
4989 qeth_snmp_command_cb, (void *)&qinfo);
4990 if (rc)
4991 PRINT_WARN("SNMP command failed on %s: (0x%x)\n",
4992 QETH_CARD_IFNAME(card), rc);
4993 else
4994 copy_to_user(udata, qinfo.udata, qinfo.udata_len);
4995
4996 kfree(ureq);
4997 kfree(qinfo.udata);
4998 return rc;
4999}
5000
5001static int
5002qeth_default_setassparms_cb(struct qeth_card *, struct qeth_reply *,
5003 unsigned long);
5004
5005static int
5006qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *,
5007 __u16, long,
5008 int (*reply_cb)
5009 (struct qeth_card *, struct qeth_reply *, unsigned long),
5010 void *reply_param);
5011
5012static int
5013qeth_arp_add_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
5014{
5015 struct qeth_cmd_buffer *iob;
5016 char buf[16];
5017 int tmp;
5018 int rc;
5019
5020 QETH_DBF_TEXT(trace,3,"arpadent");
5021
5022 /*
f3d242e8
FP
5023 * currently GuestLAN only supports the ARP assist function
5024 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
5025 * thus we say EOPNOTSUPP for this ARP function
1da177e4
LT
5026 */
5027 if (card->info.guestlan)
5028 return -EOPNOTSUPP;
5029 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
5030 PRINT_WARN("ARP processing not supported "
5031 "on %s!\n", QETH_CARD_IFNAME(card));
5032 return -EOPNOTSUPP;
5033 }
5034
5035 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
5036 IPA_CMD_ASS_ARP_ADD_ENTRY,
5037 sizeof(struct qeth_arp_cache_entry),
5038 QETH_PROT_IPV4);
5039 rc = qeth_send_setassparms(card, iob,
5040 sizeof(struct qeth_arp_cache_entry),
5041 (unsigned long) entry,
5042 qeth_default_setassparms_cb, NULL);
5043 if (rc) {
5044 tmp = rc;
5045 qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
5046 PRINT_WARN("Could not add ARP entry for address %s on %s: "
5047 "%s (0x%x/%d)\n",
5048 buf, QETH_CARD_IFNAME(card),
5049 qeth_arp_get_error_cause(&rc), tmp, tmp);
5050 }
5051 return rc;
5052}
5053
5054static int
5055qeth_arp_remove_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
5056{
5057 struct qeth_cmd_buffer *iob;
5058 char buf[16] = {0, };
5059 int tmp;
5060 int rc;
5061
5062 QETH_DBF_TEXT(trace,3,"arprment");
5063
5064 /*
f3d242e8
FP
5065 * currently GuestLAN only supports the ARP assist function
5066 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
5067 * thus we say EOPNOTSUPP for this ARP function
1da177e4
LT
5068 */
5069 if (card->info.guestlan)
5070 return -EOPNOTSUPP;
5071 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
5072 PRINT_WARN("ARP processing not supported "
5073 "on %s!\n", QETH_CARD_IFNAME(card));
5074 return -EOPNOTSUPP;
5075 }
5076 memcpy(buf, entry, 12);
5077 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
5078 IPA_CMD_ASS_ARP_REMOVE_ENTRY,
5079 12,
5080 QETH_PROT_IPV4);
5081 rc = qeth_send_setassparms(card, iob,
5082 12, (unsigned long)buf,
5083 qeth_default_setassparms_cb, NULL);
5084 if (rc) {
5085 tmp = rc;
5086 memset(buf, 0, 16);
5087 qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
5088 PRINT_WARN("Could not delete ARP entry for address %s on %s: "
5089 "%s (0x%x/%d)\n",
5090 buf, QETH_CARD_IFNAME(card),
5091 qeth_arp_get_error_cause(&rc), tmp, tmp);
5092 }
5093 return rc;
5094}
5095
5096static int
5097qeth_arp_flush_cache(struct qeth_card *card)
5098{
5099 int rc;
5100 int tmp;
5101
5102 QETH_DBF_TEXT(trace,3,"arpflush");
5103
5104 /*
f3d242e8
FP
5105 * currently GuestLAN only supports the ARP assist function
5106 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
5107 * thus we say EOPNOTSUPP for this ARP function
5108 */
1da177e4
LT
5109 if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
5110 return -EOPNOTSUPP;
5111 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
5112 PRINT_WARN("ARP processing not supported "
5113 "on %s!\n", QETH_CARD_IFNAME(card));
5114 return -EOPNOTSUPP;
5115 }
5116 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
5117 IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
5118 if (rc){
5119 tmp = rc;
5120 PRINT_WARN("Could not flush ARP cache on %s: %s (0x%x/%d)\n",
5121 QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
5122 tmp, tmp);
5123 }
5124 return rc;
5125}
5126
5127static int
5128qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5129{
5130 struct qeth_card *card = (struct qeth_card *)dev->priv;
5131 struct qeth_arp_cache_entry arp_entry;
5132 struct mii_ioctl_data *mii_data;
5133 int rc = 0;
5134
5135 if (!card)
5136 return -ENODEV;
5137
5138 if ((card->state != CARD_STATE_UP) &&
5139 (card->state != CARD_STATE_SOFTSETUP))
5140 return -ENODEV;
5141
500f83ab
UB
5142 if (card->info.type == QETH_CARD_TYPE_OSN)
5143 return -EPERM;
5144
1da177e4
LT
5145 switch (cmd){
5146 case SIOC_QETH_ARP_SET_NO_ENTRIES:
5147 if ( !capable(CAP_NET_ADMIN) ||
5148 (card->options.layer2) ) {
5149 rc = -EPERM;
5150 break;
5151 }
5152 rc = qeth_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
5153 break;
5154 case SIOC_QETH_ARP_QUERY_INFO:
5155 if ( !capable(CAP_NET_ADMIN) ||
5156 (card->options.layer2) ) {
5157 rc = -EPERM;
5158 break;
5159 }
5160 rc = qeth_arp_query(card, rq->ifr_ifru.ifru_data);
5161 break;
5162 case SIOC_QETH_ARP_ADD_ENTRY:
5163 if ( !capable(CAP_NET_ADMIN) ||
5164 (card->options.layer2) ) {
5165 rc = -EPERM;
5166 break;
5167 }
5168 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
5169 sizeof(struct qeth_arp_cache_entry)))
5170 rc = -EFAULT;
5171 else
5172 rc = qeth_arp_add_entry(card, &arp_entry);
5173 break;
5174 case SIOC_QETH_ARP_REMOVE_ENTRY:
5175 if ( !capable(CAP_NET_ADMIN) ||
5176 (card->options.layer2) ) {
5177 rc = -EPERM;
5178 break;
5179 }
5180 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
5181 sizeof(struct qeth_arp_cache_entry)))
5182 rc = -EFAULT;
5183 else
5184 rc = qeth_arp_remove_entry(card, &arp_entry);
5185 break;
5186 case SIOC_QETH_ARP_FLUSH_CACHE:
5187 if ( !capable(CAP_NET_ADMIN) ||
5188 (card->options.layer2) ) {
5189 rc = -EPERM;
5190 break;
5191 }
5192 rc = qeth_arp_flush_cache(card);
5193 break;
5194 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
5195 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
5196 break;
5197 case SIOC_QETH_GET_CARD_TYPE:
5198 if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
5199 !card->info.guestlan)
5200 return 1;
5201 return 0;
5202 break;
5203 case SIOCGMIIPHY:
5204 mii_data = if_mii(rq);
5205 mii_data->phy_id = 0;
5206 break;
5207 case SIOCGMIIREG:
5208 mii_data = if_mii(rq);
5209 if (mii_data->phy_id != 0)
5210 rc = -EINVAL;
5211 else
5212 mii_data->val_out = qeth_mdio_read(dev,mii_data->phy_id,
5213 mii_data->reg_num);
5214 break;
5215 case SIOCSMIIREG:
5216 rc = -EOPNOTSUPP;
5217 break;
5218 /* TODO: remove return if qeth_mdio_write does something */
5219 if (!capable(CAP_NET_ADMIN)){
5220 rc = -EPERM;
5221 break;
5222 }
5223 mii_data = if_mii(rq);
5224 if (mii_data->phy_id != 0)
5225 rc = -EINVAL;
5226 else
5227 qeth_mdio_write(dev, mii_data->phy_id, mii_data->reg_num,
5228 mii_data->val_in);
5229 break;
5230 default:
5231 rc = -EOPNOTSUPP;
5232 }
5233 if (rc)
5234 QETH_DBF_TEXT_(trace, 2, "ioce%d", rc);
5235 return rc;
5236}
5237
5238static struct net_device_stats *
5239qeth_get_stats(struct net_device *dev)
5240{
5241 struct qeth_card *card;
5242
5243 card = (struct qeth_card *) (dev->priv);
5244
5245 QETH_DBF_TEXT(trace,5,"getstat");
5246
5247 return &card->stats;
5248}
5249
5250static int
5251qeth_change_mtu(struct net_device *dev, int new_mtu)
5252{
5253 struct qeth_card *card;
5254 char dbf_text[15];
5255
5256 card = (struct qeth_card *) (dev->priv);
5257
5258 QETH_DBF_TEXT(trace,4,"chgmtu");
5259 sprintf(dbf_text, "%8x", new_mtu);
5260 QETH_DBF_TEXT(trace,4,dbf_text);
5261
5262 if (new_mtu < 64)
5263 return -EINVAL;
5264 if (new_mtu > 65535)
5265 return -EINVAL;
5266 if ((!qeth_is_supported(card,IPA_IP_FRAGMENTATION)) &&
5267 (!qeth_mtu_is_valid(card, new_mtu)))
5268 return -EINVAL;
5269 dev->mtu = new_mtu;
5270 return 0;
5271}
5272
5273#ifdef CONFIG_QETH_VLAN
5274static void
5275qeth_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
5276{
5277 struct qeth_card *card;
5278 unsigned long flags;
5279
5280 QETH_DBF_TEXT(trace,4,"vlanreg");
5281
5282 card = (struct qeth_card *) dev->priv;
5283 spin_lock_irqsave(&card->vlanlock, flags);
5284 card->vlangrp = grp;
5285 spin_unlock_irqrestore(&card->vlanlock, flags);
5286}
5287
5288static inline void
5289qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf,
5290 unsigned short vid)
5291{
5292 int i;
5293 struct sk_buff *skb;
5294 struct sk_buff_head tmp_list;
5295
5296 skb_queue_head_init(&tmp_list);
5297 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
5298 while ((skb = skb_dequeue(&buf->skb_list))){
5299 if (vlan_tx_tag_present(skb) &&
5300 (vlan_tx_tag_get(skb) == vid)) {
5301 atomic_dec(&skb->users);
5302 dev_kfree_skb(skb);
5303 } else
5304 skb_queue_tail(&tmp_list, skb);
5305 }
5306 }
5307 while ((skb = skb_dequeue(&tmp_list)))
5308 skb_queue_tail(&buf->skb_list, skb);
5309}
5310
5311static void
5312qeth_free_vlan_skbs(struct qeth_card *card, unsigned short vid)
5313{
5314 int i, j;
5315
5316 QETH_DBF_TEXT(trace, 4, "frvlskbs");
5317 for (i = 0; i < card->qdio.no_out_queues; ++i){
5318 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
5319 qeth_free_vlan_buffer(card, &card->qdio.
5320 out_qs[i]->bufs[j], vid);
5321 }
5322}
5323
5324static void
5325qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid)
5326{
5327 struct in_device *in_dev;
5328 struct in_ifaddr *ifa;
5329 struct qeth_ipaddr *addr;
5330
5331 QETH_DBF_TEXT(trace, 4, "frvaddr4");
6c88ad2d 5332
1da177e4 5333 rcu_read_lock();
e5ed6399 5334 in_dev = __in_dev_get_rcu(card->vlangrp->vlan_devices[vid]);
1da177e4
LT
5335 if (!in_dev)
5336 goto out;
5337 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
5338 addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
5339 if (addr){
5340 addr->u.a4.addr = ifa->ifa_address;
5341 addr->u.a4.mask = ifa->ifa_mask;
5342 addr->type = QETH_IP_TYPE_NORMAL;
5343 if (!qeth_delete_ip(card, addr))
5344 kfree(addr);
5345 }
5346 }
5347out:
5348 rcu_read_unlock();
5349}
5350
5351static void
5352qeth_free_vlan_addresses6(struct qeth_card *card, unsigned short vid)
5353{
5354#ifdef CONFIG_QETH_IPV6
5355 struct inet6_dev *in6_dev;
5356 struct inet6_ifaddr *ifa;
5357 struct qeth_ipaddr *addr;
5358
5359 QETH_DBF_TEXT(trace, 4, "frvaddr6");
6c88ad2d 5360
1da177e4
LT
5361 in6_dev = in6_dev_get(card->vlangrp->vlan_devices[vid]);
5362 if (!in6_dev)
5363 return;
5364 for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){
5365 addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
5366 if (addr){
5367 memcpy(&addr->u.a6.addr, &ifa->addr,
5368 sizeof(struct in6_addr));
5369 addr->u.a6.pfxlen = ifa->prefix_len;
5370 addr->type = QETH_IP_TYPE_NORMAL;
5371 if (!qeth_delete_ip(card, addr))
5372 kfree(addr);
5373 }
5374 }
5375 in6_dev_put(in6_dev);
5376#endif /* CONFIG_QETH_IPV6 */
5377}
5378
6c88ad2d
FP
5379static void
5380qeth_free_vlan_addresses(struct qeth_card *card, unsigned short vid)
5381{
5382 if (card->options.layer2 || !card->vlangrp)
5383 return;
5384 qeth_free_vlan_addresses4(card, vid);
5385 qeth_free_vlan_addresses6(card, vid);
5386}
5387
508cc2b0
FP
5388static int
5389qeth_layer2_send_setdelvlan_cb(struct qeth_card *card,
5390 struct qeth_reply *reply,
5391 unsigned long data)
5392{
5393 struct qeth_ipa_cmd *cmd;
5394
5395 QETH_DBF_TEXT(trace, 2, "L2sdvcb");
5396 cmd = (struct qeth_ipa_cmd *) data;
5397 if (cmd->hdr.return_code) {
5398 PRINT_ERR("Error in processing VLAN %i on %s: 0x%x. "
5399 "Continuing\n",cmd->data.setdelvlan.vlan_id,
5400 QETH_CARD_IFNAME(card), cmd->hdr.return_code);
5401 QETH_DBF_TEXT_(trace, 2, "L2VL%4x", cmd->hdr.command);
5402 QETH_DBF_TEXT_(trace, 2, "L2%s", CARD_BUS_ID(card));
5403 QETH_DBF_TEXT_(trace, 2, "err%d", cmd->hdr.return_code);
5404 }
5405 return 0;
5406}
5407
5408static int
1da177e4
LT
5409qeth_layer2_send_setdelvlan(struct qeth_card *card, __u16 i,
5410 enum qeth_ipa_cmds ipacmd)
5411{
1da177e4
LT
5412 struct qeth_ipa_cmd *cmd;
5413 struct qeth_cmd_buffer *iob;
5414
5415 QETH_DBF_TEXT_(trace, 4, "L2sdv%x",ipacmd);
5416 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
5417 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5418 cmd->data.setdelvlan.vlan_id = i;
508cc2b0
FP
5419 return qeth_send_ipa_cmd(card, iob,
5420 qeth_layer2_send_setdelvlan_cb, NULL);
1da177e4
LT
5421}
5422
5423static void
5424qeth_layer2_process_vlans(struct qeth_card *card, int clear)
5425{
5426 unsigned short i;
5427
5428 QETH_DBF_TEXT(trace, 3, "L2prcvln");
5429
5430 if (!card->vlangrp)
5431 return;
5432 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
5433 if (card->vlangrp->vlan_devices[i] == NULL)
5434 continue;
5435 if (clear)
5436 qeth_layer2_send_setdelvlan(card, i, IPA_CMD_DELVLAN);
5437 else
5438 qeth_layer2_send_setdelvlan(card, i, IPA_CMD_SETVLAN);
5439 }
5440}
5441
5442/* add_vid is used in layer 2 mode only ... */
5443static void
5444qeth_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
5445{
5446 struct qeth_card *card;
5447
5448 QETH_DBF_TEXT_(trace, 4, "aid:%d", vid);
5449
5450 card = (struct qeth_card *) dev->priv;
5451 if (!card->options.layer2)
5452 return;
5453 qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
5454}
5455
5456/* ... kill_vid is used in both layer 2 and layer 3 mode */
5457static void
5458qeth_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
5459{
5460 struct qeth_card *card;
5461 unsigned long flags;
5462
5463 QETH_DBF_TEXT_(trace, 4, "kid:%d", vid);
5464
5465 card = (struct qeth_card *) dev->priv;
5466 /* free all skbs for the vlan device */
5467 qeth_free_vlan_skbs(card, vid);
5468 spin_lock_irqsave(&card->vlanlock, flags);
5469 /* unregister IP addresses of vlan device */
6c88ad2d 5470 qeth_free_vlan_addresses(card, vid);
1da177e4
LT
5471 if (card->vlangrp)
5472 card->vlangrp->vlan_devices[vid] = NULL;
5473 spin_unlock_irqrestore(&card->vlanlock, flags);
5474 if (card->options.layer2)
5475 qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
5476 qeth_set_multicast_list(card->dev);
5477}
5478#endif
5479
5480/**
5481 * set multicast address on card
5482 */
5483static void
5484qeth_set_multicast_list(struct net_device *dev)
5485{
5486 struct qeth_card *card = (struct qeth_card *) dev->priv;
5487
500f83ab
UB
5488 if (card->info.type == QETH_CARD_TYPE_OSN)
5489 return ;
5490
1da177e4
LT
5491 QETH_DBF_TEXT(trace,3,"setmulti");
5492 qeth_delete_mc_addresses(card);
6c88ad2d
FP
5493 if (card->options.layer2) {
5494 qeth_layer2_add_multicast(card);
5495 goto out;
5496 }
1da177e4
LT
5497 qeth_add_multicast_ipv4(card);
5498#ifdef CONFIG_QETH_IPV6
5499 qeth_add_multicast_ipv6(card);
5500#endif
6c88ad2d 5501out:
1da177e4
LT
5502 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
5503 schedule_work(&card->kernel_thread_starter);
5504}
5505
5506static int
5507qeth_neigh_setup(struct net_device *dev, struct neigh_parms *np)
5508{
5509 return 0;
5510}
5511
5512static void
5513qeth_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev)
5514{
5515 if (dev->type == ARPHRD_IEEE802_TR)
5516 ip_tr_mc_map(ipm, mac);
5517 else
5518 ip_eth_mc_map(ipm, mac);
5519}
5520
5521static struct qeth_ipaddr *
5522qeth_get_addr_buffer(enum qeth_prot_versions prot)
5523{
5524 struct qeth_ipaddr *addr;
5525
5526 addr = kmalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
5527 if (addr == NULL) {
5528 PRINT_WARN("Not enough memory to add address\n");
5529 return NULL;
5530 }
5531 memset(addr,0,sizeof(struct qeth_ipaddr));
5532 addr->type = QETH_IP_TYPE_NORMAL;
5533 addr->proto = prot;
5534 return addr;
5535}
5536
500f83ab
UB
5537int
5538qeth_osn_assist(struct net_device *dev,
5539 void *data,
5540 int data_len)
5541{
5542 struct qeth_cmd_buffer *iob;
5543 struct qeth_card *card;
5544 int rc;
5545
5546 QETH_DBF_TEXT(trace, 2, "osnsdmc");
5547 if (!dev)
5548 return -ENODEV;
5549 card = (struct qeth_card *)dev->priv;
5550 if (!card)
5551 return -ENODEV;
5552 if ((card->state != CARD_STATE_UP) &&
5553 (card->state != CARD_STATE_SOFTSETUP))
5554 return -ENODEV;
5555 iob = qeth_wait_for_buffer(&card->write);
5556 memcpy(iob->data+IPA_PDU_HEADER_SIZE, data, data_len);
5557 rc = qeth_osn_send_ipa_cmd(card, iob, data_len);
5558 return rc;
5559}
5560
5561static struct net_device *
5562qeth_netdev_by_devno(unsigned char *read_dev_no)
5563{
5564 struct qeth_card *card;
5565 struct net_device *ndev;
5566 unsigned char *readno;
5567 __u16 temp_dev_no, card_dev_no;
5568 char *endp;
5569 unsigned long flags;
5570
5571 ndev = NULL;
5572 memcpy(&temp_dev_no, read_dev_no, 2);
5573 read_lock_irqsave(&qeth_card_list.rwlock, flags);
5574 list_for_each_entry(card, &qeth_card_list.list, list) {
5575 readno = CARD_RDEV_ID(card);
5576 readno += (strlen(readno) - 4);
5577 card_dev_no = simple_strtoul(readno, &endp, 16);
5578 if (card_dev_no == temp_dev_no) {
5579 ndev = card->dev;
5580 break;
5581 }
5582 }
5583 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
5584 return ndev;
5585}
5586
5587int
5588qeth_osn_register(unsigned char *read_dev_no,
5589 struct net_device **dev,
5590 int (*assist_cb)(struct net_device *, void *),
5591 int (*data_cb)(struct sk_buff *))
5592{
5593 struct qeth_card * card;
5594
5595 QETH_DBF_TEXT(trace, 2, "osnreg");
5596 *dev = qeth_netdev_by_devno(read_dev_no);
5597 if (*dev == NULL)
5598 return -ENODEV;
5599 card = (struct qeth_card *)(*dev)->priv;
5600 if (!card)
5601 return -ENODEV;
5602 if ((assist_cb == NULL) || (data_cb == NULL))
5603 return -EINVAL;
5604 card->osn_info.assist_cb = assist_cb;
5605 card->osn_info.data_cb = data_cb;
5606 return 0;
5607}
5608
5609void
5610qeth_osn_deregister(struct net_device * dev)
5611{
5612 struct qeth_card *card;
5613
5614 QETH_DBF_TEXT(trace, 2, "osndereg");
5615 if (!dev)
5616 return;
5617 card = (struct qeth_card *)dev->priv;
5618 if (!card)
5619 return;
5620 card->osn_info.assist_cb = NULL;
5621 card->osn_info.data_cb = NULL;
5622 return;
5623}
5624
1da177e4
LT
5625static void
5626qeth_delete_mc_addresses(struct qeth_card *card)
5627{
5628 struct qeth_ipaddr *iptodo;
5629 unsigned long flags;
5630
5631 QETH_DBF_TEXT(trace,4,"delmc");
5632 iptodo = qeth_get_addr_buffer(QETH_PROT_IPV4);
5633 if (!iptodo) {
5634 QETH_DBF_TEXT(trace, 2, "dmcnomem");
5635 return;
5636 }
5637 iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
5638 spin_lock_irqsave(&card->ip_lock, flags);
5639 if (!__qeth_insert_ip_todo(card, iptodo, 0))
5640 kfree(iptodo);
5641 spin_unlock_irqrestore(&card->ip_lock, flags);
5642}
5643
5644static inline void
5645qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev)
5646{
5647 struct qeth_ipaddr *ipm;
5648 struct ip_mc_list *im4;
5649 char buf[MAX_ADDR_LEN];
5650
5651 QETH_DBF_TEXT(trace,4,"addmc");
5652 for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
5653 qeth_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
5654 ipm = qeth_get_addr_buffer(QETH_PROT_IPV4);
5655 if (!ipm)
5656 continue;
5657 ipm->u.a4.addr = im4->multiaddr;
5658 memcpy(ipm->mac,buf,OSA_ADDR_LEN);
5659 ipm->is_multicast = 1;
5660 if (!qeth_add_ip(card,ipm))
5661 kfree(ipm);
5662 }
5663}
5664
5665static inline void
5666qeth_add_vlan_mc(struct qeth_card *card)
5667{
5668#ifdef CONFIG_QETH_VLAN
5669 struct in_device *in_dev;
5670 struct vlan_group *vg;
5671 int i;
5672
5673 QETH_DBF_TEXT(trace,4,"addmcvl");
5674 if ( ((card->options.layer2 == 0) &&
5675 (!qeth_is_supported(card,IPA_FULL_VLAN))) ||
5676 (card->vlangrp == NULL) )
5677 return ;
5678
5679 vg = card->vlangrp;
5680 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
5681 if (vg->vlan_devices[i] == NULL ||
5682 !(vg->vlan_devices[i]->flags & IFF_UP))
5683 continue;
5684 in_dev = in_dev_get(vg->vlan_devices[i]);
5685 if (!in_dev)
5686 continue;
5687 read_lock(&in_dev->mc_list_lock);
5688 qeth_add_mc(card,in_dev);
5689 read_unlock(&in_dev->mc_list_lock);
5690 in_dev_put(in_dev);
5691 }
5692#endif
5693}
5694
5695static void
5696qeth_add_multicast_ipv4(struct qeth_card *card)
5697{
5698 struct in_device *in4_dev;
5699
5700 QETH_DBF_TEXT(trace,4,"chkmcv4");
5701 in4_dev = in_dev_get(card->dev);
5702 if (in4_dev == NULL)
5703 return;
5704 read_lock(&in4_dev->mc_list_lock);
5705 qeth_add_mc(card, in4_dev);
5706 qeth_add_vlan_mc(card);
5707 read_unlock(&in4_dev->mc_list_lock);
5708 in_dev_put(in4_dev);
5709}
5710
6c88ad2d
FP
5711static void
5712qeth_layer2_add_multicast(struct qeth_card *card)
5713{
5714 struct qeth_ipaddr *ipm;
5715 struct dev_mc_list *dm;
5716
5717 QETH_DBF_TEXT(trace,4,"L2addmc");
5718 for (dm = card->dev->mc_list; dm; dm = dm->next) {
5719 ipm = qeth_get_addr_buffer(QETH_PROT_IPV4);
5720 if (!ipm)
5721 continue;
5722 memcpy(ipm->mac,dm->dmi_addr,MAX_ADDR_LEN);
5723 ipm->is_multicast = 1;
5724 if (!qeth_add_ip(card, ipm))
5725 kfree(ipm);
5726 }
5727}
5728
1da177e4
LT
5729#ifdef CONFIG_QETH_IPV6
5730static inline void
5731qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
5732{
5733 struct qeth_ipaddr *ipm;
5734 struct ifmcaddr6 *im6;
5735 char buf[MAX_ADDR_LEN];
5736
5737 QETH_DBF_TEXT(trace,4,"addmc6");
5738 for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
5739 ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
5740 ipm = qeth_get_addr_buffer(QETH_PROT_IPV6);
5741 if (!ipm)
5742 continue;
5743 ipm->is_multicast = 1;
5744 memcpy(ipm->mac,buf,OSA_ADDR_LEN);
5745 memcpy(&ipm->u.a6.addr,&im6->mca_addr.s6_addr,
5746 sizeof(struct in6_addr));
5747 if (!qeth_add_ip(card,ipm))
5748 kfree(ipm);
5749 }
5750}
5751
5752static inline void
5753qeth_add_vlan_mc6(struct qeth_card *card)
5754{
5755#ifdef CONFIG_QETH_VLAN
5756 struct inet6_dev *in_dev;
5757 struct vlan_group *vg;
5758 int i;
5759
5760 QETH_DBF_TEXT(trace,4,"admc6vl");
5761 if ( ((card->options.layer2 == 0) &&
5762 (!qeth_is_supported(card,IPA_FULL_VLAN))) ||
5763 (card->vlangrp == NULL))
5764 return ;
5765
5766 vg = card->vlangrp;
5767 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
5768 if (vg->vlan_devices[i] == NULL ||
5769 !(vg->vlan_devices[i]->flags & IFF_UP))
5770 continue;
5771 in_dev = in6_dev_get(vg->vlan_devices[i]);
5772 if (!in_dev)
5773 continue;
5774 read_lock(&in_dev->lock);
5775 qeth_add_mc6(card,in_dev);
5776 read_unlock(&in_dev->lock);
5777 in6_dev_put(in_dev);
5778 }
5779#endif /* CONFIG_QETH_VLAN */
5780}
5781
5782static void
5783qeth_add_multicast_ipv6(struct qeth_card *card)
5784{
5785 struct inet6_dev *in6_dev;
5786
5787 QETH_DBF_TEXT(trace,4,"chkmcv6");
5788 if ((card->options.layer2 == 0) &&
5789 (!qeth_is_supported(card, IPA_IPV6)) )
5790 return ;
5791
5792 in6_dev = in6_dev_get(card->dev);
5793 if (in6_dev == NULL)
5794 return;
5795 read_lock(&in6_dev->lock);
5796 qeth_add_mc6(card, in6_dev);
5797 qeth_add_vlan_mc6(card);
5798 read_unlock(&in6_dev->lock);
5799 in6_dev_put(in6_dev);
5800}
5801#endif /* CONFIG_QETH_IPV6 */
5802
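/*
 * Common helper for the layer 2 MAC commands: builds a SETVMAC/DELVMAC
 * or SETGMAC/DELGMAC IPA command for the given address and hands the
 * reply to the supplied callback.
 */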
5803static int
5804qeth_layer2_send_setdelmac(struct qeth_card *card, __u8 *mac,
5805 enum qeth_ipa_cmds ipacmd,
5806 int (*reply_cb) (struct qeth_card *,
5807 struct qeth_reply*,
5808 unsigned long))
5809{
5810 struct qeth_ipa_cmd *cmd;
5811 struct qeth_cmd_buffer *iob;
5812
5813 QETH_DBF_TEXT(trace, 2, "L2sdmac");
5814 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
5815 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5816 cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
5817 memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
5818 return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
5819}
5820
5821static int
5822qeth_layer2_send_setgroupmac_cb(struct qeth_card *card,
5823 struct qeth_reply *reply,
5824 unsigned long data)
5825{
5826 struct qeth_ipa_cmd *cmd;
5827 __u8 *mac;
5828
5829 QETH_DBF_TEXT(trace, 2, "L2Sgmacb");
5830 cmd = (struct qeth_ipa_cmd *) data;
5831 mac = &cmd->data.setdelmac.mac[0];
5832 /* MAC already registered, needed in couple/uncouple case */
5833 if (cmd->hdr.return_code == 0x2005) {
5834 PRINT_WARN("Group MAC %02x:%02x:%02x:%02x:%02x:%02x " \
5835 "already existing on %s \n",
5836 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
5837 QETH_CARD_IFNAME(card));
5838 cmd->hdr.return_code = 0;
5839 }
5840 if (cmd->hdr.return_code)
5841 PRINT_ERR("Could not set group MAC " \
5842 "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
5843 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
5844 QETH_CARD_IFNAME(card),cmd->hdr.return_code);
5845 return 0;
5846}
5847
5848static int
5849qeth_layer2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
5850{
5851 QETH_DBF_TEXT(trace, 2, "L2Sgmac");
5852 return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
5853 qeth_layer2_send_setgroupmac_cb);
5854}
5855
5856static int
5857qeth_layer2_send_delgroupmac_cb(struct qeth_card *card,
5858 struct qeth_reply *reply,
5859 unsigned long data)
5860{
5861 struct qeth_ipa_cmd *cmd;
5862 __u8 *mac;
5863
5864 QETH_DBF_TEXT(trace, 2, "L2Dgmacb");
5865 cmd = (struct qeth_ipa_cmd *) data;
5866 mac = &cmd->data.setdelmac.mac[0];
5867 if (cmd->hdr.return_code)
5868 PRINT_ERR("Could not delete group MAC " \
5869 "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
5870 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
5871 QETH_CARD_IFNAME(card), cmd->hdr.return_code);
5872 return 0;
5873}
5874
5875static int
5876qeth_layer2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
5877{
5878 QETH_DBF_TEXT(trace, 2, "L2Dgmac");
5879 return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
5880 qeth_layer2_send_delgroupmac_cb);
5881}
5882
5883static int
5884qeth_layer2_send_setmac_cb(struct qeth_card *card,
5885 struct qeth_reply *reply,
5886 unsigned long data)
5887{
5888 struct qeth_ipa_cmd *cmd;
5889
5890 QETH_DBF_TEXT(trace, 2, "L2Smaccb");
5891 cmd = (struct qeth_ipa_cmd *) data;
5892 if (cmd->hdr.return_code) {
5893 QETH_DBF_TEXT_(trace, 2, "L2er%x", cmd->hdr.return_code);
5894 PRINT_WARN("Error in registering MAC address on " \
5895 "device %s: x%x\n", CARD_BUS_ID(card),
5896 cmd->hdr.return_code);
5897 card->info.layer2_mac_registered = 0;
5898 cmd->hdr.return_code = -EIO;
5899 } else {
5900 card->info.layer2_mac_registered = 1;
5901 memcpy(card->dev->dev_addr,cmd->data.setdelmac.mac,
5902 OSA_ADDR_LEN);
5903 PRINT_INFO("MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
5904 "successfully registered on device %s\n",
5905 card->dev->dev_addr[0], card->dev->dev_addr[1],
5906 card->dev->dev_addr[2], card->dev->dev_addr[3],
5907 card->dev->dev_addr[4], card->dev->dev_addr[5],
5908 card->dev->name);
5909 }
5910 return 0;
5911}
5912
5913static int
5914qeth_layer2_send_setmac(struct qeth_card *card, __u8 *mac)
5915{
5916 QETH_DBF_TEXT(trace, 2, "L2Setmac");
5917 return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
5918 qeth_layer2_send_setmac_cb);
5919}
5920
5921static int
5922qeth_layer2_send_delmac_cb(struct qeth_card *card,
5923 struct qeth_reply *reply,
5924 unsigned long data)
5925{
5926 struct qeth_ipa_cmd *cmd;
5927
5928 QETH_DBF_TEXT(trace, 2, "L2Dmaccb");
5929 cmd = (struct qeth_ipa_cmd *) data;
5930 if (cmd->hdr.return_code) {
5931 PRINT_WARN("Error in deregistering MAC address on " \
5932 "device %s: x%x\n", CARD_BUS_ID(card),
5933 cmd->hdr.return_code);
5934 QETH_DBF_TEXT_(trace, 2, "err%d", cmd->hdr.return_code);
5935 cmd->hdr.return_code = -EIO;
5936 return 0;
5937 }
5938 card->info.layer2_mac_registered = 0;
5939
5940 return 0;
5941}
5942static int
5943qeth_layer2_send_delmac(struct qeth_card *card, __u8 *mac)
5944{
5945 QETH_DBF_TEXT(trace, 2, "L2Delmac");
5946 if (!card->info.layer2_mac_registered)
5947 return 0;
5948 return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
5949 qeth_layer2_send_delmac_cb);
5950}
5951
5952static int
5953qeth_layer2_set_mac_address(struct net_device *dev, void *p)
5954{
5955 struct sockaddr *addr = p;
5956 struct qeth_card *card;
5957 int rc = 0;
5958
5959 QETH_DBF_TEXT(trace, 3, "setmac");
5960
5961 if (qeth_verify_dev(dev) != QETH_REAL_CARD) {
5962 QETH_DBF_TEXT(trace, 3, "setmcINV");
5963 return -EOPNOTSUPP;
5964 }
5965 card = (struct qeth_card *) dev->priv;
5966
5967 if (!card->options.layer2) {
5968 PRINT_WARN("Setting MAC address on %s is not supported"
5969 "in Layer 3 mode.\n", dev->name);
5970 QETH_DBF_TEXT(trace, 3, "setmcLY3");
5971 return -EOPNOTSUPP;
5972 }
500f83ab
UB
5973 if (card->info.type == QETH_CARD_TYPE_OSN) {
5974 PRINT_WARN("Setting MAC address on %s is not supported.\n",
5975 dev->name);
5976 QETH_DBF_TEXT(trace, 3, "setmcOSN");
5977 return -EOPNOTSUPP;
5978 }
1da177e4
LT
5979 QETH_DBF_TEXT_(trace, 3, "%s", CARD_BUS_ID(card));
5980 QETH_DBF_HEX(trace, 3, addr->sa_data, OSA_ADDR_LEN);
5981 rc = qeth_layer2_send_delmac(card, &card->dev->dev_addr[0]);
5982 if (!rc)
5983 rc = qeth_layer2_send_setmac(card, addr->sa_data);
5984 return rc;
5985}
5986
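/*
 * Fill the common IPA command header; prim_version_no selects the
 * layer 2 (2) or layer 3 (1) command format depending on the
 * configured discipline.
 */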
5987static void
5988qeth_fill_ipacmd_header(struct qeth_card *card, struct qeth_ipa_cmd *cmd,
5989 __u8 command, enum qeth_prot_versions prot)
5990{
5991 memset(cmd, 0, sizeof (struct qeth_ipa_cmd));
5992 cmd->hdr.command = command;
5993 cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
5994 cmd->hdr.seqno = card->seqno.ipa;
5995 cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
5996 cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
5997 if (card->options.layer2)
5998 cmd->hdr.prim_version_no = 2;
5999 else
6000 cmd->hdr.prim_version_no = 1;
6001 cmd->hdr.param_count = 1;
6002 cmd->hdr.prot_version = prot;
6003 cmd->hdr.ipa_supported = 0;
6004 cmd->hdr.ipa_enabled = 0;
6005}
6006
6007static struct qeth_cmd_buffer *
6008qeth_get_ipacmd_buffer(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
6009 enum qeth_prot_versions prot)
6010{
6011 struct qeth_cmd_buffer *iob;
6012 struct qeth_ipa_cmd *cmd;
6013
6014 iob = qeth_wait_for_buffer(&card->write);
6015 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6016 qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
6017
6018 return iob;
6019}
6020
6021static int
6022qeth_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd)
6023{
6024 int rc;
6025 struct qeth_cmd_buffer *iob;
6026 struct qeth_ipa_cmd *cmd;
6027
6028 QETH_DBF_TEXT(trace,4,"setdelmc");
6029
6030 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
6031 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6032 memcpy(&cmd->data.setdelipm.mac,addr->mac, OSA_ADDR_LEN);
6033 if (addr->proto == QETH_PROT_IPV6)
6034 memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
6035 sizeof(struct in6_addr));
6036 else
6037 memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr,4);
6038
6039 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
6040
6041 return rc;
6042}
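/*
 * Expand an IPv6 prefix length into a 16 byte netmask, byte by byte.
 * Worked example: a prefix length of 20 gives ff ff f0 00 ... 00 --
 * bytes 0 and 1 are fully set, byte 2 keeps its top four bits
 * ((u8)(0xFF00 >> 4) == 0xf0) and the remaining bytes are zero.
 */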
6043static inline void
6044qeth_fill_netmask(u8 *netmask, unsigned int len)
6045{
6046 int i,j;
6047 for (i=0;i<16;i++) {
6048 j=(len)-(i*8);
6049 if (j >= 8)
6050 netmask[i] = 0xff;
6051 else if (j > 0)
6052 netmask[i] = (u8)(0xFF00>>j);
6053 else
6054 netmask[i] = 0;
6055 }
6056}
6057
6058static int
6059qeth_send_setdelip(struct qeth_card *card, struct qeth_ipaddr *addr,
6060 int ipacmd, unsigned int flags)
6061{
6062 int rc;
6063 struct qeth_cmd_buffer *iob;
6064 struct qeth_ipa_cmd *cmd;
6065 __u8 netmask[16];
6066
6067 QETH_DBF_TEXT(trace,4,"setdelip");
6068 QETH_DBF_TEXT_(trace,4,"flags%02X", flags);
6069
6070 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
6071 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6072 if (addr->proto == QETH_PROT_IPV6) {
6073 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
6074 sizeof(struct in6_addr));
6075 qeth_fill_netmask(netmask,addr->u.a6.pfxlen);
6076 memcpy(cmd->data.setdelip6.mask, netmask,
6077 sizeof(struct in6_addr));
6078 cmd->data.setdelip6.flags = flags;
6079 } else {
6080 memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
6081 memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
6082 cmd->data.setdelip4.flags = flags;
6083 }
6084
6085 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
6086
6087 return rc;
6088}
6089
6090static int
6091qeth_layer2_register_addr_entry(struct qeth_card *card,
6092 struct qeth_ipaddr *addr)
6093{
6094 if (!addr->is_multicast)
6095 return 0;
6096 QETH_DBF_TEXT(trace, 2, "setgmac");
6097 QETH_DBF_HEX(trace,3,&addr->mac[0],OSA_ADDR_LEN);
6098 return qeth_layer2_send_setgroupmac(card, &addr->mac[0]);
6099}
6100
6101static int
6102qeth_layer2_deregister_addr_entry(struct qeth_card *card,
6103 struct qeth_ipaddr *addr)
6104{
6105 if (!addr->is_multicast)
6106 return 0;
6107 QETH_DBF_TEXT(trace, 2, "delgmac");
6108 QETH_DBF_HEX(trace,3,&addr->mac[0],OSA_ADDR_LEN);
6109 return qeth_layer2_send_delgroupmac(card, &addr->mac[0]);
6110}
6111
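/*
 * Register a unicast or multicast address with a layer 3 card; the
 * SETIP (unicast) or SETIPM (multicast) command is retried up to
 * three times before the failure is reported.
 */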
6112static int
6113qeth_layer3_register_addr_entry(struct qeth_card *card,
6114 struct qeth_ipaddr *addr)
6115{
6116 char buf[50];
6117 int rc;
6118 int cnt = 3;
6119
6120 if (addr->proto == QETH_PROT_IPV4) {
6121 QETH_DBF_TEXT(trace, 2,"setaddr4");
6122 QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
6123 } else if (addr->proto == QETH_PROT_IPV6) {
6124 QETH_DBF_TEXT(trace, 2, "setaddr6");
6125 QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
6126 QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
6127 } else {
6128 QETH_DBF_TEXT(trace, 2, "setaddr?");
6129 QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
6130 }
6131 do {
6132 if (addr->is_multicast)
6133 rc = qeth_send_setdelmc(card, addr, IPA_CMD_SETIPM);
6134 else
6135 rc = qeth_send_setdelip(card, addr, IPA_CMD_SETIP,
6136 addr->set_flags);
6137 if (rc)
6138 QETH_DBF_TEXT(trace, 2, "failed");
6139 } while ((--cnt > 0) && rc);
6140 if (rc){
6141 QETH_DBF_TEXT(trace, 2, "FAILED");
6142 qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
6143 PRINT_WARN("Could not register IP address %s (rc=0x%x/%d)\n",
6144 buf, rc, rc);
6145 }
6146 return rc;
6147}
6148
6149static int
6150qeth_layer3_deregister_addr_entry(struct qeth_card *card,
6151 struct qeth_ipaddr *addr)
6152{
6153 //char buf[50];
6154 int rc;
6155
6156 if (addr->proto == QETH_PROT_IPV4) {
6157 QETH_DBF_TEXT(trace, 2,"deladdr4");
6158 QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
6159 } else if (addr->proto == QETH_PROT_IPV6) {
6160 QETH_DBF_TEXT(trace, 2, "deladdr6");
6161 QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
6162 QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
6163 } else {
6164 QETH_DBF_TEXT(trace, 2, "deladdr?");
6165 QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
6166 }
6167 if (addr->is_multicast)
6168 rc = qeth_send_setdelmc(card, addr, IPA_CMD_DELIPM);
6169 else
6170 rc = qeth_send_setdelip(card, addr, IPA_CMD_DELIP,
6171 addr->del_flags);
6172 if (rc) {
6173 QETH_DBF_TEXT(trace, 2, "failed");
6174 /* TODO: re-activate this warning as soon as we have a
6175 * clean micro code
6176 qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
6177 PRINT_WARN("Could not deregister IP address %s (rc=%x)\n",
6178 buf, rc);
6179 */
6180 }
6181 return rc;
6182}
6183
6184static int
6185qeth_register_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
6186{
6187 if (card->options.layer2)
6188 return qeth_layer2_register_addr_entry(card, addr);
6189
6190 return qeth_layer3_register_addr_entry(card, addr);
6191}
6192
6193static int
6194qeth_deregister_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
6195{
6196 if (card->options.layer2)
6197 return qeth_layer2_deregister_addr_entry(card, addr);
6198
6199 return qeth_layer3_deregister_addr_entry(card, addr);
6200}
6201
6202static u32
6203qeth_ethtool_get_tx_csum(struct net_device *dev)
6204{
6205 /* We may need to claim tx csum offload support if
6206 * we do EDDP or TSO. There are discussions going on to
6207 * enforce rules in the stack and in ethtool that make
6208 * SG and TSO depend on HW_CSUM. At the moment there are
6209 * no such rules.
6210 * If we said yes here, we would have to checksum every
6211 * outbound packet ourselves. */
6212 return 0;
6213}
6214
6215static int
6216qeth_ethtool_set_tx_csum(struct net_device *dev, u32 data)
6217{
6218 return -EINVAL;
6219}
6220
6221static u32
6222qeth_ethtool_get_rx_csum(struct net_device *dev)
6223{
6224 struct qeth_card *card = (struct qeth_card *)dev->priv;
6225
6226 return (card->options.checksum_type == HW_CHECKSUMMING);
6227}
6228
6229static int
6230qeth_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6231{
6232 struct qeth_card *card = (struct qeth_card *)dev->priv;
6233
6234 if ((card->state != CARD_STATE_DOWN) &&
6235 (card->state != CARD_STATE_RECOVER))
6236 return -EPERM;
6237 if (data)
6238 card->options.checksum_type = HW_CHECKSUMMING;
6239 else
6240 card->options.checksum_type = SW_CHECKSUMMING;
6241 return 0;
6242}
6243
6244static u32
6245qeth_ethtool_get_sg(struct net_device *dev)
6246{
6247 struct qeth_card *card = (struct qeth_card *)dev->priv;
6248
6249 return ((card->options.large_send != QETH_LARGE_SEND_NO) &&
6250 (dev->features & NETIF_F_SG));
6251}
6252
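/*
 * SG (and, in the same way, TSO below) can only be switched on while a
 * large send mode is configured; otherwise the feature bit is cleared
 * and -EINVAL is returned.
 */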
6253static int
6254qeth_ethtool_set_sg(struct net_device *dev, u32 data)
6255{
6256 struct qeth_card *card = (struct qeth_card *)dev->priv;
6257
6258 if (data) {
6259 if (card->options.large_send != QETH_LARGE_SEND_NO)
6260 dev->features |= NETIF_F_SG;
6261 else {
6262 dev->features &= ~NETIF_F_SG;
6263 return -EINVAL;
6264 }
6265 } else
6266 dev->features &= ~NETIF_F_SG;
6267 return 0;
6268}
6269
6270static u32
6271qeth_ethtool_get_tso(struct net_device *dev)
6272{
6273 struct qeth_card *card = (struct qeth_card *)dev->priv;
6274
6275 return ((card->options.large_send != QETH_LARGE_SEND_NO) &&
6276 (dev->features & NETIF_F_TSO));
6277}
6278
6279static int
6280qeth_ethtool_set_tso(struct net_device *dev, u32 data)
6281{
6282 struct qeth_card *card = (struct qeth_card *)dev->priv;
6283
6284 if (data) {
6285 if (card->options.large_send != QETH_LARGE_SEND_NO)
6286 dev->features |= NETIF_F_TSO;
6287 else {
6288 dev->features &= ~NETIF_F_TSO;
6289 return -EINVAL;
6290 }
6291 } else
6292 dev->features &= ~NETIF_F_TSO;
6293 return 0;
6294}
6295
6296static struct ethtool_ops qeth_ethtool_ops = {
6297 .get_tx_csum = qeth_ethtool_get_tx_csum,
6298 .set_tx_csum = qeth_ethtool_set_tx_csum,
6299 .get_rx_csum = qeth_ethtool_get_rx_csum,
6300 .set_rx_csum = qeth_ethtool_set_rx_csum,
6301 .get_sg = qeth_ethtool_get_sg,
6302 .set_sg = qeth_ethtool_set_sg,
6303 .get_tso = qeth_ethtool_get_tso,
6304 .set_tso = qeth_ethtool_set_tso,
6305};
6306
6307static int
6308qeth_netdev_init(struct net_device *dev)
6309{
6310 struct qeth_card *card;
6311
6312 card = (struct qeth_card *) dev->priv;
6313
6314 QETH_DBF_TEXT(trace,3,"initdev");
6315
6316 dev->tx_timeout = &qeth_tx_timeout;
6317 dev->watchdog_timeo = QETH_TX_TIMEOUT;
6318 dev->open = qeth_open;
6319 dev->stop = qeth_stop;
6320 dev->hard_start_xmit = qeth_hard_start_xmit;
6321 dev->do_ioctl = qeth_do_ioctl;
6322 dev->get_stats = qeth_get_stats;
6323 dev->change_mtu = qeth_change_mtu;
6324 dev->neigh_setup = qeth_neigh_setup;
6325 dev->set_multicast_list = qeth_set_multicast_list;
6326#ifdef CONFIG_QETH_VLAN
6327 dev->vlan_rx_register = qeth_vlan_rx_register;
6328 dev->vlan_rx_kill_vid = qeth_vlan_rx_kill_vid;
6329 dev->vlan_rx_add_vid = qeth_vlan_rx_add_vid;
6330#endif
6331 dev->hard_header = card->orig_hard_header;
6332 if (qeth_get_netdev_flags(card) & IFF_NOARP) {
6333 dev->rebuild_header = NULL;
6334 dev->hard_header = NULL;
6335 if (card->options.fake_ll)
6336 dev->hard_header = qeth_fake_header;
6337 dev->header_cache_update = NULL;
6338 dev->hard_header_cache = NULL;
6339 }
6340#ifdef CONFIG_QETH_IPV6
6341 /*IPv6 address autoconfiguration stuff*/
6342 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
6343 card->dev->dev_id = card->info.unique_id & 0xffff;
6344#endif
6345 dev->hard_header_parse = NULL;
6346 dev->set_mac_address = qeth_layer2_set_mac_address;
6347 dev->flags |= qeth_get_netdev_flags(card);
6348 if ((card->options.fake_broadcast) ||
6349 (card->info.broadcast_capable))
6350 dev->flags |= IFF_BROADCAST;
6351 dev->hard_header_len =
6352 qeth_get_hlen(card->info.link_type) + card->options.add_hhlen;
6353 dev->addr_len = OSA_ADDR_LEN;
6354 dev->mtu = card->info.initial_mtu;
500f83ab
UB
6355 if (card->info.type != QETH_CARD_TYPE_OSN)
6356 SET_ETHTOOL_OPS(dev, &qeth_ethtool_ops);
1da177e4
LT
6357 SET_MODULE_OWNER(dev);
6358 return 0;
6359}
6360
6361static void
6362qeth_init_func_level(struct qeth_card *card)
6363{
6364 if (card->ipato.enabled) {
6365 if (card->info.type == QETH_CARD_TYPE_IQD)
6366 card->info.func_level =
6367 QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
6368 else
6369 card->info.func_level =
6370 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT;
6371 } else {
6372 if (card->info.type == QETH_CARD_TYPE_IQD)
500f83ab 6373 /*FIXME:why do we have same values for dis and ena for osae??? */
1da177e4
LT
6374 card->info.func_level =
6375 QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
6376 else
6377 card->info.func_level =
6378 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT;
6379 }
6380}
6381
6382/**
6383 * hardsetup card, initialize MPC and QDIO stuff
6384 */
6385static int
6386qeth_hardsetup_card(struct qeth_card *card)
6387{
6388 int retries = 3;
6389 int rc;
6390
6391 QETH_DBF_TEXT(setup, 2, "hrdsetup");
6392
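	/*
	 * The IDX activation sequence is retried up to three times; before
	 * each retry the data, write and read subchannels are set offline
	 * and back online.
	 */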
6393retry:
6394 if (retries < 3){
6395 PRINT_WARN("Retrying to do IDX activates.\n");
6396 ccw_device_set_offline(CARD_DDEV(card));
6397 ccw_device_set_offline(CARD_WDEV(card));
6398 ccw_device_set_offline(CARD_RDEV(card));
6399 ccw_device_set_online(CARD_RDEV(card));
6400 ccw_device_set_online(CARD_WDEV(card));
6401 ccw_device_set_online(CARD_DDEV(card));
6402 }
500f83ab 6403 rc = qeth_qdio_clear_card(card,card->info.type!=QETH_CARD_TYPE_IQD);
1da177e4
LT
6404 if (rc == -ERESTARTSYS) {
6405 QETH_DBF_TEXT(setup, 2, "break1");
6406 return rc;
6407 } else if (rc) {
6408 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
6409 if (--retries < 0)
6410 goto out;
6411 else
6412 goto retry;
6413 }
6414 if ((rc = qeth_get_unitaddr(card))){
6415 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
6416 return rc;
6417 }
6418 qeth_init_tokens(card);
6419 qeth_init_func_level(card);
6420 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
6421 if (rc == -ERESTARTSYS) {
6422 QETH_DBF_TEXT(setup, 2, "break2");
6423 return rc;
6424 } else if (rc) {
6425 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
6426 if (--retries < 0)
6427 goto out;
6428 else
6429 goto retry;
6430 }
6431 rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb);
6432 if (rc == -ERESTARTSYS) {
6433 QETH_DBF_TEXT(setup, 2, "break3");
6434 return rc;
6435 } else if (rc) {
6436 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
6437 if (--retries < 0)
6438 goto out;
6439 else
6440 goto retry;
6441 }
6442 if ((rc = qeth_mpc_initialize(card))){
6443 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
6444 goto out;
6445 }
6446 /*network device will be recovered*/
6447 if (card->dev) {
6448 card->dev->hard_header = card->orig_hard_header;
6449 return 0;
6450 }
6451 /* at first set_online allocate netdev */
6452 card->dev = qeth_get_netdevice(card->info.type,
6453 card->info.link_type);
6454 if (!card->dev){
500f83ab
UB
6455 qeth_qdio_clear_card(card, card->info.type !=
6456 QETH_CARD_TYPE_IQD);
1da177e4
LT
6457 rc = -ENODEV;
6458 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
6459 goto out;
6460 }
6461 card->dev->priv = card;
6462 card->orig_hard_header = card->dev->hard_header;
6463 card->dev->type = qeth_get_arphdr_type(card->info.type,
6464 card->info.link_type);
6465 card->dev->init = qeth_netdev_init;
6466 return 0;
6467out:
6468 PRINT_ERR("Initialization in hardsetup failed! rc=%d\n", rc);
6469 return rc;
6470}
6471
6472static int
6473qeth_default_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply,
6474 unsigned long data)
6475{
6476 struct qeth_ipa_cmd *cmd;
6477
6478 QETH_DBF_TEXT(trace,4,"defadpcb");
6479
6480 cmd = (struct qeth_ipa_cmd *) data;
6481 if (cmd->hdr.return_code == 0){
6482 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
6483 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
6484 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
6485#ifdef CONFIG_QETH_IPV6
6486 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
6487 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
6488#endif
6489 }
6490 if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
6491 cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
6492 card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
6493 QETH_DBF_TEXT_(trace, 3, "csum:%d", card->info.csum_mask);
6494 }
6495 return 0;
6496}
6497
6498static int
6499qeth_default_setadapterparms_cb(struct qeth_card *card,
6500 struct qeth_reply *reply,
6501 unsigned long data)
6502{
6503 struct qeth_ipa_cmd *cmd;
6504
6505 QETH_DBF_TEXT(trace,4,"defadpcb");
6506
6507 cmd = (struct qeth_ipa_cmd *) data;
6508 if (cmd->hdr.return_code == 0)
6509 cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
6510 return 0;
6511}
6512
6513static int
6514qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply,
6515 unsigned long data)
6516{
6517 struct qeth_ipa_cmd *cmd;
6518
6519 QETH_DBF_TEXT(trace,3,"quyadpcb");
6520
6521 cmd = (struct qeth_ipa_cmd *) data;
6522 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f)
6523 card->info.link_type =
6524 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
6525 card->options.adp.supported_funcs =
6526 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
6527 return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
6528}
6529
6530static int
6531qeth_query_setadapterparms(struct qeth_card *card)
6532{
6533 int rc;
6534 struct qeth_cmd_buffer *iob;
6535
6536 QETH_DBF_TEXT(trace,3,"queryadp");
6537 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
6538 sizeof(struct qeth_ipacmd_setadpparms));
6539 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
6540 return rc;
6541}
6542
6543static int
6544qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
6545 struct qeth_reply *reply,
6546 unsigned long data)
6547{
6548 struct qeth_ipa_cmd *cmd;
6549
6550 QETH_DBF_TEXT(trace,4,"chgmaccb");
6551
6552 cmd = (struct qeth_ipa_cmd *) data;
6553 memcpy(card->dev->dev_addr,
6554 &cmd->data.setadapterparms.data.change_addr.addr,OSA_ADDR_LEN);
6555 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
6556 return 0;
6557}
6558
6559static int
6560qeth_setadpparms_change_macaddr(struct qeth_card *card)
6561{
6562 int rc;
6563 struct qeth_cmd_buffer *iob;
6564 struct qeth_ipa_cmd *cmd;
6565
6566 QETH_DBF_TEXT(trace,4,"chgmac");
6567
6568 iob = qeth_get_adapter_cmd(card,IPA_SETADP_ALTER_MAC_ADDRESS,
6569 sizeof(struct qeth_ipacmd_setadpparms));
6570 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6571 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
6572 cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
6573 memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
6574 card->dev->dev_addr, OSA_ADDR_LEN);
6575 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
6576 NULL);
6577 return rc;
6578}
6579
6580static int
6581qeth_send_setadp_mode(struct qeth_card *card, __u32 command, __u32 mode)
6582{
6583 int rc;
6584 struct qeth_cmd_buffer *iob;
6585 struct qeth_ipa_cmd *cmd;
6586
6587 QETH_DBF_TEXT(trace,4,"adpmode");
6588
6589 iob = qeth_get_adapter_cmd(card, command,
6590 sizeof(struct qeth_ipacmd_setadpparms));
6591 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6592 cmd->data.setadapterparms.data.mode = mode;
6593 rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb,
6594 NULL);
6595 return rc;
6596}
6597
6598static inline int
6599qeth_setadapter_hstr(struct qeth_card *card)
6600{
6601 int rc;
6602
6603 QETH_DBF_TEXT(trace,4,"adphstr");
6604
6605 if (qeth_adp_supported(card,IPA_SETADP_SET_BROADCAST_MODE)) {
6606 rc = qeth_send_setadp_mode(card, IPA_SETADP_SET_BROADCAST_MODE,
6607 card->options.broadcast_mode);
6608 if (rc)
6609 PRINT_WARN("couldn't set broadcast mode on "
6610 "device %s: x%x\n",
6611 CARD_BUS_ID(card), rc);
6612 rc = qeth_send_setadp_mode(card, IPA_SETADP_ALTER_MAC_ADDRESS,
6613 card->options.macaddr_mode);
6614 if (rc)
6615 PRINT_WARN("couldn't set macaddr mode on "
6616 "device %s: x%x\n", CARD_BUS_ID(card), rc);
6617 return rc;
6618 }
6619 if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL)
6620 PRINT_WARN("set adapter parameters not available "
6621 "to set broadcast mode, using ALLRINGS "
6622 "on device %s:\n", CARD_BUS_ID(card));
6623 if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL)
6624 PRINT_WARN("set adapter parameters not available "
6625 "to set macaddr mode, using NONCANONICAL "
6626 "on device %s:\n", CARD_BUS_ID(card));
6627 return 0;
6628}
6629
6630static int
6631qeth_setadapter_parms(struct qeth_card *card)
6632{
6633 int rc;
6634
6635 QETH_DBF_TEXT(setup, 2, "setadprm");
6636
6637 if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)){
6638 PRINT_WARN("set adapter parameters not supported "
6639 "on device %s.\n",
6640 CARD_BUS_ID(card));
6641 QETH_DBF_TEXT(setup, 2, " notsupp");
6642 return 0;
6643 }
6644 rc = qeth_query_setadapterparms(card);
6645 if (rc) {
6646 PRINT_WARN("couldn't set adapter parameters on device %s: "
6647 "x%x\n", CARD_BUS_ID(card), rc);
6648 return rc;
6649 }
6650 if (qeth_adp_supported(card,IPA_SETADP_ALTER_MAC_ADDRESS)) {
6651 rc = qeth_setadpparms_change_macaddr(card);
6652 if (rc)
6653 PRINT_WARN("couldn't get MAC address on "
6654 "device %s: x%x\n",
6655 CARD_BUS_ID(card), rc);
6656 }
6657
6658 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
6659 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
6660 rc = qeth_setadapter_hstr(card);
6661
6662 return rc;
6663}
6664
6665static int
6666qeth_layer2_initialize(struct qeth_card *card)
6667{
6668 int rc = 0;
6669
6670
6671 QETH_DBF_TEXT(setup, 2, "doL2init");
6672 QETH_DBF_TEXT_(setup, 2, "doL2%s", CARD_BUS_ID(card));
6673
6674 rc = qeth_setadpparms_change_macaddr(card);
6675 if (rc) {
6676 PRINT_WARN("couldn't get MAC address on "
6677 "device %s: x%x\n",
6678 CARD_BUS_ID(card), rc);
6679 QETH_DBF_TEXT_(setup, 2,"1err%d",rc);
6680 return rc;
6681 }
6682 QETH_DBF_HEX(setup,2, card->dev->dev_addr, OSA_ADDR_LEN);
6683
6684 rc = qeth_layer2_send_setmac(card, &card->dev->dev_addr[0]);
6685 if (rc)
6686 QETH_DBF_TEXT_(setup, 2,"2err%d",rc);
6687 return 0;
6688}
6689
6690
6691static int
6692qeth_send_startstoplan(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
6693 enum qeth_prot_versions prot)
6694{
6695 int rc;
6696 struct qeth_cmd_buffer *iob;
6697
6698 iob = qeth_get_ipacmd_buffer(card,ipacmd,prot);
6699 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
6700
6701 return rc;
6702}
6703
6704static int
6705qeth_send_startlan(struct qeth_card *card, enum qeth_prot_versions prot)
6706{
6707 int rc;
6708
6709 QETH_DBF_TEXT_(setup, 2, "strtlan%i", prot);
6710
6711 rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, prot);
6712 return rc;
6713}
6714
6715static int
6716qeth_send_stoplan(struct qeth_card *card)
6717{
6718 int rc = 0;
6719
6720 /*
6721 * TODO: according to the IPA format document, page 14,
6722 * the TCP/IP stack (i.e. we) never issues a STOPLAN.
6723 * Is this right?
6724 */
6725 QETH_DBF_TEXT(trace, 2, "stoplan");
6726
6727 rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, QETH_PROT_IPV4);
6728 return rc;
6729}
6730
6731static int
6732qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply,
6733 unsigned long data)
6734{
6735 struct qeth_ipa_cmd *cmd;
6736
6737 QETH_DBF_TEXT(setup, 2, "qipasscb");
6738
6739 cmd = (struct qeth_ipa_cmd *) data;
6740 if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
6741 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
6742 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
9123e0d7
UB
6743 /* Disable IPV6 support hard coded for Hipersockets */
6744 if(card->info.type == QETH_CARD_TYPE_IQD)
6745 card->options.ipa4.supported_funcs &= ~IPA_IPV6;
1da177e4
LT
6746 } else {
6747#ifdef CONFIG_QETH_IPV6
6748 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
6749 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
6750#endif
6751 }
6752 QETH_DBF_TEXT(setup, 2, "suppenbl");
6753 QETH_DBF_TEXT_(setup, 2, "%x",cmd->hdr.ipa_supported);
6754 QETH_DBF_TEXT_(setup, 2, "%x",cmd->hdr.ipa_enabled);
6755 return 0;
6756}
6757
6758static int
6759qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
6760{
6761 int rc;
6762 struct qeth_cmd_buffer *iob;
6763
6764 QETH_DBF_TEXT_(setup, 2, "qipassi%i", prot);
6765 if (card->options.layer2) {
6766 QETH_DBF_TEXT(setup, 2, "noprmly2");
6767 return -EPERM;
6768 }
6769
6770 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_QIPASSIST,prot);
6771 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
6772 return rc;
6773}
6774
6775static struct qeth_cmd_buffer *
6776qeth_get_setassparms_cmd(struct qeth_card *card, enum qeth_ipa_funcs ipa_func,
6777 __u16 cmd_code, __u16 len,
6778 enum qeth_prot_versions prot)
6779{
6780 struct qeth_cmd_buffer *iob;
6781 struct qeth_ipa_cmd *cmd;
6782
6783 QETH_DBF_TEXT(trace,4,"getasscm");
6784 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETASSPARMS,prot);
6785
6786 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6787 cmd->data.setassparms.hdr.assist_no = ipa_func;
6788 cmd->data.setassparms.hdr.length = 8 + len;
6789 cmd->data.setassparms.hdr.command_code = cmd_code;
6790 cmd->data.setassparms.hdr.return_code = 0;
6791 cmd->data.setassparms.hdr.seq_no = 0;
6792
6793 return iob;
6794}
6795
6796static int
6797qeth_send_setassparms(struct qeth_card *card, struct qeth_cmd_buffer *iob,
6798 __u16 len, long data,
6799 int (*reply_cb)
6800 (struct qeth_card *,struct qeth_reply *,unsigned long),
6801 void *reply_param)
6802{
6803 int rc;
6804 struct qeth_ipa_cmd *cmd;
6805
6806 QETH_DBF_TEXT(trace,4,"sendassp");
6807
6808 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6809 if (len <= sizeof(__u32))
6810 cmd->data.setassparms.data.flags_32bit = (__u32) data;
6811 else if (len > sizeof(__u32))
6812 memcpy(&cmd->data.setassparms.data, (void *) data, len);
6813
6814 rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
6815 return rc;
6816}
6817
6818#ifdef CONFIG_QETH_IPV6
6819static int
6820qeth_send_simple_setassparms_ipv6(struct qeth_card *card,
6821 enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
6822
6823{
6824 int rc;
6825 struct qeth_cmd_buffer *iob;
6826
6827 QETH_DBF_TEXT(trace,4,"simassp6");
6828 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
6829 0, QETH_PROT_IPV6);
6830 rc = qeth_send_setassparms(card, iob, 0, 0,
6831 qeth_default_setassparms_cb, NULL);
6832 return rc;
6833}
6834#endif
6835
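/*
 * IPv4 variant: a non-zero 'data' argument is sent as a single 32 bit
 * flag word, otherwise the SETASSPARMS command carries no parameter
 * data.
 */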
6836static int
6837qeth_send_simple_setassparms(struct qeth_card *card,
6838 enum qeth_ipa_funcs ipa_func,
6839 __u16 cmd_code, long data)
6840{
6841 int rc;
6842 int length = 0;
6843 struct qeth_cmd_buffer *iob;
6844
6845 QETH_DBF_TEXT(trace,4,"simassp4");
6846 if (data)
6847 length = sizeof(__u32);
6848 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
6849 length, QETH_PROT_IPV4);
6850 rc = qeth_send_setassparms(card, iob, length, data,
6851 qeth_default_setassparms_cb, NULL);
6852 return rc;
6853}
6854
6855static inline int
6856qeth_start_ipa_arp_processing(struct qeth_card *card)
6857{
6858 int rc;
6859
6860 QETH_DBF_TEXT(trace,3,"ipaarp");
6861
6862 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
6863 PRINT_WARN("ARP processing not supported "
6864 "on %s!\n", QETH_CARD_IFNAME(card));
6865 return 0;
6866 }
6867 rc = qeth_send_simple_setassparms(card,IPA_ARP_PROCESSING,
6868 IPA_CMD_ASS_START, 0);
6869 if (rc) {
6870 PRINT_WARN("Could not start ARP processing "
6871 "assist on %s: 0x%x\n",
6872 QETH_CARD_IFNAME(card), rc);
6873 }
6874 return rc;
6875}
6876
6877static int
6878qeth_start_ipa_ip_fragmentation(struct qeth_card *card)
6879{
6880 int rc;
6881
6882 QETH_DBF_TEXT(trace,3,"ipaipfrg");
6883
6884 if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
6885 PRINT_INFO("Hardware IP fragmentation not supported on %s\n",
6886 QETH_CARD_IFNAME(card));
6887 return -EOPNOTSUPP;
6888 }
6889
6890 rc = qeth_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
6891 IPA_CMD_ASS_START, 0);
6892 if (rc) {
6893 PRINT_WARN("Could not start Hardware IP fragmentation "
6894 "assist on %s: 0x%x\n",
6895 QETH_CARD_IFNAME(card), rc);
6896 } else
6897 PRINT_INFO("Hardware IP fragmentation enabled \n");
6898 return rc;
6899}
6900
6901static int
6902qeth_start_ipa_source_mac(struct qeth_card *card)
6903{
6904 int rc;
6905
6906 QETH_DBF_TEXT(trace,3,"stsrcmac");
6907
6908 if (!card->options.fake_ll)
6909 return -EOPNOTSUPP;
6910
6911 if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
6912 PRINT_INFO("Inbound source address not "
6913 "supported on %s\n", QETH_CARD_IFNAME(card));
6914 return -EOPNOTSUPP;
6915 }
6916
6917 rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC,
6918 IPA_CMD_ASS_START, 0);
6919 if (rc)
6920 PRINT_WARN("Could not start inbound source "
6921 "assist on %s: 0x%x\n",
6922 QETH_CARD_IFNAME(card), rc);
6923 return rc;
6924}
6925
6926static int
6927qeth_start_ipa_vlan(struct qeth_card *card)
6928{
6929 int rc = 0;
6930
6931 QETH_DBF_TEXT(trace,3,"strtvlan");
6932
6933#ifdef CONFIG_QETH_VLAN
6934 if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
6935 PRINT_WARN("VLAN not supported on %s\n", QETH_CARD_IFNAME(card));
6936 return -EOPNOTSUPP;
6937 }
6938
6939 rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO,
6940 IPA_CMD_ASS_START,0);
6941 if (rc) {
6942 PRINT_WARN("Could not start vlan "
6943 "assist on %s: 0x%x\n",
6944 QETH_CARD_IFNAME(card), rc);
6945 } else {
6946 PRINT_INFO("VLAN enabled \n");
6947 card->dev->features |=
6948 NETIF_F_HW_VLAN_FILTER |
6949 NETIF_F_HW_VLAN_TX |
6950 NETIF_F_HW_VLAN_RX;
6951 }
6952#endif /* QETH_VLAN */
6953 return rc;
6954}
6955
6956static int
6957qeth_start_ipa_multicast(struct qeth_card *card)
6958{
6959 int rc;
6960
6961 QETH_DBF_TEXT(trace,3,"stmcast");
6962
6963 if (!qeth_is_supported(card, IPA_MULTICASTING)) {
6964 PRINT_WARN("Multicast not supported on %s\n",
6965 QETH_CARD_IFNAME(card));
6966 return -EOPNOTSUPP;
6967 }
6968
6969 rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING,
6970 IPA_CMD_ASS_START,0);
6971 if (rc) {
6972 PRINT_WARN("Could not start multicast "
6973 "assist on %s: rc=%i\n",
6974 QETH_CARD_IFNAME(card), rc);
6975 } else {
6976 PRINT_INFO("Multicast enabled\n");
6977 card->dev->flags |= IFF_MULTICAST;
6978 }
6979 return rc;
6980}
6981
6982#ifdef CONFIG_QETH_IPV6
6983static int
6984qeth_softsetup_ipv6(struct qeth_card *card)
6985{
6986 int rc;
6987
6988 QETH_DBF_TEXT(trace,3,"softipv6");
6989
6990 netif_stop_queue(card->dev);
6991 rc = qeth_send_startlan(card, QETH_PROT_IPV6);
6992 if (rc) {
6993 PRINT_ERR("IPv6 startlan failed on %s\n",
6994 QETH_CARD_IFNAME(card));
6995 return rc;
6996 }
6997 netif_wake_queue(card->dev);
6998 rc = qeth_query_ipassists(card,QETH_PROT_IPV6);
6999 if (rc) {
7000 PRINT_ERR("IPv6 query ipassist failed on %s\n",
7001 QETH_CARD_IFNAME(card));
7002 return rc;
7003 }
7004 rc = qeth_send_simple_setassparms(card, IPA_IPV6,
7005 IPA_CMD_ASS_START, 3);
7006 if (rc) {
7007 PRINT_WARN("IPv6 start assist (version 4) failed "
7008 "on %s: 0x%x\n",
7009 QETH_CARD_IFNAME(card), rc);
7010 return rc;
7011 }
7012 rc = qeth_send_simple_setassparms_ipv6(card, IPA_IPV6,
7013 IPA_CMD_ASS_START);
7014 if (rc) {
7015 PRINT_WARN("IPV6 start assist (version 6) failed "
7016 "on %s: 0x%x\n",
7017 QETH_CARD_IFNAME(card), rc);
7018 return rc;
7019 }
7020 rc = qeth_send_simple_setassparms_ipv6(card, IPA_PASSTHRU,
7021 IPA_CMD_ASS_START);
7022 if (rc) {
7023 PRINT_WARN("Could not enable passthrough "
7024 "on %s: 0x%x\n",
7025 QETH_CARD_IFNAME(card), rc);
7026 return rc;
7027 }
7028 PRINT_INFO("IPV6 enabled \n");
7029 return 0;
7030}
7031
7032#endif
7033
7034static int
7035qeth_start_ipa_ipv6(struct qeth_card *card)
7036{
7037 int rc = 0;
7038#ifdef CONFIG_QETH_IPV6
7039 QETH_DBF_TEXT(trace,3,"strtipv6");
7040
7041 if (!qeth_is_supported(card, IPA_IPV6)) {
7042 PRINT_WARN("IPv6 not supported on %s\n",
7043 QETH_CARD_IFNAME(card));
7044 return 0;
7045 }
7046 rc = qeth_softsetup_ipv6(card);
7047#endif
7048 return rc ;
7049}
7050
7051static int
7052qeth_start_ipa_broadcast(struct qeth_card *card)
7053{
7054 int rc;
7055
7056 QETH_DBF_TEXT(trace,3,"stbrdcst");
7057 card->info.broadcast_capable = 0;
7058 if (!qeth_is_supported(card, IPA_FILTERING)) {
7059 PRINT_WARN("Broadcast not supported on %s\n",
7060 QETH_CARD_IFNAME(card));
7061 rc = -EOPNOTSUPP;
7062 goto out;
7063 }
7064 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
7065 IPA_CMD_ASS_START, 0);
7066 if (rc) {
7067 PRINT_WARN("Could not enable broadcasting filtering "
7068 "on %s: 0x%x\n",
7069 QETH_CARD_IFNAME(card), rc);
7070 goto out;
7071 }
7072
7073 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
7074 IPA_CMD_ASS_CONFIGURE, 1);
7075 if (rc) {
7076 PRINT_WARN("Could not set up broadcast filtering on %s: 0x%x\n",
7077 QETH_CARD_IFNAME(card), rc);
7078 goto out;
7079 }
7080 card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
7081 PRINT_INFO("Broadcast enabled \n");
7082 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
7083 IPA_CMD_ASS_ENABLE, 1);
7084 if (rc) {
7085 PRINT_WARN("Could not set up broadcast echo filtering on "
7086 "%s: 0x%x\n", QETH_CARD_IFNAME(card), rc);
7087 goto out;
7088 }
7089 card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
7090out:
7091 if (card->info.broadcast_capable)
7092 card->dev->flags |= IFF_BROADCAST;
7093 else
7094 card->dev->flags &= ~IFF_BROADCAST;
7095 return rc;
7096}
7097
7098static int
7099qeth_send_checksum_command(struct qeth_card *card)
7100{
7101 int rc;
7102
7103 rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
7104 IPA_CMD_ASS_START, 0);
7105 if (rc) {
7106 PRINT_WARN("Starting Inbound HW Checksumming failed on %s: "
7107 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
7108 QETH_CARD_IFNAME(card), rc);
7109 return rc;
7110 }
7111 rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
7112 IPA_CMD_ASS_ENABLE,
7113 card->info.csum_mask);
7114 if (rc) {
7115 PRINT_WARN("Enabling Inbound HW Checksumming failed on %s: "
7116 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
7117 QETH_CARD_IFNAME(card), rc);
7118 return rc;
7119 }
7120 return 0;
7121}
7122
7123static int
7124qeth_start_ipa_checksum(struct qeth_card *card)
7125{
7126 int rc = 0;
7127
7128 QETH_DBF_TEXT(trace,3,"strtcsum");
7129
7130 if (card->options.checksum_type == NO_CHECKSUMMING) {
7131 PRINT_WARN("Using no checksumming on %s.\n",
7132 QETH_CARD_IFNAME(card));
7133 return 0;
7134 }
7135 if (card->options.checksum_type == SW_CHECKSUMMING) {
7136 PRINT_WARN("Using SW checksumming on %s.\n",
7137 QETH_CARD_IFNAME(card));
7138 return 0;
7139 }
7140 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
7141 PRINT_WARN("Inbound HW Checksumming not "
7142 "supported on %s,\ncontinuing "
7143 "using Inbound SW Checksumming\n",
7144 QETH_CARD_IFNAME(card));
7145 card->options.checksum_type = SW_CHECKSUMMING;
7146 return 0;
7147 }
7148 rc = qeth_send_checksum_command(card);
7149 if (!rc) {
7150 PRINT_INFO("HW Checksumming (inbound) enabled \n");
7151 }
7152 return rc;
7153}
7154
7155static int
7156qeth_start_ipa_tso(struct qeth_card *card)
7157{
7158 int rc;
7159
7160 QETH_DBF_TEXT(trace,3,"sttso");
7161
7162 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
7163 PRINT_WARN("Outbound TSO not supported on %s\n",
7164 QETH_CARD_IFNAME(card));
7165 rc = -EOPNOTSUPP;
7166 } else {
7167 rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
7168 IPA_CMD_ASS_START,0);
7169 if (rc)
7170 PRINT_WARN("Could not start outbound TSO "
7171 "assist on %s: rc=%i\n",
7172 QETH_CARD_IFNAME(card), rc);
7173 else
7174 PRINT_INFO("Outbound TSO enabled\n");
7175 }
7176 if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)){
7177 card->options.large_send = QETH_LARGE_SEND_NO;
7178 card->dev->features &= ~ (NETIF_F_TSO | NETIF_F_SG);
7179 }
7180 return rc;
7181}
7182
7183static int
7184qeth_start_ipassists(struct qeth_card *card)
7185{
7186 QETH_DBF_TEXT(trace,3,"strtipas");
7187 qeth_start_ipa_arp_processing(card); /* go on*/
7188 qeth_start_ipa_ip_fragmentation(card); /* go on*/
7189 qeth_start_ipa_source_mac(card); /* go on*/
7190 qeth_start_ipa_vlan(card); /* go on*/
7191 qeth_start_ipa_multicast(card); /* go on*/
7192 qeth_start_ipa_ipv6(card); /* go on*/
7193 qeth_start_ipa_broadcast(card); /* go on*/
7194 qeth_start_ipa_checksum(card); /* go on*/
7195 qeth_start_ipa_tso(card); /* go on*/
7196 return 0;
7197}
7198
7199static int
7200qeth_send_setrouting(struct qeth_card *card, enum qeth_routing_types type,
7201 enum qeth_prot_versions prot)
7202{
7203 int rc;
7204 struct qeth_ipa_cmd *cmd;
7205 struct qeth_cmd_buffer *iob;
7206
7207 QETH_DBF_TEXT(trace,4,"setroutg");
7208 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
7209 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
7210 cmd->data.setrtg.type = (type);
7211 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
7212
7213 return rc;
7214
7215}
7216
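/*
 * Validate the configured routing role: HiperSockets (IQD) devices
 * accept no_router, primary/secondary connector and multicast router;
 * OSA devices accept no/primary/secondary router, and multicast router
 * only if the corresponding IPA function is supported. Anything else
 * falls back to NO_ROUTER with a warning.
 */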
7217static void
7218qeth_correct_routing_type(struct qeth_card *card, enum qeth_routing_types *type,
7219 enum qeth_prot_versions prot)
7220{
7221 if (card->info.type == QETH_CARD_TYPE_IQD) {
7222 switch (*type) {
7223 case NO_ROUTER:
7224 case PRIMARY_CONNECTOR:
7225 case SECONDARY_CONNECTOR:
7226 case MULTICAST_ROUTER:
7227 return;
7228 default:
7229 goto out_inval;
7230 }
7231 } else {
7232 switch (*type) {
7233 case NO_ROUTER:
7234 case PRIMARY_ROUTER:
7235 case SECONDARY_ROUTER:
7236 return;
7237 case MULTICAST_ROUTER:
7238 if (qeth_is_ipafunc_supported(card, prot,
7239 IPA_OSA_MC_ROUTER))
7240 return;
7241 default:
7242 goto out_inval;
7243 }
7244 }
7245out_inval:
7246 PRINT_WARN("Routing type '%s' not supported for interface %s.\n"
7247 "Router status set to 'no router'.\n",
7248 ((*type == PRIMARY_ROUTER)? "primary router" :
7249 (*type == SECONDARY_ROUTER)? "secondary router" :
7250 (*type == PRIMARY_CONNECTOR)? "primary connector" :
7251 (*type == SECONDARY_CONNECTOR)? "secondary connector" :
7252 (*type == MULTICAST_ROUTER)? "multicast router" :
7253 "unknown"),
7254 card->dev->name);
7255 *type = NO_ROUTER;
7256}
7257
7258int
7259qeth_setrouting_v4(struct qeth_card *card)
7260{
7261 int rc;
7262
7263 QETH_DBF_TEXT(trace,3,"setrtg4");
7264
7265 qeth_correct_routing_type(card, &card->options.route4.type,
7266 QETH_PROT_IPV4);
7267
7268 rc = qeth_send_setrouting(card, card->options.route4.type,
7269 QETH_PROT_IPV4);
7270 if (rc) {
7271 card->options.route4.type = NO_ROUTER;
7272 PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
7273 "Type set to 'no router'.\n",
7274 rc, QETH_CARD_IFNAME(card));
7275 }
7276 return rc;
7277}
7278
7279int
7280qeth_setrouting_v6(struct qeth_card *card)
7281{
7282 int rc = 0;
7283
7284 QETH_DBF_TEXT(trace,3,"setrtg6");
7285#ifdef CONFIG_QETH_IPV6
7286
7287 qeth_correct_routing_type(card, &card->options.route6.type,
7288 QETH_PROT_IPV6);
7289
7290 if ((card->options.route6.type == NO_ROUTER) ||
7291 ((card->info.type == QETH_CARD_TYPE_OSAE) &&
7292 (card->options.route6.type == MULTICAST_ROUTER) &&
7293 !qeth_is_supported6(card,IPA_OSA_MC_ROUTER)))
7294 return 0;
7295 rc = qeth_send_setrouting(card, card->options.route6.type,
7296 QETH_PROT_IPV6);
7297 if (rc) {
7298 card->options.route6.type = NO_ROUTER;
7299 PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
7300 "Type set to 'no router'.\n",
7301 rc, QETH_CARD_IFNAME(card));
7302 }
7303#endif
7304 return rc;
7305}
7306
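/*
 * Switch the large send mode; if a netdevice already exists its queue
 * is stopped while NETIF_F_TSO/NETIF_F_SG are toggled, and TSO is only
 * accepted when the card supports the outbound TSO assist.
 */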
7307int
9cb90de8 7308qeth_set_large_send(struct qeth_card *card, enum qeth_large_send_types type)
1da177e4
LT
7309{
7310 int rc = 0;
7311
9cb90de8
FP
7312 if (card->dev == NULL) {
7313 card->options.large_send = type;
1da177e4 7314 return 0;
9cb90de8 7315 }
1da177e4 7316 netif_stop_queue(card->dev);
9cb90de8 7317 card->options.large_send = type;
1da177e4
LT
7318 switch (card->options.large_send) {
7319 case QETH_LARGE_SEND_EDDP:
7320 card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
7321 break;
7322 case QETH_LARGE_SEND_TSO:
7323 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)){
7324 card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
7325 } else {
7326 PRINT_WARN("TSO not supported on %s. "
7327 "large_send set to 'no'.\n",
7328 card->dev->name);
7329 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
7330 card->options.large_send = QETH_LARGE_SEND_NO;
7331 rc = -EOPNOTSUPP;
7332 }
7333 break;
7334 default: /* includes QETH_LARGE_SEND_NO */
7335 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
7336 break;
7337 }
1da177e4
LT
7338 netif_wake_queue(card->dev);
7339 return rc;
7340}
7341
7342/*
7343 * softsetup card: init IPA stuff
7344 */
7345static int
7346qeth_softsetup_card(struct qeth_card *card)
7347{
7348 int rc;
7349
7350 QETH_DBF_TEXT(setup, 2, "softsetp");
7351
7352 if ((rc = qeth_send_startlan(card, QETH_PROT_IPV4))){
7353 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
7354 if (rc == 0xe080){
7355 PRINT_WARN("LAN on card %s if offline! "
7356 "Continuing softsetup.\n",
7357 CARD_BUS_ID(card));
7358 card->lan_online = 0;
7359 } else
7360 return rc;
7361 } else
7362 card->lan_online = 1;
500f83ab
UB
7363 if (card->info.type==QETH_CARD_TYPE_OSN)
7364 goto out;
1da177e4
LT
7365 if (card->options.layer2) {
7366 card->dev->features |=
7367 NETIF_F_HW_VLAN_FILTER |
7368 NETIF_F_HW_VLAN_TX |
7369 NETIF_F_HW_VLAN_RX;
7370 card->dev->flags|=IFF_MULTICAST|IFF_BROADCAST;
7371 card->info.broadcast_capable=1;
7372 if ((rc = qeth_layer2_initialize(card))) {
7373 QETH_DBF_TEXT_(setup, 2, "L2err%d", rc);
7374 return rc;
7375 }
7376#ifdef CONFIG_QETH_VLAN
7377 qeth_layer2_process_vlans(card, 0);
7378#endif
7379 goto out;
7380 }
7381 if ((card->options.large_send == QETH_LARGE_SEND_EDDP) ||
7382 (card->options.large_send == QETH_LARGE_SEND_TSO))
7383 card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
7384 else
7385 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
7386
7387 if ((rc = qeth_setadapter_parms(card)))
7388 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
7389 if ((rc = qeth_start_ipassists(card)))
7390 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
7391 if ((rc = qeth_setrouting_v4(card)))
7392 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
7393 if ((rc = qeth_setrouting_v6(card)))
7394 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
7395out:
7396 netif_stop_queue(card->dev);
7397 return 0;
7398}
7399
7400#ifdef CONFIG_QETH_IPV6
7401static int
7402qeth_get_unique_id_cb(struct qeth_card *card, struct qeth_reply *reply,
7403 unsigned long data)
7404{
7405 struct qeth_ipa_cmd *cmd;
7406
7407 cmd = (struct qeth_ipa_cmd *) data;
7408 if (cmd->hdr.return_code == 0)
7409 card->info.unique_id = *((__u16 *)
7410 &cmd->data.create_destroy_addr.unique_id[6]);
7411 else {
7412 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7413 UNIQUE_ID_NOT_BY_CARD;
7414 PRINT_WARN("couldn't get a unique id from the card on device "
7415 "%s (result=x%x), using default id. ipv6 "
7416 "autoconfig on other lpars may lead to duplicate "
7417 "ip addresses. please use manually "
7418 "configured ones.\n",
7419 CARD_BUS_ID(card), cmd->hdr.return_code);
7420 }
7421 return 0;
7422}
7423#endif
7424
7425static int
7426qeth_put_unique_id(struct qeth_card *card)
7427{
7428
7429 int rc = 0;
7430#ifdef CONFIG_QETH_IPV6
7431 struct qeth_cmd_buffer *iob;
7432 struct qeth_ipa_cmd *cmd;
7433
7434 QETH_DBF_TEXT(trace,2,"puniqeid");
7435
7436 if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
7437 UNIQUE_ID_NOT_BY_CARD)
7438 return -1;
7439 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
7440 QETH_PROT_IPV6);
7441 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
7442 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
7443 card->info.unique_id;
7444 memcpy(&cmd->data.create_destroy_addr.unique_id[0],
7445 card->dev->dev_addr, OSA_ADDR_LEN);
7446 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
7447#else
7448 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7449 UNIQUE_ID_NOT_BY_CARD;
7450#endif
7451 return rc;
7452}
7453
7454/**
7455 * Clear IP List
7456 */
7457static void
7458qeth_clear_ip_list(struct qeth_card *card, int clean, int recover)
7459{
7460 struct qeth_ipaddr *addr, *tmp;
7461 unsigned long flags;
7462
7463 QETH_DBF_TEXT(trace,4,"clearip");
7464 spin_lock_irqsave(&card->ip_lock, flags);
7465 /* clear todo list */
7466 list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry){
7467 list_del(&addr->entry);
7468 kfree(addr);
7469 }
7470
7471 while (!list_empty(&card->ip_list)) {
7472 addr = list_entry(card->ip_list.next,
7473 struct qeth_ipaddr, entry);
7474 list_del_init(&addr->entry);
7475 if (clean) {
7476 spin_unlock_irqrestore(&card->ip_lock, flags);
7477 qeth_deregister_addr_entry(card, addr);
7478 spin_lock_irqsave(&card->ip_lock, flags);
7479 }
7480 if (!recover || addr->is_multicast) {
7481 kfree(addr);
7482 continue;
7483 }
7484 list_add_tail(&addr->entry, card->ip_tbd_list);
7485 }
7486 spin_unlock_irqrestore(&card->ip_lock, flags);
7487}
7488
7489static void
7490qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
7491 int clear_start_mask)
7492{
7493 unsigned long flags;
7494
7495 spin_lock_irqsave(&card->thread_mask_lock, flags);
7496 card->thread_allowed_mask = threads;
7497 if (clear_start_mask)
7498 card->thread_start_mask &= threads;
7499 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
7500 wake_up(&card->wait_q);
7501}
7502
7503static inline int
7504qeth_threads_running(struct qeth_card *card, unsigned long threads)
7505{
7506 unsigned long flags;
7507 int rc = 0;
7508
7509 spin_lock_irqsave(&card->thread_mask_lock, flags);
7510 rc = (card->thread_running_mask & threads);
7511 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
7512 return rc;
7513}
7514
7515static int
7516qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
7517{
7518 return wait_event_interruptible(card->wait_q,
7519 qeth_threads_running(card, threads) == 0);
7520}
7521
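/*
 * Editorial summary of the teardown path below, derived from the code:
 * qeth_stop_card() walks the card state machine backwards.  From
 * CARD_STATE_UP it stops or closes the net device and, unless a hard
 * stop was requested, issues DELMAC and STOPLAN; from
 * CARD_STATE_SOFTSETUP it drops layer 2 VLANs (if configured), the IP
 * list and the pending IPA commands; from CARD_STATE_HARDSETUP it
 * returns the unique id, clears the QDIO queues/buffers and the working
 * pool; once in CARD_STATE_DOWN the read and write command buffers are
 * cleared as well.
 */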
7522static int
05e08a2a 7523qeth_stop_card(struct qeth_card *card, int recovery_mode)
1da177e4
LT
7524{
7525 int rc = 0;
7526
7527 QETH_DBF_TEXT(setup ,2,"stopcard");
7528 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
7529
7530 qeth_set_allowed_threads(card, 0, 1);
7531 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD))
7532 return -ERESTARTSYS;
7533 if (card->read.state == CH_STATE_UP &&
7534 card->write.state == CH_STATE_UP &&
7535 (card->state == CARD_STATE_UP)) {
500f83ab
UB
7536 if (recovery_mode &&
7537 card->info.type != QETH_CARD_TYPE_OSN) {
05e08a2a
FP
7538 qeth_stop(card->dev);
7539 } else {
7540 rtnl_lock();
7541 dev_close(card->dev);
7542 rtnl_unlock();
7543 }
1da177e4
LT
7544 if (!card->use_hard_stop) {
7545 __u8 *mac = &card->dev->dev_addr[0];
7546 rc = qeth_layer2_send_delmac(card, mac);
7547 QETH_DBF_TEXT_(setup, 2, "Lerr%d", rc);
7548 if ((rc = qeth_send_stoplan(card)))
7549 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
7550 }
7551 card->state = CARD_STATE_SOFTSETUP;
7552 }
7553 if (card->state == CARD_STATE_SOFTSETUP) {
7554#ifdef CONFIG_QETH_VLAN
7555 if (card->options.layer2)
7556 qeth_layer2_process_vlans(card, 1);
7557#endif
7558 qeth_clear_ip_list(card, !card->use_hard_stop, 1);
7559 qeth_clear_ipacmd_list(card);
7560 card->state = CARD_STATE_HARDSETUP;
7561 }
7562 if (card->state == CARD_STATE_HARDSETUP) {
7563 if ((!card->use_hard_stop) &&
7564 (!card->options.layer2))
7565 if ((rc = qeth_put_unique_id(card)))
7566 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
7567 qeth_qdio_clear_card(card, 0);
7568 qeth_clear_qdio_buffers(card);
7569 qeth_clear_working_pool_list(card);
7570 card->state = CARD_STATE_DOWN;
7571 }
7572 if (card->state == CARD_STATE_DOWN) {
7573 qeth_clear_cmd_buffers(&card->read);
7574 qeth_clear_cmd_buffers(&card->write);
7575 }
7576 card->use_hard_stop = 0;
7577 return rc;
7578}
7579
7580
7581static int
7582qeth_get_unique_id(struct qeth_card *card)
7583{
7584 int rc = 0;
7585#ifdef CONFIG_QETH_IPV6
7586 struct qeth_cmd_buffer *iob;
7587 struct qeth_ipa_cmd *cmd;
7588
7589 QETH_DBF_TEXT(setup, 2, "guniqeid");
7590
7591 if (!qeth_is_supported(card,IPA_IPV6)) {
7592 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7593 UNIQUE_ID_NOT_BY_CARD;
7594 return 0;
7595 }
7596
7597 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
7598 QETH_PROT_IPV6);
7599 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
7600 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
7601 card->info.unique_id;
7602
7603 rc = qeth_send_ipa_cmd(card, iob, qeth_get_unique_id_cb, NULL);
7604#else
7605 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7606 UNIQUE_ID_NOT_BY_CARD;
7607#endif
7608 return rc;
7609}
7610static void
7611qeth_print_status_with_portname(struct qeth_card *card)
7612{
7613 char dbf_text[15];
7614 int i;
7615
7616 sprintf(dbf_text, "%s", card->info.portname + 1);
7617 for (i = 0; i < 8; i++)
7618 dbf_text[i] =
7619 (char) _ebcasc[(__u8) dbf_text[i]];
7620 dbf_text[8] = 0;
7621 printk("qeth: Device %s/%s/%s is a%s card%s%s%s\n"
7622 "with link type %s (portname: %s)\n",
7623 CARD_RDEV_ID(card),
7624 CARD_WDEV_ID(card),
7625 CARD_DDEV_ID(card),
7626 qeth_get_cardname(card),
7627 (card->info.mcl_level[0]) ? " (level: " : "",
7628 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
7629 (card->info.mcl_level[0]) ? ")" : "",
7630 qeth_get_cardname_short(card),
7631 dbf_text);
7632
7633}
7634
7635static void
7636qeth_print_status_no_portname(struct qeth_card *card)
7637{
7638 if (card->info.portname[0])
7639 printk("qeth: Device %s/%s/%s is a%s "
7640 "card%s%s%s\nwith link type %s "
7641 "(no portname needed by interface).\n",
7642 CARD_RDEV_ID(card),
7643 CARD_WDEV_ID(card),
7644 CARD_DDEV_ID(card),
7645 qeth_get_cardname(card),
7646 (card->info.mcl_level[0]) ? " (level: " : "",
7647 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
7648 (card->info.mcl_level[0]) ? ")" : "",
7649 qeth_get_cardname_short(card));
7650 else
7651 printk("qeth: Device %s/%s/%s is a%s "
7652 "card%s%s%s\nwith link type %s.\n",
7653 CARD_RDEV_ID(card),
7654 CARD_WDEV_ID(card),
7655 CARD_DDEV_ID(card),
7656 qeth_get_cardname(card),
7657 (card->info.mcl_level[0]) ? " (level: " : "",
7658 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
7659 (card->info.mcl_level[0]) ? ")" : "",
7660 qeth_get_cardname_short(card));
7661}
7662
7663static void
7664qeth_print_status_message(struct qeth_card *card)
7665{
7666 switch (card->info.type) {
7667 case QETH_CARD_TYPE_OSAE:
7668 /* VM uses a non-zero first character to indicate
7669 * HiperSockets-like reporting of the mcl_level;
7670 * OSA sets the first character to zero.
7671 */
7672 if (!card->info.mcl_level[0]) {
7673 sprintf(card->info.mcl_level,"%02x%02x",
7674 card->info.mcl_level[2],
7675 card->info.mcl_level[3]);
7676
7677 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
7678 break;
7679 }
7680 /* fallthrough */
7681 case QETH_CARD_TYPE_IQD:
7682 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
7683 card->info.mcl_level[0]];
7684 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
7685 card->info.mcl_level[1]];
7686 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
7687 card->info.mcl_level[2]];
7688 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
7689 card->info.mcl_level[3]];
7690 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
7691 break;
7692 default:
7693 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
7694 }
7695 if (card->info.portname_required)
7696 qeth_print_status_with_portname(card);
7697 else
7698 qeth_print_status_no_portname(card);
7699}
7700
7701static int
7702qeth_register_netdev(struct qeth_card *card)
7703{
7704 QETH_DBF_TEXT(setup, 3, "regnetd");
7705 if (card->dev->reg_state != NETREG_UNINITIALIZED) {
7706 qeth_netdev_init(card->dev);
7707 return 0;
7708 }
7709 /* sysfs magic */
7710 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
7711 return register_netdev(card->dev);
7712}
7713
7714static void
05e08a2a 7715qeth_start_again(struct qeth_card *card, int recovery_mode)
1da177e4
LT
7716{
7717 QETH_DBF_TEXT(setup ,2, "startag");
7718
500f83ab
UB
7719 if (recovery_mode &&
7720 card->info.type != QETH_CARD_TYPE_OSN) {
05e08a2a
FP
7721 qeth_open(card->dev);
7722 } else {
7723 rtnl_lock();
7724 dev_open(card->dev);
7725 rtnl_unlock();
7726 }
1da177e4
LT
7727 /* this also sets saved unicast addresses */
7728 qeth_set_multicast_list(card->dev);
7729}
7730
7731
7732/* Layer 2 specific stuff */
7733#define IGNORE_PARAM_EQ(option,value,reset_value,msg) \
7734 if (card->options.option == value) { \
7735 PRINT_ERR("%s not supported with layer 2 " \
7736 "functionality, ignoring option on read" \
7737 "channel device %s .\n",msg,CARD_RDEV_ID(card)); \
7738 card->options.option = reset_value; \
7739 }
7740#define IGNORE_PARAM_NEQ(option,value,reset_value,msg) \
7741 if (card->options.option != value) { \
7742 PRINT_ERR("%s not supported with layer 2 " \
7743 "functionality, ignoring option on read" \
7744 "channel device %s .\n",msg,CARD_RDEV_ID(card)); \
7745 card->options.option = reset_value; \
7746 }
7747
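/*
 * Illustrative expansion (editorial) of the macros above: with layer 2
 * enabled, every layer 3 only option is reset to its default and a
 * warning is printed.  For example,
 * IGNORE_PARAM_NEQ(route4.type, NO_ROUTER, NO_ROUTER, "Routing options are")
 * expands roughly to:
 *
 *	if (card->options.route4.type != NO_ROUTER) {
 *		PRINT_ERR("%s not supported with layer 2 "
 *			  "functionality, ignoring option on read "
 *			  "channel device %s.\n",
 *			  "Routing options are", CARD_RDEV_ID(card));
 *		card->options.route4.type = NO_ROUTER;
 *	}
 */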
7748
7749static void qeth_make_parameters_consistent(struct qeth_card *card)
7750{
7751
500f83ab
UB
7752 if (card->options.layer2 == 0)
7753 return;
7754 if (card->info.type == QETH_CARD_TYPE_OSN)
7755 return;
7756 if (card->info.type == QETH_CARD_TYPE_IQD) {
7757 PRINT_ERR("Device %s does not support layer 2 functionality." \
7758 " Ignoring layer2 option.\n",CARD_BUS_ID(card));
7759 card->options.layer2 = 0;
7760 return;
7761 }
7762 IGNORE_PARAM_NEQ(route4.type, NO_ROUTER, NO_ROUTER,
7763 "Routing options are");
1da177e4 7764#ifdef CONFIG_QETH_IPV6
500f83ab
UB
7765 IGNORE_PARAM_NEQ(route6.type, NO_ROUTER, NO_ROUTER,
7766 "Routing options are");
1da177e4 7767#endif
500f83ab
UB
7768 IGNORE_PARAM_EQ(checksum_type, HW_CHECKSUMMING,
7769 QETH_CHECKSUM_DEFAULT,
7770 "Checksumming options are");
7771 IGNORE_PARAM_NEQ(broadcast_mode, QETH_TR_BROADCAST_ALLRINGS,
7772 QETH_TR_BROADCAST_ALLRINGS,
7773 "Broadcast mode options are");
7774 IGNORE_PARAM_NEQ(macaddr_mode, QETH_TR_MACADDR_NONCANONICAL,
7775 QETH_TR_MACADDR_NONCANONICAL,
7776 "Canonical MAC addr options are");
7777 IGNORE_PARAM_NEQ(fake_broadcast, 0, 0,
7778 "Broadcast faking options are");
7779 IGNORE_PARAM_NEQ(add_hhlen, DEFAULT_ADD_HHLEN,
7780 DEFAULT_ADD_HHLEN,"Option add_hhlen is");
7781 IGNORE_PARAM_NEQ(fake_ll, 0, 0,"Option fake_ll is");
1da177e4
LT
7782}
7783
7784
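/*
 * Editorial summary of the bring-up path below, derived from the code:
 * __qeth_set_online() sets the read/write/data ccw devices online, makes
 * the options consistent with layer 2 (if selected), runs the hard setup
 * (CARD_STATE_HARDSETUP), queries the IPv4 IP assists and the unique id,
 * registers the net device, runs the soft setup (CARD_STATE_SOFTSETUP)
 * and initializes the QDIO queues.  Any failure funnels into out_remove,
 * which hard-stops the card and sets all three ccw devices offline again.
 */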
7785static int
05e08a2a 7786__qeth_set_online(struct ccwgroup_device *gdev, int recovery_mode)
1da177e4
LT
7787{
7788 struct qeth_card *card = gdev->dev.driver_data;
7789 int rc = 0;
7790 enum qeth_card_states recover_flag;
7791
7792 BUG_ON(!card);
7793 QETH_DBF_TEXT(setup ,2, "setonlin");
7794 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
7795
7796 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
7797 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)){
7798 PRINT_WARN("set_online of card %s interrupted by user!\n",
7799 CARD_BUS_ID(card));
7800 return -ERESTARTSYS;
7801 }
7802
7803 recover_flag = card->state;
7804 if ((rc = ccw_device_set_online(CARD_RDEV(card))) ||
7805 (rc = ccw_device_set_online(CARD_WDEV(card))) ||
7806 (rc = ccw_device_set_online(CARD_DDEV(card)))){
7807 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
7808 return -EIO;
7809 }
7810
500f83ab 7811 qeth_make_parameters_consistent(card);
1da177e4
LT
7812
7813 if ((rc = qeth_hardsetup_card(card))){
7814 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
7815 goto out_remove;
7816 }
7817 card->state = CARD_STATE_HARDSETUP;
7818
7819 if (!(rc = qeth_query_ipassists(card,QETH_PROT_IPV4)))
7820 rc = qeth_get_unique_id(card);
7821
7822 if (rc && card->options.layer2 == 0) {
7823 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
7824 goto out_remove;
7825 }
7826 qeth_print_status_message(card);
7827 if ((rc = qeth_register_netdev(card))){
7828 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
7829 goto out_remove;
7830 }
7831 if ((rc = qeth_softsetup_card(card))){
7832 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
7833 goto out_remove;
7834 }
7835 card->state = CARD_STATE_SOFTSETUP;
7836
7837 if ((rc = qeth_init_qdio_queues(card))){
7838 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
7839 goto out_remove;
7840 }
7841/* maybe it was set offline without ifconfig down;
7842 * we can also use this state for recovery purposes */
7843 qeth_set_allowed_threads(card, 0xffffffff, 0);
7844 if (recover_flag == CARD_STATE_RECOVER)
05e08a2a 7845 qeth_start_again(card, recovery_mode);
1da177e4
LT
7846 qeth_notify_processes();
7847 return 0;
7848out_remove:
7849 card->use_hard_stop = 1;
05e08a2a 7850 qeth_stop_card(card, 0);
1da177e4
LT
7851 ccw_device_set_offline(CARD_DDEV(card));
7852 ccw_device_set_offline(CARD_WDEV(card));
7853 ccw_device_set_offline(CARD_RDEV(card));
7854 if (recover_flag == CARD_STATE_RECOVER)
7855 card->state = CARD_STATE_RECOVER;
7856 else
7857 card->state = CARD_STATE_DOWN;
7858 return -ENODEV;
7859}
7860
05e08a2a
FP
7861static int
7862qeth_set_online(struct ccwgroup_device *gdev)
7863{
7864 return __qeth_set_online(gdev, 0);
7865}
7866
1da177e4
LT
7867static struct ccw_device_id qeth_ids[] = {
7868 {CCW_DEVICE(0x1731, 0x01), .driver_info = QETH_CARD_TYPE_OSAE},
7869 {CCW_DEVICE(0x1731, 0x05), .driver_info = QETH_CARD_TYPE_IQD},
500f83ab 7870 {CCW_DEVICE(0x1731, 0x06), .driver_info = QETH_CARD_TYPE_OSN},
1da177e4
LT
7871 {},
7872};
7873MODULE_DEVICE_TABLE(ccw, qeth_ids);
7874
7875struct device *qeth_root_dev = NULL;
7876
7877struct ccwgroup_driver qeth_ccwgroup_driver = {
7878 .owner = THIS_MODULE,
7879 .name = "qeth",
7880 .driver_id = 0xD8C5E3C8,
7881 .probe = qeth_probe_device,
7882 .remove = qeth_remove_device,
7883 .set_online = qeth_set_online,
7884 .set_offline = qeth_set_offline,
7885};
7886
7887struct ccw_driver qeth_ccw_driver = {
7888 .name = "qeth",
7889 .ids = qeth_ids,
7890 .probe = ccwgroup_probe_ccwdev,
7891 .remove = ccwgroup_remove_ccwdev,
7892};
7893
7894
7895static void
7896qeth_unregister_dbf_views(void)
7897{
7898 if (qeth_dbf_setup)
7899 debug_unregister(qeth_dbf_setup);
7900 if (qeth_dbf_qerr)
7901 debug_unregister(qeth_dbf_qerr);
7902 if (qeth_dbf_sense)
7903 debug_unregister(qeth_dbf_sense);
7904 if (qeth_dbf_misc)
7905 debug_unregister(qeth_dbf_misc);
7906 if (qeth_dbf_data)
7907 debug_unregister(qeth_dbf_data);
7908 if (qeth_dbf_control)
7909 debug_unregister(qeth_dbf_control);
7910 if (qeth_dbf_trace)
7911 debug_unregister(qeth_dbf_trace);
7912}
7913static int
7914qeth_register_dbf_views(void)
7915{
7916 qeth_dbf_setup = debug_register(QETH_DBF_SETUP_NAME,
66a464db 7917 QETH_DBF_SETUP_PAGES,
1da177e4
LT
7918 QETH_DBF_SETUP_NR_AREAS,
7919 QETH_DBF_SETUP_LEN);
7920 qeth_dbf_misc = debug_register(QETH_DBF_MISC_NAME,
66a464db 7921 QETH_DBF_MISC_PAGES,
1da177e4
LT
7922 QETH_DBF_MISC_NR_AREAS,
7923 QETH_DBF_MISC_LEN);
7924 qeth_dbf_data = debug_register(QETH_DBF_DATA_NAME,
66a464db 7925 QETH_DBF_DATA_PAGES,
1da177e4
LT
7926 QETH_DBF_DATA_NR_AREAS,
7927 QETH_DBF_DATA_LEN);
7928 qeth_dbf_control = debug_register(QETH_DBF_CONTROL_NAME,
66a464db 7929 QETH_DBF_CONTROL_PAGES,
1da177e4
LT
7930 QETH_DBF_CONTROL_NR_AREAS,
7931 QETH_DBF_CONTROL_LEN);
7932 qeth_dbf_sense = debug_register(QETH_DBF_SENSE_NAME,
66a464db 7933 QETH_DBF_SENSE_PAGES,
1da177e4
LT
7934 QETH_DBF_SENSE_NR_AREAS,
7935 QETH_DBF_SENSE_LEN);
7936 qeth_dbf_qerr = debug_register(QETH_DBF_QERR_NAME,
66a464db 7937 QETH_DBF_QERR_PAGES,
1da177e4
LT
7938 QETH_DBF_QERR_NR_AREAS,
7939 QETH_DBF_QERR_LEN);
7940 qeth_dbf_trace = debug_register(QETH_DBF_TRACE_NAME,
66a464db 7941 QETH_DBF_TRACE_PAGES,
1da177e4
LT
7942 QETH_DBF_TRACE_NR_AREAS,
7943 QETH_DBF_TRACE_LEN);
7944
7945 if ((qeth_dbf_setup == NULL) || (qeth_dbf_misc == NULL) ||
7946 (qeth_dbf_data == NULL) || (qeth_dbf_control == NULL) ||
7947 (qeth_dbf_sense == NULL) || (qeth_dbf_qerr == NULL) ||
7948 (qeth_dbf_trace == NULL)) {
7949 qeth_unregister_dbf_views();
7950 return -ENOMEM;
7951 }
7952 debug_register_view(qeth_dbf_setup, &debug_hex_ascii_view);
7953 debug_set_level(qeth_dbf_setup, QETH_DBF_SETUP_LEVEL);
7954
7955 debug_register_view(qeth_dbf_misc, &debug_hex_ascii_view);
7956 debug_set_level(qeth_dbf_misc, QETH_DBF_MISC_LEVEL);
7957
7958 debug_register_view(qeth_dbf_data, &debug_hex_ascii_view);
7959 debug_set_level(qeth_dbf_data, QETH_DBF_DATA_LEVEL);
7960
7961 debug_register_view(qeth_dbf_control, &debug_hex_ascii_view);
7962 debug_set_level(qeth_dbf_control, QETH_DBF_CONTROL_LEVEL);
7963
7964 debug_register_view(qeth_dbf_sense, &debug_hex_ascii_view);
7965 debug_set_level(qeth_dbf_sense, QETH_DBF_SENSE_LEVEL);
7966
7967 debug_register_view(qeth_dbf_qerr, &debug_hex_ascii_view);
7968 debug_set_level(qeth_dbf_qerr, QETH_DBF_QERR_LEVEL);
7969
7970 debug_register_view(qeth_dbf_trace, &debug_hex_ascii_view);
7971 debug_set_level(qeth_dbf_trace, QETH_DBF_TRACE_LEVEL);
7972
7973 return 0;
7974}
7975
7976#ifdef CONFIG_QETH_IPV6
7977extern struct neigh_table arp_tbl;
7978static struct neigh_ops *arp_direct_ops;
7979static int (*qeth_old_arp_constructor) (struct neighbour *);
7980
7981static struct neigh_ops arp_direct_ops_template = {
7982 .family = AF_INET,
7983 .destructor = NULL,
7984 .solicit = NULL,
7985 .error_report = NULL,
7986 .output = dev_queue_xmit,
7987 .connected_output = dev_queue_xmit,
7988 .hh_output = dev_queue_xmit,
7989 .queue_xmit = dev_queue_xmit
7990};
7991
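/*
 * Editorial note, derived from the constructor below: for qeth devices
 * that are neither in layer 2 mode nor using qeth_fake_header, the
 * replacement ARP constructor marks neighbours NUD_NOARP and points
 * neigh->ops at arp_direct_ops, so all output paths go straight to
 * dev_queue_xmit() and no ARP resolution is done.  Every other device
 * falls back to the saved original constructor.
 */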
7992static int
7993qeth_arp_constructor(struct neighbour *neigh)
7994{
7995 struct net_device *dev = neigh->dev;
7996 struct in_device *in_dev;
7997 struct neigh_parms *parms;
7998 struct qeth_card *card;
7999
8000 card = qeth_get_card_from_dev(dev);
8001 if (card == NULL)
8002 goto out;
8003 if((card->options.layer2) ||
8004 (card->dev->hard_header == qeth_fake_header))
8005 goto out;
8006
8007 rcu_read_lock();
e5ed6399 8008 in_dev = __in_dev_get_rcu(dev);
1da177e4
LT
8009 if (in_dev == NULL) {
8010 rcu_read_unlock();
8011 return -EINVAL;
8012 }
8013
8014 parms = in_dev->arp_parms;
8015 __neigh_parms_put(neigh->parms);
8016 neigh->parms = neigh_parms_clone(parms);
8017 rcu_read_unlock();
8018
8019 neigh->type = inet_addr_type(*(u32 *) neigh->primary_key);
8020 neigh->nud_state = NUD_NOARP;
8021 neigh->ops = arp_direct_ops;
8022 neigh->output = neigh->ops->queue_xmit;
8023 return 0;
8024out:
8025 return qeth_old_arp_constructor(neigh);
8026}
8027#endif /*CONFIG_QETH_IPV6*/
8028
8029/*
8030 * IP address takeover related functions
8031 */
8032static void
8033qeth_clear_ipato_list(struct qeth_card *card)
8034{
8035 struct qeth_ipato_entry *ipatoe, *tmp;
8036 unsigned long flags;
8037
8038 spin_lock_irqsave(&card->ip_lock, flags);
8039 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
8040 list_del(&ipatoe->entry);
8041 kfree(ipatoe);
8042 }
8043 spin_unlock_irqrestore(&card->ip_lock, flags);
8044}
8045
8046int
8047qeth_add_ipato_entry(struct qeth_card *card, struct qeth_ipato_entry *new)
8048{
8049 struct qeth_ipato_entry *ipatoe;
8050 unsigned long flags;
8051 int rc = 0;
8052
8053 QETH_DBF_TEXT(trace, 2, "addipato");
8054 spin_lock_irqsave(&card->ip_lock, flags);
8055 list_for_each_entry(ipatoe, &card->ipato.entries, entry){
8056 if (ipatoe->proto != new->proto)
8057 continue;
8058 if (!memcmp(ipatoe->addr, new->addr,
8059 (ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
8060 (ipatoe->mask_bits == new->mask_bits)){
8061 PRINT_WARN("ipato entry already exists!\n");
8062 rc = -EEXIST;
8063 break;
8064 }
8065 }
8066 if (!rc) {
8067 list_add_tail(&new->entry, &card->ipato.entries);
8068 }
8069 spin_unlock_irqrestore(&card->ip_lock, flags);
8070 return rc;
8071}
8072
8073void
8074qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto,
8075 u8 *addr, int mask_bits)
8076{
8077 struct qeth_ipato_entry *ipatoe, *tmp;
8078 unsigned long flags;
8079
8080 QETH_DBF_TEXT(trace, 2, "delipato");
8081 spin_lock_irqsave(&card->ip_lock, flags);
8082 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry){
8083 if (ipatoe->proto != proto)
8084 continue;
8085 if (!memcmp(ipatoe->addr, addr,
8086 (proto == QETH_PROT_IPV4)? 4:16) &&
8087 (ipatoe->mask_bits == mask_bits)){
8088 list_del(&ipatoe->entry);
8089 kfree(ipatoe);
8090 }
8091 }
8092 spin_unlock_irqrestore(&card->ip_lock, flags);
8093}
8094
8095static inline void
8096qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
8097{
8098 int i, j;
8099 u8 octet;
8100
8101 for (i = 0; i < len; ++i){
8102 octet = addr[i];
8103 for (j = 7; j >= 0; --j){
8104 bits[i*8 + j] = octet & 1;
8105 octet >>= 1;
8106 }
8107 }
8108}
8109
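/*
 * Worked example (editorial, made-up values) for the takeover match done
 * below: qeth_convert_addr_to_bits() expands each address byte into eight
 * 0/1 bytes, most significant bit first, so 0xc0 0xa8 (192.168....) becomes
 * 1 1 0 0 0 0 0 0  1 0 1 0 1 0 0 0 ...  For an IPATO entry 192.168.0.0
 * with mask_bits = 16, qeth_is_addr_covered_by_ipato() memcmp()s only the
 * first 16 expanded bytes, i.e. it performs a plain prefix match; the
 * invert4/invert6 flags then optionally turn a match into a miss and
 * vice versa.
 */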
8110static int
8111qeth_is_addr_covered_by_ipato(struct qeth_card *card, struct qeth_ipaddr *addr)
8112{
8113 struct qeth_ipato_entry *ipatoe;
8114 u8 addr_bits[128] = {0, };
8115 u8 ipatoe_bits[128] = {0, };
8116 int rc = 0;
8117
8118 if (!card->ipato.enabled)
8119 return 0;
8120
8121 qeth_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
8122 (addr->proto == QETH_PROT_IPV4)? 4:16);
8123 list_for_each_entry(ipatoe, &card->ipato.entries, entry){
8124 if (addr->proto != ipatoe->proto)
8125 continue;
8126 qeth_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
8127 (ipatoe->proto==QETH_PROT_IPV4) ?
8128 4:16);
8129 if (addr->proto == QETH_PROT_IPV4)
8130 rc = !memcmp(addr_bits, ipatoe_bits,
8131 min(32, ipatoe->mask_bits));
8132 else
8133 rc = !memcmp(addr_bits, ipatoe_bits,
8134 min(128, ipatoe->mask_bits));
8135 if (rc)
8136 break;
8137 }
8138 /* invert? */
8139 if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
8140 rc = !rc;
8141 else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
8142 rc = !rc;
8143
8144 return rc;
8145}
8146
8147/*
8148 * VIPA related functions
8149 */
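/*
 * Editorial note, from the code below: VIPA entries are added with
 * QETH_IPA_SETIP_VIPA_FLAG / QETH_IPA_DELIP_VIPA_FLAG, while RXIP
 * (proxy ARP) entries use QETH_IPA_SETIP_TAKEOVER_FLAG and no delete
 * flag.  In both cases the address is only queued via qeth_add_ip() or
 * qeth_delete_ip(); the actual IPA commands are sent by the
 * QETH_SET_IP_THREAD, which is kicked here through
 * schedule_work(&card->kernel_thread_starter).
 */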
8150int
8151qeth_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
8152 const u8 *addr)
8153{
8154 struct qeth_ipaddr *ipaddr;
8155 unsigned long flags;
8156 int rc = 0;
8157
8158 ipaddr = qeth_get_addr_buffer(proto);
8159 if (ipaddr){
8160 if (proto == QETH_PROT_IPV4){
8161 QETH_DBF_TEXT(trace, 2, "addvipa4");
8162 memcpy(&ipaddr->u.a4.addr, addr, 4);
8163 ipaddr->u.a4.mask = 0;
8164#ifdef CONFIG_QETH_IPV6
8165 } else if (proto == QETH_PROT_IPV6){
8166 QETH_DBF_TEXT(trace, 2, "addvipa6");
8167 memcpy(&ipaddr->u.a6.addr, addr, 16);
8168 ipaddr->u.a6.pfxlen = 0;
8169#endif
8170 }
8171 ipaddr->type = QETH_IP_TYPE_VIPA;
8172 ipaddr->set_flags = QETH_IPA_SETIP_VIPA_FLAG;
8173 ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG;
8174 } else
8175 return -ENOMEM;
8176 spin_lock_irqsave(&card->ip_lock, flags);
8177 if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
8178 __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
8179 rc = -EEXIST;
8180 spin_unlock_irqrestore(&card->ip_lock, flags);
8181 if (rc){
8182 PRINT_WARN("Cannot add VIPA. Address already exists!\n");
8183 return rc;
8184 }
8185 if (!qeth_add_ip(card, ipaddr))
8186 kfree(ipaddr);
8187 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
8188 schedule_work(&card->kernel_thread_starter);
8189 return rc;
8190}
8191
8192void
8193qeth_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
8194 const u8 *addr)
8195{
8196 struct qeth_ipaddr *ipaddr;
8197
8198 ipaddr = qeth_get_addr_buffer(proto);
8199 if (ipaddr){
8200 if (proto == QETH_PROT_IPV4){
8201 QETH_DBF_TEXT(trace, 2, "delvipa4");
8202 memcpy(&ipaddr->u.a4.addr, addr, 4);
8203 ipaddr->u.a4.mask = 0;
8204#ifdef CONFIG_QETH_IPV6
8205 } else if (proto == QETH_PROT_IPV6){
8206 QETH_DBF_TEXT(trace, 2, "delvipa6");
8207 memcpy(&ipaddr->u.a6.addr, addr, 16);
8208 ipaddr->u.a6.pfxlen = 0;
8209#endif
8210 }
8211 ipaddr->type = QETH_IP_TYPE_VIPA;
8212 } else
8213 return;
8214 if (!qeth_delete_ip(card, ipaddr))
8215 kfree(ipaddr);
8216 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
8217 schedule_work(&card->kernel_thread_starter);
8218}
8219
8220/*
8221 * proxy ARP related functions
8222 */
8223int
8224qeth_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
8225 const u8 *addr)
8226{
8227 struct qeth_ipaddr *ipaddr;
8228 unsigned long flags;
8229 int rc = 0;
8230
8231 ipaddr = qeth_get_addr_buffer(proto);
8232 if (ipaddr){
8233 if (proto == QETH_PROT_IPV4){
8234 QETH_DBF_TEXT(trace, 2, "addrxip4");
8235 memcpy(&ipaddr->u.a4.addr, addr, 4);
8236 ipaddr->u.a4.mask = 0;
8237#ifdef CONFIG_QETH_IPV6
8238 } else if (proto == QETH_PROT_IPV6){
8239 QETH_DBF_TEXT(trace, 2, "addrxip6");
8240 memcpy(&ipaddr->u.a6.addr, addr, 16);
8241 ipaddr->u.a6.pfxlen = 0;
8242#endif
8243 }
8244 ipaddr->type = QETH_IP_TYPE_RXIP;
8245 ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG;
8246 ipaddr->del_flags = 0;
8247 } else
8248 return -ENOMEM;
8249 spin_lock_irqsave(&card->ip_lock, flags);
8250 if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
8251 __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
8252 rc = -EEXIST;
8253 spin_unlock_irqrestore(&card->ip_lock, flags);
8254 if (rc){
8255 PRINT_WARN("Cannot add RXIP. Address already exists!\n");
8256 return rc;
8257 }
8258 if (!qeth_add_ip(card, ipaddr))
8259 kfree(ipaddr);
8260 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
8261 schedule_work(&card->kernel_thread_starter);
8262 return 0;
8263}
8264
8265void
8266qeth_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
8267 const u8 *addr)
8268{
8269 struct qeth_ipaddr *ipaddr;
8270
8271 ipaddr = qeth_get_addr_buffer(proto);
8272 if (ipaddr){
8273 if (proto == QETH_PROT_IPV4){
8274 QETH_DBF_TEXT(trace, 2, "delrxip4");
8275 memcpy(&ipaddr->u.a4.addr, addr, 4);
8276 ipaddr->u.a4.mask = 0;
8277#ifdef CONFIG_QETH_IPV6
8278 } else if (proto == QETH_PROT_IPV6){
8279 QETH_DBF_TEXT(trace, 2, "delrxip6");
8280 memcpy(&ipaddr->u.a6.addr, addr, 16);
8281 ipaddr->u.a6.pfxlen = 0;
8282#endif
8283 }
8284 ipaddr->type = QETH_IP_TYPE_RXIP;
8285 } else
8286 return;
8287 if (!qeth_delete_ip(card, ipaddr))
8288 kfree(ipaddr);
8289 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
8290 schedule_work(&card->kernel_thread_starter);
8291}
8292
8293/**
8294 * IP event handler
8295 */
8296static int
8297qeth_ip_event(struct notifier_block *this,
8298 unsigned long event,void *ptr)
8299{
8300 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
8301 struct net_device *dev =(struct net_device *) ifa->ifa_dev->dev;
8302 struct qeth_ipaddr *addr;
8303 struct qeth_card *card;
8304
8305 QETH_DBF_TEXT(trace,3,"ipevent");
8306 card = qeth_get_card_from_dev(dev);
8307 if (!card)
8308 return NOTIFY_DONE;
8309 if (card->options.layer2)
8310 return NOTIFY_DONE;
8311
8312 addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
8313 if (addr != NULL) {
8314 addr->u.a4.addr = ifa->ifa_address;
8315 addr->u.a4.mask = ifa->ifa_mask;
8316 addr->type = QETH_IP_TYPE_NORMAL;
8317 } else
8318 goto out;
8319
8320 switch(event) {
8321 case NETDEV_UP:
8322 if (!qeth_add_ip(card, addr))
8323 kfree(addr);
8324 break;
8325 case NETDEV_DOWN:
8326 if (!qeth_delete_ip(card, addr))
8327 kfree(addr);
8328 break;
8329 default:
8330 break;
8331 }
8332 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
8333 schedule_work(&card->kernel_thread_starter);
8334out:
8335 return NOTIFY_DONE;
8336}
8337
8338static struct notifier_block qeth_ip_notifier = {
8339 qeth_ip_event,
8340 0
8341};
8342
8343#ifdef CONFIG_QETH_IPV6
8344/**
8345 * IPv6 event handler
8346 */
8347static int
8348qeth_ip6_event(struct notifier_block *this,
8349 unsigned long event,void *ptr)
8350{
8351
8352 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
8353 struct net_device *dev = (struct net_device *)ifa->idev->dev;
8354 struct qeth_ipaddr *addr;
8355 struct qeth_card *card;
8356
8357 QETH_DBF_TEXT(trace,3,"ip6event");
8358
8359 card = qeth_get_card_from_dev(dev);
8360 if (!card)
8361 return NOTIFY_DONE;
8362 if (!qeth_is_supported(card, IPA_IPV6))
8363 return NOTIFY_DONE;
8364
8365 addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
8366 if (addr != NULL) {
8367 memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr));
8368 addr->u.a6.pfxlen = ifa->prefix_len;
8369 addr->type = QETH_IP_TYPE_NORMAL;
8370 } else
8371 goto out;
8372
8373 switch(event) {
8374 case NETDEV_UP:
8375 if (!qeth_add_ip(card, addr))
8376 kfree(addr);
8377 break;
8378 case NETDEV_DOWN:
8379 if (!qeth_delete_ip(card, addr))
8380 kfree(addr);
8381 break;
8382 default:
8383 break;
8384 }
8385 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
8386 schedule_work(&card->kernel_thread_starter);
8387out:
8388 return NOTIFY_DONE;
8389}
8390
8391static struct notifier_block qeth_ip6_notifier = {
8392 qeth_ip6_event,
8393 0
8394};
8395#endif
8396
8397static int
66aea23f 8398__qeth_reboot_event_card(struct device *dev, void *data)
1da177e4 8399{
1da177e4
LT
8400 struct qeth_card *card;
8401
66aea23f
CH
8402 card = (struct qeth_card *) dev->driver_data;
8403 qeth_clear_ip_list(card, 0, 0);
8404 qeth_qdio_clear_card(card, 0);
8405 return 0;
8406}
8407
8408static int
8409qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
8410{
8411
8412 driver_for_each_device(&qeth_ccwgroup_driver.driver, NULL, NULL,
8413 __qeth_reboot_event_card);
1da177e4
LT
8414 return NOTIFY_DONE;
8415}
8416
8417
8418static struct notifier_block qeth_reboot_notifier = {
8419 qeth_reboot_event,
8420 0
8421};
8422
8423static int
8424qeth_register_notifiers(void)
8425{
8426 int r;
8427
8428 QETH_DBF_TEXT(trace,5,"regnotif");
8429 if ((r = register_reboot_notifier(&qeth_reboot_notifier)))
8430 return r;
8431 if ((r = register_inetaddr_notifier(&qeth_ip_notifier)))
8432 goto out_reboot;
8433#ifdef CONFIG_QETH_IPV6
8434 if ((r = register_inet6addr_notifier(&qeth_ip6_notifier)))
8435 goto out_ipv4;
8436#endif
8437 return 0;
8438
8439#ifdef CONFIG_QETH_IPV6
8440out_ipv4:
8441 unregister_inetaddr_notifier(&qeth_ip_notifier);
8442#endif
8443out_reboot:
8444 unregister_reboot_notifier(&qeth_reboot_notifier);
8445 return r;
8446}
8447
8448/**
8449 * unregister all event notifiers
8450 */
8451static void
8452qeth_unregister_notifiers(void)
8453{
8454
8455 QETH_DBF_TEXT(trace,5,"unregnot");
8456 BUG_ON(unregister_reboot_notifier(&qeth_reboot_notifier));
8457 BUG_ON(unregister_inetaddr_notifier(&qeth_ip_notifier));
8458#ifdef CONFIG_QETH_IPV6
8459 BUG_ON(unregister_inet6addr_notifier(&qeth_ip6_notifier));
8460#endif /* QETH_IPV6 */
8461
8462}
8463
8464#ifdef CONFIG_QETH_IPV6
8465static int
8466qeth_ipv6_init(void)
8467{
8468 qeth_old_arp_constructor = arp_tbl.constructor;
8469 write_lock(&arp_tbl.lock);
8470 arp_tbl.constructor = qeth_arp_constructor;
8471 write_unlock(&arp_tbl.lock);
8472
8473 arp_direct_ops = (struct neigh_ops*)
8474 kmalloc(sizeof(struct neigh_ops), GFP_KERNEL);
8475 if (!arp_direct_ops)
8476 return -ENOMEM;
8477
8478 memcpy(arp_direct_ops, &arp_direct_ops_template,
8479 sizeof(struct neigh_ops));
8480
8481 return 0;
8482}
8483
8484static void
8485qeth_ipv6_uninit(void)
8486{
8487 write_lock(&arp_tbl.lock);
8488 arp_tbl.constructor = qeth_old_arp_constructor;
8489 write_unlock(&arp_tbl.lock);
8490 kfree(arp_direct_ops);
8491}
8492#endif /* CONFIG_QETH_IPV6 */
8493
8494static void
8495qeth_sysfs_unregister(void)
8496{
8497 qeth_remove_driver_attributes();
8498 ccw_driver_unregister(&qeth_ccw_driver);
8499 ccwgroup_driver_unregister(&qeth_ccwgroup_driver);
8500 s390_root_dev_unregister(qeth_root_dev);
8501}
8502/**
8503 * register qeth at sysfs
8504 */
8505static int
8506qeth_sysfs_register(void)
8507{
8508 int rc=0;
8509
8510 rc = ccwgroup_driver_register(&qeth_ccwgroup_driver);
8511 if (rc)
8512 return rc;
8513 rc = ccw_driver_register(&qeth_ccw_driver);
8514 if (rc)
8515 return rc;
8516 rc = qeth_create_driver_attributes();
8517 if (rc)
8518 return rc;
8519 qeth_root_dev = s390_root_dev_register("qeth");
8520 if (IS_ERR(qeth_root_dev)) {
8521 rc = PTR_ERR(qeth_root_dev);
8522 return rc;
8523 }
8524 return 0;
8525}
8526
8527/***
8528 * init function
8529 */
8530static int __init
8531qeth_init(void)
8532{
8533 int rc=0;
8534
1da177e4
LT
8535 PRINT_INFO("loading %s (%s/%s/%s/%s/%s/%s/%s %s %s)\n",
8536 version, VERSION_QETH_C, VERSION_QETH_H,
8537 VERSION_QETH_MPC_H, VERSION_QETH_MPC_C,
8538 VERSION_QETH_FS_H, VERSION_QETH_PROC_C,
8539 VERSION_QETH_SYS_C, QETH_VERSION_IPV6,
8540 QETH_VERSION_VLAN);
8541
8542 INIT_LIST_HEAD(&qeth_card_list.list);
8543 INIT_LIST_HEAD(&qeth_notify_list);
8544 spin_lock_init(&qeth_notify_lock);
8545 rwlock_init(&qeth_card_list.rwlock);
8546
8547 if ((rc = qeth_register_dbf_views()))
8548 goto out_err;
8549 if ((rc = qeth_sysfs_register()))
8550 goto out_sysfs;
8551
8552#ifdef CONFIG_QETH_IPV6
8553 if ((rc = qeth_ipv6_init())) {
8554 PRINT_ERR("Out of memory during ipv6 init.\n");
8555 goto out_sysfs;
8556 }
8557#endif /* QETH_IPV6 */
8558 if ((rc = qeth_register_notifiers()))
8559 goto out_ipv6;
8560 if ((rc = qeth_create_procfs_entries()))
8561 goto out_notifiers;
8562
8563 return rc;
8564
8565out_notifiers:
8566 qeth_unregister_notifiers();
8567out_ipv6:
8568#ifdef CONFIG_QETH_IPV6
8569 qeth_ipv6_uninit();
8570#endif /* QETH_IPV6 */
8571out_sysfs:
8572 qeth_sysfs_unregister();
8573 qeth_unregister_dbf_views();
8574out_err:
8575 PRINT_ERR("Initialization failed\n");
8576 return rc;
8577}
8578
8579static void
8580__exit qeth_exit(void)
8581{
8582 struct qeth_card *card, *tmp;
8583 unsigned long flags;
8584
8585 QETH_DBF_TEXT(trace,1, "cleanup.");
8586
8587 /*
8588 * We would not need to clean up our devices here, because the
8589 * common device layer calls qeth_remove_device for each device
8590 * as soon as we unregister our driver (done in qeth_sysfs_unregister).
8591 * But we do cleanup here so we can do a "soft" shutdown of our cards.
8592 * qeth_remove_device called by the common device layer would otherwise
8593 * do a "hard" shutdown (card->use_hard_stop is set to one in
8594 * qeth_remove_device).
8595 */
8596again:
8597 read_lock_irqsave(&qeth_card_list.rwlock, flags);
8598 list_for_each_entry_safe(card, tmp, &qeth_card_list.list, list){
8599 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
8600 qeth_set_offline(card->gdev);
8601 qeth_remove_device(card->gdev);
8602 goto again;
8603 }
8604 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
8605#ifdef CONFIG_QETH_IPV6
8606 qeth_ipv6_uninit();
8607#endif
8608 qeth_unregister_notifiers();
8609 qeth_remove_procfs_entries();
8610 qeth_sysfs_unregister();
8611 qeth_unregister_dbf_views();
8612 printk("qeth: removed\n");
8613}
8614
500f83ab
UB
8615EXPORT_SYMBOL(qeth_osn_register);
8616EXPORT_SYMBOL(qeth_osn_deregister);
8617EXPORT_SYMBOL(qeth_osn_assist);
1da177e4
LT
8618module_init(qeth_init);
8619module_exit(qeth_exit);
8620MODULE_AUTHOR("Frank Pavlic <pavlic@de.ibm.com>");
8621MODULE_DESCRIPTION("Linux on zSeries OSA Express and HiperSockets support\n" \
8622 "Copyright 2000,2003 IBM Corporation\n");
8623
8624MODULE_LICENSE("GPL");