2 * CTC / ESCON network driver
4 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
6 * Fixes by : Jochen Röhrig (roehrig@de.ibm.com)
7 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
8 Peter Tiedemann (ptiedem@de.ibm.com)
9 * Driver Model stuff by : Cornelia Huck <cornelia.huck@de.ibm.com>
12 * - Principles of Operation (IBM doc#: SA22-7201-06)
13 * - Common IO/-Device Commands and Self Description (IBM doc#: SA22-7204-02)
14 * - Common IO/-Device Commands and Self Description (IBM doc#: SN22-5535)
15 * - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
16 * - ESCON I/O Interface (IBM doc#: SA22-7202-02)
18 * and the source of the original CTC driver by:
19 * Dieter Wellerdiek (wel@de.ibm.com)
20 * Martin Schwidefsky (schwidefsky@de.ibm.com)
21 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
22 * Jochen Röhrig (roehrig@de.ibm.com)
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License as published by
26 * the Free Software Foundation; either version 2, or (at your option)
29 * This program is distributed in the hope that it will be useful,
30 * but WITHOUT ANY WARRANTY; without even the implied warranty of
31 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
32 * GNU General Public License for more details.
34 * You should have received a copy of the GNU General Public License
35 * along with this program; if not, write to the Free Software
36 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
40 #include <linux/module.h>
41 #include <linux/init.h>
42 #include <linux/kernel.h>
43 #include <linux/slab.h>
44 #include <linux/errno.h>
45 #include <linux/types.h>
46 #include <linux/interrupt.h>
47 #include <linux/timer.h>
48 #include <linux/bitops.h>
50 #include <linux/signal.h>
51 #include <linux/string.h>
54 #include <linux/if_arp.h>
55 #include <linux/tcp.h>
56 #include <linux/skbuff.h>
57 #include <linux/ctype.h>
61 #include <asm/ccwdev.h>
62 #include <asm/ccwgroup.h>
63 #include <asm/uaccess.h>
65 #include <asm/idals.h>
73 MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
74 MODULE_DESCRIPTION("Linux for S/390 CTC/Escon Driver");
75 MODULE_LICENSE("GPL");
77 * States of the interface statemachine.
81 DEV_STATE_STARTWAIT_RXTX,
82 DEV_STATE_STARTWAIT_RX,
83 DEV_STATE_STARTWAIT_TX,
84 DEV_STATE_STOPWAIT_RXTX,
85 DEV_STATE_STOPWAIT_RX,
86 DEV_STATE_STOPWAIT_TX,
89 * MUST be always the last element!!
94 static const char *dev_state_names[] = {
106 * Events of the interface statemachine.
117 * MUST be always the last element!!
122 static const char *dev_event_names[] = {
133 * Events of the channel statemachine
137 * Events, representing return code of
138 * I/O operations (ccw_device_start, ccw_device_halt et al.)
151 * Events, representing unit-check
155 CH_EVENT_UC_TXTIMEOUT,
156 CH_EVENT_UC_TXPARITY,
158 CH_EVENT_UC_RXPARITY,
163 * Events, representing subchannel-check
168 * Events, representing machine checks
174 * Event, representing normal IRQ
180 * Event, representing timer expiry.
185 * Events, representing commands from upper levels.
191 * MUST be always the last element!!
197 * States of the channel statemachine.
201 * Channel not assigned to any device,
202 * initial state, direction invalid
207 * Channel assigned but not operating
226 * MUST be always the last element!!
231 static int loglevel = CTC_LOGLEVEL_DEFAULT;
234 * Linked list of all detected channels.
236 static struct channel *channels = NULL;
244 static int printed = 0;
249 printk(KERN_INFO "CTC driver initialized\n");
254 * Return type of a detected device.
/*
 * Map a detected ccw device to the driver's channel type, taken from
 * id->driver_info.  FICON channels are handled identically to ESCON,
 * so they are folded into channel_type_escon here.
 * NOTE(review): the embedded numbering (256..262) skips lines 258/260;
 * braces and the return statement are missing from this listing.
 */
256 static enum channel_types
257 get_channel_type(struct ccw_device_id *id)
259 enum channel_types type = (enum channel_types) id->driver_info;
261 if (type == channel_type_ficon)
262 type = channel_type_escon;
267 static const char *ch_event_names[] = {
268 "ccw_device success",
272 "ccw_device unknown",
274 "Status ATTN & BUSY",
278 "Unit check remote reset",
279 "Unit check remote system reset",
280 "Unit check TX timeout",
281 "Unit check TX parity",
282 "Unit check Hardware failure",
283 "Unit check RX parity",
285 "Unit check Unknown",
287 "SubChannel check Unknown",
289 "Machine check failure",
290 "Machine check operational",
301 static const char *ch_state_names[] = {
322 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
324 * @param skb The sk_buff to dump.
325 * @param offset Offset relative to skb-data, where to start the dump.
/*
 * Debug helper: dump the ll_header fields and the payload bytes of an
 * sk_buff via printk(KERN_DEBUG ...).  Does nothing unless the
 * CTC_LOGLEVEL_DEBUG bit is set in the module's loglevel.
 * NOTE(review): numbering gaps (329, 331, 333-334, ...) show lines are
 * missing here (e.g. the declarations of 'bl' and 'i', the offset
 * adjustment, and the early return).
 */
328 ctc_dump_skb(struct sk_buff *skb, int offset)
330 unsigned char *p = skb->data;
332 struct ll_header *header;
335 if (!(loglevel & CTC_LOGLEVEL_DEBUG))
340 header = (struct ll_header *) p;
343 printk(KERN_DEBUG "dump:\n");
344 printk(KERN_DEBUG "blocklen=%d %04x\n", bl, bl);
346 printk(KERN_DEBUG "h->length=%d %04x\n", header->length,
348 printk(KERN_DEBUG "h->type=%04x\n", header->type);
349 printk(KERN_DEBUG "h->unused=%04x\n", header->unused);
352 printk(KERN_DEBUG "data: ");
/* 16 bytes per printk line; the "\n<7>" continues at KERN_DEBUG level */
353 for (i = 0; i < bl; i++)
354 printk("%02x%s", *p++, (i % 16) ? " " : "\n<7>");
359 ctc_dump_skb(struct sk_buff *skb, int offset)
365 * Unpack a just received skb and hand it over to
368 * @param ch The channel where this skb has been received.
369 * @param pskb The received skb.
/*
 * Unpack a received trans_skb: walk the ll_header-framed sub-packets it
 * contains, validate each (protocol type, length, tailroom), copy valid
 * payloads into freshly allocated skbs and hand them up the stack,
 * updating the rx statistics in privptr->stats on every error path.
 * NOTE(review): numbering gaps throughout (373, 377, 380-381, ...) show
 * this listing is missing lines — loop structure, netif_rx() call and
 * several closing braces are not visible here.
 */
372 ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
374 struct net_device *dev = ch->netdev;
375 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
/* first two bytes of the block are the overall block length */
376 __u16 len = *((__u16 *) pskb->data);
378 DBF_TEXT(trace, 4, __FUNCTION__);
379 skb_put(pskb, 2 + LL_HEADER_LENGTH);
382 pskb->ip_summed = CHECKSUM_UNNECESSARY;
385 struct ll_header *header = (struct ll_header *) pskb->data;
387 skb_pull(pskb, LL_HEADER_LENGTH);
388 if ((ch->protocol == CTC_PROTO_S390) &&
389 (header->type != ETH_P_IP)) {
/* log each error class only once until a successful rx resets logflags */
392 if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
395 * Check packet type only if we stick strictly
396 * to S/390's protocol of OS390. This only
397 * supports IP. Otherwise allow any packet
401 "%s Illegal packet type 0x%04x received, dropping\n",
402 dev->name, header->type);
403 ch->logflags |= LOG_FLAG_ILLEGALPKT;
408 ctc_dump_skb(pskb, -6);
410 privptr->stats.rx_dropped++;
411 privptr->stats.rx_frame_errors++;
414 pskb->protocol = ntohs(header->type);
415 if (header->length <= LL_HEADER_LENGTH) {
417 if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
420 "%s Illegal packet size %d "
421 "received (MTU=%d blocklen=%d), "
422 "dropping\n", dev->name, header->length,
424 ch->logflags |= LOG_FLAG_ILLEGALSIZE;
429 ctc_dump_skb(pskb, -6);
431 privptr->stats.rx_dropped++;
432 privptr->stats.rx_length_errors++;
435 header->length -= LL_HEADER_LENGTH;
436 len -= LL_HEADER_LENGTH;
/* payload must fit both the skb tailroom and the remaining block */
437 if ((header->length > skb_tailroom(pskb)) ||
438 (header->length > len)) {
440 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
443 "%s Illegal packet size %d "
444 "(beyond the end of received data), "
445 "dropping\n", dev->name, header->length);
446 ch->logflags |= LOG_FLAG_OVERRUN;
451 ctc_dump_skb(pskb, -6);
453 privptr->stats.rx_dropped++;
454 privptr->stats.rx_length_errors++;
457 skb_put(pskb, header->length);
458 skb_reset_mac_header(pskb);
459 len -= header->length;
/* copy into a private skb so pskb (the reusable trans_skb) survives */
460 skb = dev_alloc_skb(pskb->len);
463 if (!(ch->logflags & LOG_FLAG_NOMEM)) {
466 "%s Out of memory in ctc_unpack_skb\n",
468 ch->logflags |= LOG_FLAG_NOMEM;
472 privptr->stats.rx_dropped++;
475 memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
476 skb_reset_mac_header(skb);
477 skb->dev = pskb->dev;
478 skb->protocol = pskb->protocol;
479 pskb->ip_summed = CHECKSUM_UNNECESSARY;
482 * Successful rx; reset logflags
485 dev->last_rx = jiffies;
486 privptr->stats.rx_packets++;
487 privptr->stats.rx_bytes += skb->len;
489 skb_pull(pskb, header->length);
490 if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
492 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
495 "%s Overrun in ctc_unpack_skb\n",
497 ch->logflags |= LOG_FLAG_OVERRUN;
503 skb_put(pskb, LL_HEADER_LENGTH);
509 * Check return code of a preceeding ccw_device call, halt_IO etc...
511 * @param ch The channel, the error belongs to.
512 * @param return_code The error code to inspect.
/*
 * Translate the return code of a preceding ccw_device_start/halt call
 * into a channel-statemachine event (CH_EVENT_IO_*) and log the error.
 * NOTE(review): the case labels (0, -EBUSY, -ENODEV, -EIO, default)
 * fall in the numbering gaps (519, 522, 526, 531, 536) and are missing
 * from this listing; the mapping below is inferred from the events fired.
 */
515 ccw_check_return_code(struct channel *ch, int return_code, char *msg)
517 DBF_TEXT(trace, 5, __FUNCTION__);
518 switch (return_code) {
520 fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
523 ctc_pr_warn("%s (%s): Busy !\n", ch->id, msg);
524 fsm_event(ch->fsm, CH_EVENT_IO_EBUSY, ch);
527 ctc_pr_emerg("%s (%s): Invalid device called for IO\n",
529 fsm_event(ch->fsm, CH_EVENT_IO_ENODEV, ch);
532 ctc_pr_emerg("%s (%s): Status pending... \n",
534 fsm_event(ch->fsm, CH_EVENT_IO_EIO, ch);
537 ctc_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
538 ch->id, msg, return_code);
539 fsm_event(ch->fsm, CH_EVENT_IO_UNKNOWN, ch);
544 * Check sense of a unit check.
546 * @param ch The channel, the sense code belongs to.
547 * @param sense The sense code to inspect.
/*
 * Decode the sense byte of a unit check and fire the matching
 * CH_EVENT_UC_* event on the channel statemachine.  The SNS0_* bits are
 * tested in priority order: intervention-required, equipment check,
 * bus-out check, command reject, then sense == 0, then unknown.
 * NOTE(review): several nested condition/else lines fall in numbering
 * gaps (e.g. 554, 558, 561) and are missing from this listing.
 */
550 ccw_unit_check(struct channel *ch, unsigned char sense)
552 DBF_TEXT(trace, 5, __FUNCTION__);
553 if (sense & SNS0_INTERVENTION_REQ) {
555 ctc_pr_debug("%s: Interface disc. or Sel. reset "
556 "(remote)\n", ch->id);
557 fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch);
559 ctc_pr_debug("%s: System reset (remote)\n", ch->id);
560 fsm_event(ch->fsm, CH_EVENT_UC_RSRESET, ch);
562 } else if (sense & SNS0_EQUIPMENT_CHECK) {
563 if (sense & SNS0_BUS_OUT_CHECK) {
564 ctc_pr_warn("%s: Hardware malfunction (remote)\n",
566 fsm_event(ch->fsm, CH_EVENT_UC_HWFAIL, ch);
568 ctc_pr_warn("%s: Read-data parity error (remote)\n",
570 fsm_event(ch->fsm, CH_EVENT_UC_RXPARITY, ch);
572 } else if (sense & SNS0_BUS_OUT_CHECK) {
574 ctc_pr_warn("%s: Data-streaming timeout)\n", ch->id);
575 fsm_event(ch->fsm, CH_EVENT_UC_TXTIMEOUT, ch);
577 ctc_pr_warn("%s: Data-transfer parity error\n", ch->id);
578 fsm_event(ch->fsm, CH_EVENT_UC_TXPARITY, ch);
580 } else if (sense & SNS0_CMD_REJECT) {
/* command reject deliberately fires no event — see original source */
581 ctc_pr_warn("%s: Command reject\n", ch->id);
582 } else if (sense == 0) {
583 ctc_pr_debug("%s: Unit check ZERO\n", ch->id);
584 fsm_event(ch->fsm, CH_EVENT_UC_ZERO, ch);
586 ctc_pr_warn("%s: Unit Check with sense code: %02x\n",
588 fsm_event(ch->fsm, CH_EVENT_UC_UNKNOWN, ch);
/*
 * Drain an sk_buff queue, dropping each buffer: decrement the user
 * count and free with dev_kfree_skb_irq (callers hold spinlocks /
 * may run in IRQ context, per the call sites in ch_action_stopped
 * and ch_action_fail).
 * NOTE(review): lines 594-596 (brace, skb declaration) are missing.
 */
593 ctc_purge_skb_queue(struct sk_buff_head *q)
597 DBF_TEXT(trace, 5, __FUNCTION__);
599 while ((skb = skb_dequeue(q))) {
600 atomic_dec(&skb->users);
601 dev_kfree_skb_irq(skb);
/*
 * Make sure the channel has a usable transfer buffer (ch->trans_skb):
 * (re)allocate it when absent or when the buffer size changed, and set
 * up the (normalized, IDAL-capable) CDA of ccw[1] to point at its data.
 * On allocation failure the old buffer is freed and the CDA cleared.
 * @param warn presumably controls whether the failure messages are
 *             printed — the if(warn) lines fall in numbering gaps; TODO
 *             confirm against the original source.
 * NOTE(review): the return statements and several closing braces are
 * missing from this listing (gaps at 607, 617-618, 620, 622-624, ...).
 */
606 ctc_checkalloc_buffer(struct channel *ch, int warn)
608 DBF_TEXT(trace, 5, __FUNCTION__);
609 if ((ch->trans_skb == NULL) ||
610 (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
611 if (ch->trans_skb != NULL)
612 dev_kfree_skb(ch->trans_skb);
613 clear_normalized_cda(&ch->ccw[1]);
/* GFP_DMA: the channel hardware needs a 31-bit addressable buffer */
614 ch->trans_skb = __dev_alloc_skb(ch->max_bufsize,
615 GFP_ATOMIC | GFP_DMA);
616 if (ch->trans_skb == NULL) {
619 "%s: Couldn't alloc %s trans_skb\n",
621 (CHANNEL_DIRECTION(ch->flags) == READ) ?
625 ch->ccw[1].count = ch->max_bufsize;
626 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
627 dev_kfree_skb(ch->trans_skb);
628 ch->trans_skb = NULL;
631 "%s: set_normalized_cda for %s "
632 "trans_skb failed, dropping packets\n",
634 (CHANNEL_DIRECTION(ch->flags) == READ) ?
638 ch->ccw[1].count = 0;
639 ch->trans_skb_data = ch->trans_skb->data;
640 ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
646 * Dummy NOP action for statemachines
649 fsm_action_nop(fsm_instance * fi, int event, void *arg)
654 * Actions for channel - statemachines.
655 *****************************************************************************/
658 * Normal data has been send. Free the corresponding
659 * skb (it's in io_queue), reset dev->tbusy and
660 * revert to idle state.
662 * @param fi An instance of a channel statemachine.
663 * @param event The event, just happened.
664 * @param arg Generic pointer, casted from channel * upon call.
/*
 * TX-complete action: account the just-sent skbs from io_queue into the
 * tx statistics and free them, record the transmit duration in the
 * profiling data, then — under collect_lock — either chain-start the
 * next multi-packet write from collect_queue or go to CH_STATE_TXIDLE.
 * NOTE(review): numbering gaps (668, 672-674, 677, 679-680, ...) show
 * missing lines: declarations of skb/i/rc, the duration assignment head,
 * and several braces are not visible in this listing.
 */
667 ch_action_txdone(fsm_instance * fi, int event, void *arg)
669 struct channel *ch = (struct channel *) arg;
670 struct net_device *dev = ch->netdev;
671 struct ctc_priv *privptr = dev->priv;
675 unsigned long duration;
676 struct timespec done_stamp = xtime;
678 DBF_TEXT(trace, 4, __FUNCTION__);
/* microseconds since send_stamp; track the maximum in prof.tx_time */
681 (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
682 (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
683 if (duration > ch->prof.tx_time)
684 ch->prof.tx_time = duration;
686 if (ch->irb->scsw.count != 0)
687 ctc_pr_debug("%s: TX not complete, remaining %d bytes\n",
688 dev->name, ch->irb->scsw.count);
689 fsm_deltimer(&ch->timer);
690 while ((skb = skb_dequeue(&ch->io_queue))) {
691 privptr->stats.tx_packets++;
692 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
694 privptr->stats.tx_bytes += 2;
697 atomic_dec(&skb->users);
698 dev_kfree_skb_irq(skb);
700 spin_lock(&ch->collect_lock);
701 clear_normalized_cda(&ch->ccw[4]);
702 if (ch->collect_len > 0) {
705 if (ctc_checkalloc_buffer(ch, 1)) {
706 spin_unlock(&ch->collect_lock);
/* rewind trans_skb and build one block from all collected skbs */
709 ch->trans_skb->data = ch->trans_skb_data;
710 skb_reset_tail_pointer(ch->trans_skb);
711 ch->trans_skb->len = 0;
712 if (ch->prof.maxmulti < (ch->collect_len + 2))
713 ch->prof.maxmulti = ch->collect_len + 2;
714 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
715 ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
/* leading __u16 carries the total block length (payload + 2) */
716 *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
718 while ((skb = skb_dequeue(&ch->collect_queue))) {
719 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
721 privptr->stats.tx_packets++;
722 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
723 atomic_dec(&skb->users);
724 dev_kfree_skb_irq(skb);
728 spin_unlock(&ch->collect_lock);
729 ch->ccw[1].count = ch->trans_skb->len;
730 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
731 ch->prof.send_stamp = xtime;
732 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
733 (unsigned long) ch, 0xff, 0);
734 ch->prof.doios_multi++;
736 privptr->stats.tx_dropped += i;
737 privptr->stats.tx_errors += i;
738 fsm_deltimer(&ch->timer);
739 ccw_check_return_code(ch, rc, "chained TX");
742 spin_unlock(&ch->collect_lock);
743 fsm_newstate(fi, CH_STATE_TXIDLE);
749 * Initial data is sent.
750 * Notify device statemachine that we are up and
753 * @param fi An instance of a channel statemachine.
754 * @param event The event, just happened.
755 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Initial TX data has been sent: stop the handshake timer, enter
 * CH_STATE_TXIDLE and signal DEV_EVENT_TXUP to the interface
 * statemachine.
 * NOTE(review): the braces and the fsm_event continuation line
 * (argument list) fall in numbering gaps and are missing here.
 */
758 ch_action_txidle(fsm_instance * fi, int event, void *arg)
760 struct channel *ch = (struct channel *) arg;
762 DBF_TEXT(trace, 4, __FUNCTION__);
763 fsm_deltimer(&ch->timer);
764 fsm_newstate(fi, CH_STATE_TXIDLE);
765 fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
770 * Got normal data, check for sanity, queue it up, allocate new buffer
771 * trigger bottom half, and initiate next read.
773 * @param fi An instance of a channel statemachine.
774 * @param event The event, just happened.
775 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Normal receive action: sanity-check the received block length against
 * the SCSW residual count and the block-length word at the head of
 * trans_skb, unpack valid data via ctc_unpack_skb(), then rearm the
 * read CCW and restart the next read.
 * NOTE(review): numbering gaps (779, 786-788, 791, 793, ...) show
 * missing lines: declarations of check_len/rc, the "len < 8" condition
 * head, the default: label and several braces.
 */
778 ch_action_rx(fsm_instance * fi, int event, void *arg)
780 struct channel *ch = (struct channel *) arg;
781 struct net_device *dev = ch->netdev;
782 struct ctc_priv *privptr = dev->priv;
/* residual count in the SCSW gives the actually-received length */
783 int len = ch->max_bufsize - ch->irb->scsw.count;
784 struct sk_buff *skb = ch->trans_skb;
785 __u16 block_len = *((__u16 *) skb->data);
789 DBF_TEXT(trace, 4, __FUNCTION__);
790 fsm_deltimer(&ch->timer);
792 ctc_pr_debug("%s: got packet with length %d < 8\n",
794 privptr->stats.rx_dropped++;
795 privptr->stats.rx_length_errors++;
798 if (len > ch->max_bufsize) {
799 ctc_pr_debug("%s: got packet with length %d > %d\n",
800 dev->name, len, ch->max_bufsize);
801 privptr->stats.rx_dropped++;
802 privptr->stats.rx_length_errors++;
807 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
809 switch (ch->protocol) {
811 case CTC_PROTO_OS390:
/* tolerate the 2 garbage bytes for S390/OS390 protocols */
812 check_len = block_len + 2;
815 check_len = block_len;
818 if ((len < block_len) || (len > check_len)) {
819 ctc_pr_debug("%s: got block length %d != rx length %d\n",
820 dev->name, block_len, len);
822 ctc_dump_skb(skb, 0);
824 *((__u16 *) skb->data) = len;
825 privptr->stats.rx_dropped++;
826 privptr->stats.rx_length_errors++;
831 *((__u16 *) skb->data) = block_len;
832 ctc_unpack_skb(ch, skb);
/* reset trans_skb to its pristine empty state for the next read */
835 skb->data = ch->trans_skb_data;
836 skb_reset_tail_pointer(skb);
838 if (ctc_checkalloc_buffer(ch, 1))
840 ch->ccw[1].count = ch->max_bufsize;
841 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
843 ccw_check_return_code(ch, rc, "normal RX");
846 static void ch_action_rxidle(fsm_instance * fi, int event, void *arg);
849 * Initialize connection by sending a __u16 of value 0.
851 * @param fi An instance of a channel statemachine.
852 * @param event The event, just happened.
853 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Start the initial handshake: send/receive a __u16 block-length word of
 * value CTC_INITIAL_BLOCKLEN.  For CTC_PROTO_OS390 in SETUPWAIT the
 * handshake is short-circuited (RX goes via ch_action_rxidle, TX goes
 * straight to TXIDLE); otherwise a 2-byte transfer is started and, for
 * non-S390-compat protocols, guarded by a 5 s timer.
 * NOTE(review): numbering gaps (857, 859-860, 862, 867, ...) show
 * missing lines: the rc declaration, return statements and braces.
 */
856 ch_action_firstio(fsm_instance * fi, int event, void *arg)
858 struct channel *ch = (struct channel *) arg;
861 DBF_TEXT(trace, 4, __FUNCTION__);
863 if (fsm_getstate(fi) == CH_STATE_TXIDLE)
864 ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
865 fsm_deltimer(&ch->timer);
866 if (ctc_checkalloc_buffer(ch, 1))
868 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
869 (ch->protocol == CTC_PROTO_OS390)) {
870 /* OS/390 resp. z/OS */
871 if (CHANNEL_DIRECTION(ch->flags) == READ) {
872 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
873 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC,
875 ch_action_rxidle(fi, event, arg);
877 struct net_device *dev = ch->netdev;
878 fsm_newstate(fi, CH_STATE_TXIDLE);
879 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
880 DEV_EVENT_TXUP, dev);
886 * Don't setup a timer for receiving the initial RX frame
887 * if in compatibility mode, since VM TCP delays the initial
888 * frame until it has some data to send.
890 if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
891 (ch->protocol != CTC_PROTO_S390))
892 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
894 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
895 ch->ccw[1].count = 2; /* Transfer only length */
897 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
898 ? CH_STATE_RXINIT : CH_STATE_TXINIT);
899 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
901 fsm_deltimer(&ch->timer);
902 fsm_newstate(fi, CH_STATE_SETUPWAIT);
903 ccw_check_return_code(ch, rc, "init IO");
906 * If in compatibility mode since we don't setup a timer, we
907 * also signal RX channel up immediately. This enables us
908 * to send packets early which in turn usually triggers some
909 * reply from VM TCP which brings up the RX channel to its
912 if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
913 (ch->protocol == CTC_PROTO_S390)) {
914 struct net_device *dev = ch->netdev;
915 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXUP,
921 * Got initial data, check it. If OK,
922 * notify device statemachine that we are up and
925 * @param fi An instance of a channel statemachine.
926 * @param event The event, just happened.
927 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Initial RX data arrived: if the received block-length word is at
 * least CTC_INITIAL_BLOCKLEN, enter CH_STATE_RXIDLE, start the first
 * real read and signal DEV_EVENT_RXUP; otherwise log and retry the
 * handshake via ch_action_firstio().
 * NOTE(review): numbering gaps (931, 934-936, ...) show missing lines:
 * declarations of buflen/rc, return statements and braces.
 */
930 ch_action_rxidle(fsm_instance * fi, int event, void *arg)
932 struct channel *ch = (struct channel *) arg;
933 struct net_device *dev = ch->netdev;
937 DBF_TEXT(trace, 4, __FUNCTION__);
938 fsm_deltimer(&ch->timer);
939 buflen = *((__u16 *) ch->trans_skb->data);
941 ctc_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);
943 if (buflen >= CTC_INITIAL_BLOCKLEN) {
944 if (ctc_checkalloc_buffer(ch, 1))
946 ch->ccw[1].count = ch->max_bufsize;
947 fsm_newstate(fi, CH_STATE_RXIDLE);
948 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
949 (unsigned long) ch, 0xff, 0);
951 fsm_newstate(fi, CH_STATE_RXINIT);
952 ccw_check_return_code(ch, rc, "initial RX");
954 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
955 DEV_EVENT_RXUP, dev);
957 ctc_pr_debug("%s: Initial RX count %d not %d\n",
958 dev->name, buflen, CTC_INITIAL_BLOCKLEN);
959 ch_action_firstio(fi, event, arg);
964 * Set channel into extended mode.
966 * @param fi An instance of a channel statemachine.
967 * @param event The event, just happened.
968 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Put the channel into extended mode by starting the set-mode CCW
 * (ccw[6]) under a 5 s timer, entering CH_STATE_SETUPWAIT.  When
 * invoked from the timer (CH_EVENT_TIMER) the ccwdev lock is not yet
 * held, so it is taken here; all other callers already hold it.
 * NOTE(review): the rc declaration and braces fall in numbering gaps.
 */
971 ch_action_setmode(fsm_instance * fi, int event, void *arg)
973 struct channel *ch = (struct channel *) arg;
975 unsigned long saveflags;
977 DBF_TEXT(trace, 4, __FUNCTION__);
978 fsm_deltimer(&ch->timer);
979 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
980 fsm_newstate(fi, CH_STATE_SETUPWAIT);
981 saveflags = 0; /* avoids compiler warning with
982 spin_unlock_irqrestore */
983 if (event == CH_EVENT_TIMER) // only for timer not yet locked
984 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
985 rc = ccw_device_start(ch->cdev, &ch->ccw[6], (unsigned long) ch, 0xff, 0);
986 if (event == CH_EVENT_TIMER)
987 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
989 fsm_deltimer(&ch->timer);
990 fsm_newstate(fi, CH_STATE_STARTWAIT);
991 ccw_check_return_code(ch, rc, "set Mode");
999 * @param fi An instance of a channel statemachine.
1000 * @param event The event, just happened.
1001 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Start a channel: free any stale trans_skb, build the CCW program
 * (ccw[0] PREPARE, ccw[1] READ or WRITE, ccw[2] NOOP, ccw[3..5] a copy
 * of ccw[0..2] for the write-retry path), preallocate the transfer
 * buffer if possible, then issue an initial ccw_device_halt under the
 * ccwdev lock with a 1 s timer and enter CH_STATE_STARTWAIT.
 * NOTE(review): numbering gaps (1005, 1008, 1010, 1012, ...) show
 * missing lines: the rc declaration, NULL-check heads, the dev
 * assignment, return statements and braces.
 */
1004 ch_action_start(fsm_instance * fi, int event, void *arg)
1006 struct channel *ch = (struct channel *) arg;
1007 unsigned long saveflags;
1009 struct net_device *dev;
1011 DBF_TEXT(trace, 4, __FUNCTION__);
1013 ctc_pr_warn("ch_action_start ch=NULL\n");
1016 if (ch->netdev == NULL) {
1017 ctc_pr_warn("ch_action_start dev=NULL, id=%s\n", ch->id);
1023 ctc_pr_debug("%s: %s channel start\n", dev->name,
1024 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1027 if (ch->trans_skb != NULL) {
1028 clear_normalized_cda(&ch->ccw[1]);
1029 dev_kfree_skb(ch->trans_skb);
1030 ch->trans_skb = NULL;
1032 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1033 ch->ccw[1].cmd_code = CCW_CMD_READ;
1034 ch->ccw[1].flags = CCW_FLAG_SLI;
1035 ch->ccw[1].count = 0;
1037 ch->ccw[1].cmd_code = CCW_CMD_WRITE;
1038 ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1039 ch->ccw[1].count = 0;
1041 if (ctc_checkalloc_buffer(ch, 0)) {
1043 "%s: Could not allocate %s trans_skb, delaying "
1044 "allocation until first transfer\n",
1046 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1049 ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
1050 ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1051 ch->ccw[0].count = 0;
1053 ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */
1054 ch->ccw[2].flags = CCW_FLAG_SLI;
1055 ch->ccw[2].count = 0;
/* ccw[3..5] mirror ccw[0..2]; ccw[4] gets its CDA set per-skb later */
1057 memcpy(&ch->ccw[3], &ch->ccw[0], sizeof (struct ccw1) * 3);
1059 ch->ccw[4].flags &= ~CCW_FLAG_IDA;
1061 fsm_newstate(fi, CH_STATE_STARTWAIT);
1062 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1063 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1064 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1065 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1068 fsm_deltimer(&ch->timer);
1069 ccw_check_return_code(ch, rc, "initial HaltIO");
1072 ctc_pr_debug("ctc: %s(): leaving\n", __func__);
1077 * Shutdown a channel.
1079 * @param fi An instance of a channel statemachine.
1080 * @param event The event, just happened.
1081 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Shut down a channel: enter CH_STATE_TERM and halt the I/O under a
 * 5 s timer.  When invoked via CH_EVENT_STOP the ccwdev lock is not
 * yet held, so it is taken here.  On halt failure the previous state
 * is restored and the rc reported via ccw_check_return_code().
 * NOTE(review): oldstate/rc declarations and braces fall in gaps.
 */
1084 ch_action_haltio(fsm_instance * fi, int event, void *arg)
1086 struct channel *ch = (struct channel *) arg;
1087 unsigned long saveflags;
1091 DBF_TEXT(trace, 3, __FUNCTION__);
1092 fsm_deltimer(&ch->timer);
1093 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1094 saveflags = 0; /* avoids comp warning with
1095 spin_unlock_irqrestore */
1096 if (event == CH_EVENT_STOP) // only for STOP not yet locked
1097 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1098 oldstate = fsm_getstate(fi);
1099 fsm_newstate(fi, CH_STATE_TERM);
1100 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1101 if (event == CH_EVENT_STOP)
1102 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1105 fsm_deltimer(&ch->timer);
1106 fsm_newstate(fi, oldstate);
1108 ccw_check_return_code(ch, rc, "HaltIO in ch_action_haltio");
1113 * A channel has successfully been halted.
1114 * Cleanup it's queue and notify interface statemachine.
1116 * @param fi An instance of a channel statemachine.
1117 * @param event The event, just happened.
1118 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Channel successfully halted: enter CH_STATE_STOPPED, free the
 * transfer buffer, purge the queued skbs (io_queue, and for the write
 * channel also collect_queue under collect_lock) and notify the
 * interface statemachine with RXDOWN/TXDOWN as appropriate.
 * NOTE(review): braces and the else line fall in numbering gaps.
 */
1121 ch_action_stopped(fsm_instance * fi, int event, void *arg)
1123 struct channel *ch = (struct channel *) arg;
1124 struct net_device *dev = ch->netdev;
1126 DBF_TEXT(trace, 3, __FUNCTION__);
1127 fsm_deltimer(&ch->timer);
1128 fsm_newstate(fi, CH_STATE_STOPPED);
1129 if (ch->trans_skb != NULL) {
1130 clear_normalized_cda(&ch->ccw[1]);
1131 dev_kfree_skb(ch->trans_skb);
1132 ch->trans_skb = NULL;
1134 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1135 skb_queue_purge(&ch->io_queue);
1136 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1137 DEV_EVENT_RXDOWN, dev);
1139 ctc_purge_skb_queue(&ch->io_queue);
1140 spin_lock(&ch->collect_lock);
1141 ctc_purge_skb_queue(&ch->collect_queue);
1142 ch->collect_len = 0;
1143 spin_unlock(&ch->collect_lock);
1144 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1145 DEV_EVENT_TXDOWN, dev);
1150 * A stop command from device statemachine arrived and we are in
1151 * not operational mode. Set state to stopped.
1153 * @param fi An instance of a channel statemachine.
1154 * @param event The event, just happened.
1155 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Stop command received while not operational: simply record the
 * CH_STATE_STOPPED state (no I/O to halt).
 * NOTE(review): lines 1159/1161 (braces) are missing from this listing.
 */
1158 ch_action_stop(fsm_instance * fi, int event, void *arg)
1160 fsm_newstate(fi, CH_STATE_STOPPED);
1164 * A machine check for no path, not operational status or gone device has
1166 * Cleanup queue and notify interface statemachine.
1168 * @param fi An instance of a channel statemachine.
1169 * @param event The event, just happened.
1170 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Machine check (no path / not operational / gone device): enter
 * CH_STATE_NOTOP, purge the pending skb queues and notify the interface
 * statemachine with RXDOWN/TXDOWN.  Mirrors ch_action_stopped except it
 * does not free trans_skb.
 * NOTE(review): braces and the else line fall in numbering gaps.
 */
1173 ch_action_fail(fsm_instance * fi, int event, void *arg)
1175 struct channel *ch = (struct channel *) arg;
1176 struct net_device *dev = ch->netdev;
1178 DBF_TEXT(trace, 3, __FUNCTION__);
1179 fsm_deltimer(&ch->timer);
1180 fsm_newstate(fi, CH_STATE_NOTOP);
1181 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1182 skb_queue_purge(&ch->io_queue);
1183 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1184 DEV_EVENT_RXDOWN, dev);
1186 ctc_purge_skb_queue(&ch->io_queue);
1187 spin_lock(&ch->collect_lock);
1188 ctc_purge_skb_queue(&ch->collect_queue);
1189 ch->collect_len = 0;
1190 spin_unlock(&ch->collect_lock);
1191 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1192 DEV_EVENT_TXDOWN, dev);
1197 * Handle error during setup of channel.
1199 * @param fi An instance of a channel statemachine.
1200 * @param event The event, just happened.
1201 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Error during channel setup.  A remote-reset unit check in SETUPWAIT
 * means the peer is not set up yet: go to STARTRETRY and rearm a timer
 * (NOTE(review): the comment below says "10 secs" but the code arms
 * CTC_TIMEOUT_5SEC — comment/code mismatch inherited from the original).
 * Any other error moves the channel to RXERR/TXERR and signals
 * RXDOWN/TXDOWN to the interface statemachine.
 * NOTE(review): return statements and braces fall in numbering gaps.
 */
1204 ch_action_setuperr(fsm_instance * fi, int event, void *arg)
1206 struct channel *ch = (struct channel *) arg;
1207 struct net_device *dev = ch->netdev;
1209 DBF_TEXT(setup, 3, __FUNCTION__);
1211 * Special case: Got UC_RCRESET on setmode.
1212 * This means that remote side isn't setup. In this case
1213 * simply retry after some 10 secs...
1215 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
1216 ((event == CH_EVENT_UC_RCRESET) ||
1217 (event == CH_EVENT_UC_RSRESET))) {
1218 fsm_newstate(fi, CH_STATE_STARTRETRY);
1219 fsm_deltimer(&ch->timer);
1220 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1221 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1222 int rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1224 ccw_check_return_code(
1225 ch, rc, "HaltIO in ch_action_setuperr");
1230 ctc_pr_debug("%s: Error %s during %s channel setup state=%s\n",
1231 dev->name, ch_event_names[event],
1232 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
1233 fsm_getstate_str(fi));
1234 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1235 fsm_newstate(fi, CH_STATE_RXERR);
1236 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1237 DEV_EVENT_RXDOWN, dev);
1239 fsm_newstate(fi, CH_STATE_TXERR);
1240 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1241 DEV_EVENT_TXDOWN, dev);
1246 * Restart a channel after an error.
1248 * @param fi An instance of a channel statemachine.
1249 * @param event The event, just happened.
1250 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Restart a channel after an error: halt the current I/O under a 5 s
 * timer and go to CH_STATE_STARTWAIT.  When invoked from the timer
 * (CH_EVENT_TIMER) the ccwdev lock is not yet held, so it is taken
 * here.  On halt failure the previous state is restored.
 * NOTE(review): oldstate/rc declarations and braces fall in gaps.
 */
1253 ch_action_restart(fsm_instance * fi, int event, void *arg)
1255 unsigned long saveflags;
1259 struct channel *ch = (struct channel *) arg;
1260 struct net_device *dev = ch->netdev;
1262 DBF_TEXT(trace, 3, __FUNCTION__);
1263 fsm_deltimer(&ch->timer);
1264 ctc_pr_debug("%s: %s channel restart\n", dev->name,
1265 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1266 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1267 oldstate = fsm_getstate(fi);
1268 fsm_newstate(fi, CH_STATE_STARTWAIT);
1269 saveflags = 0; /* avoids compiler warning with
1270 spin_unlock_irqrestore */
1271 if (event == CH_EVENT_TIMER) // only for timer not yet locked
1272 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1273 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1274 if (event == CH_EVENT_TIMER)
1275 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1278 fsm_deltimer(&ch->timer);
1279 fsm_newstate(fi, oldstate);
1281 ccw_check_return_code(ch, rc, "HaltIO in ch_action_restart");
1286 * Handle error during RX initial handshake (exchange of
1287 * 0-length block header)
1289 * @param fi An instance of a channel statemachine.
1290 * @param event The event, just happened.
1291 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Error during the RX initial handshake.  On timeout, retry up to 3
 * times via ch_action_restart(); after that go to CH_STATE_RXERR and
 * signal DEV_EVENT_RXDOWN.  Non-timeout events are only logged.
 * NOTE(review): braces and the else line fall in numbering gaps.
 */
1294 ch_action_rxiniterr(fsm_instance * fi, int event, void *arg)
1296 struct channel *ch = (struct channel *) arg;
1297 struct net_device *dev = ch->netdev;
1299 DBF_TEXT(setup, 3, __FUNCTION__);
1300 if (event == CH_EVENT_TIMER) {
1301 fsm_deltimer(&ch->timer);
1302 ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
1303 if (ch->retry++ < 3)
1304 ch_action_restart(fi, event, arg);
1306 fsm_newstate(fi, CH_STATE_RXERR);
1307 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1308 DEV_EVENT_RXDOWN, dev);
1311 ctc_pr_warn("%s: Error during RX init handshake\n", dev->name);
1315 * Notify device statemachine if we gave up initialization
1318 * @param fi An instance of a channel statemachine.
1319 * @param event The event, just happened.
1320 * @param arg Generic pointer, casted from channel * upon call.
/*
 * RX initialization was given up: enter CH_STATE_RXERR, warn that an
 * RX <-> RX mis-cabling was detected and signal DEV_EVENT_RXDOWN.
 * NOTE(review): braces fall in numbering gaps (1324, 1333).
 */
1323 ch_action_rxinitfail(fsm_instance * fi, int event, void *arg)
1325 struct channel *ch = (struct channel *) arg;
1326 struct net_device *dev = ch->netdev;
1328 DBF_TEXT(setup, 3, __FUNCTION__);
1329 fsm_newstate(fi, CH_STATE_RXERR);
1330 ctc_pr_warn("%s: RX initialization failed\n", dev->name);
1331 ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
1332 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1336 * Handle RX Unit check remote reset (remote disconnected)
1338 * @param fi An instance of a channel statemachine.
1339 * @param event The event, just happened.
1340 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Remote disconnect on the RX channel: signal RXDOWN and TXDOWN to the
 * interface statemachine, put both this channel and its WRITE partner
 * into CH_STATE_DTERM, and halt the I/O on both ccw devices to
 * re-initialize the connection.
 * NOTE(review): braces and the ch->id argument line fall in gaps.
 */
1343 ch_action_rxdisc(fsm_instance * fi, int event, void *arg)
1345 struct channel *ch = (struct channel *) arg;
1346 struct channel *ch2;
1347 struct net_device *dev = ch->netdev;
1349 DBF_TEXT(trace, 3, __FUNCTION__);
1350 fsm_deltimer(&ch->timer);
1351 ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
1355 * Notify device statemachine
1357 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1358 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);
1360 fsm_newstate(fi, CH_STATE_DTERM);
1361 ch2 = ((struct ctc_priv *) dev->priv)->channel[WRITE];
1362 fsm_newstate(ch2->fsm, CH_STATE_DTERM);
/* NOTE(review): the ccwdev-lock handling for these halts, if any, is
 * not visible in this listing (gap at 1363). */
1364 ccw_device_halt(ch->cdev, (unsigned long) ch);
1365 ccw_device_halt(ch2->cdev, (unsigned long) ch2);
1369 * Handle error during TX channel initialization.
1371 * @param fi An instance of a channel statemachine.
1372 * @param event The event, just happened.
1373 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Error during the TX initial handshake.  On timeout, retry up to 3
 * times via ch_action_restart(); after that go to CH_STATE_TXERR and
 * signal DEV_EVENT_TXDOWN.  Non-timeout events are only logged.
 * Mirror image of ch_action_rxiniterr for the write channel.
 * NOTE(review): braces and the else line fall in numbering gaps.
 */
1376 ch_action_txiniterr(fsm_instance * fi, int event, void *arg)
1378 struct channel *ch = (struct channel *) arg;
1379 struct net_device *dev = ch->netdev;
1381 DBF_TEXT(setup, 2, __FUNCTION__);
1382 if (event == CH_EVENT_TIMER) {
1383 fsm_deltimer(&ch->timer);
1384 ctc_pr_debug("%s: Timeout during TX init handshake\n", dev->name);
1385 if (ch->retry++ < 3)
1386 ch_action_restart(fi, event, arg);
1388 fsm_newstate(fi, CH_STATE_TXERR);
1389 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1390 DEV_EVENT_TXDOWN, dev);
1393 ctc_pr_warn("%s: Error during TX init handshake\n", dev->name);
/* TX timed out or was reset: after 3 failed retries restart the whole
 * channel; otherwise re-drive the pending skb at the head of io_queue
 * through ccw[4] (re-building the IDAL) and restart ccw_device_start on
 * channel program ccw[3].  The ccwdev lock is taken only for the TIMER
 * event because other callers already hold it.
 * NOTE(review): excerpt is missing lines (else/brace structure, rc decl);
 * code kept byte-identical. */
1397  * Handle TX timeout by retrying operation.
1399  * @param fi    An instance of a channel statemachine.
1400  * @param event The event, just happened.
1401  * @param arg   Generic pointer, casted from channel * upon call.
1404 ch_action_txretry(fsm_instance * fi, int event, void *arg)
1406 struct channel *ch = (struct channel *) arg;
1407 struct net_device *dev = ch->netdev;
1408 unsigned long saveflags;
1410 DBF_TEXT(trace, 4, __FUNCTION__);
1411 fsm_deltimer(&ch->timer);
1412 if (ch->retry++ > 3) {
1413 ctc_pr_debug("%s: TX retry failed, restarting channel\n",
1415 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1416 DEV_EVENT_TXDOWN, dev);
1417 ch_action_restart(fi, event, arg);
1419 struct sk_buff *skb;
1421 ctc_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
1422 if ((skb = skb_peek(&ch->io_queue))) {
1425 clear_normalized_cda(&ch->ccw[4]);
1426 ch->ccw[4].count = skb->len;
1427 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1429 "%s: IDAL alloc failed, chan restart\n",
1431 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1432 DEV_EVENT_TXDOWN, dev);
1433 ch_action_restart(fi, event, arg);
1436 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1437 saveflags = 0;	/* avoids compiler warning with
1438 spin_unlock_irqrestore */
1439 if (event == CH_EVENT_TIMER) // only for TIMER not yet locked
1440 spin_lock_irqsave(get_ccwdev_lock(ch->cdev),
1442 rc = ccw_device_start(ch->cdev, &ch->ccw[3],
1443 (unsigned long) ch, 0xff, 0);
1444 if (event == CH_EVENT_TIMER)
1445 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
1448 fsm_deltimer(&ch->timer);
1449 ccw_check_return_code(ch, rc, "TX in ch_action_txretry");
1450 ctc_purge_skb_queue(&ch->io_queue);
/* Fatal I/O error on a channel: enter RXERR or TXERR depending on the
 * channel direction and notify the device FSM accordingly.
 * NOTE(review): excerpt is missing lines (the else keyword before the TX
 * branch is not visible); code kept byte-identical. */
1458  * Handle fatal errors during an I/O command.
1460  * @param fi    An instance of a channel statemachine.
1461  * @param event The event, just happened.
1462  * @param arg   Generic pointer, casted from channel * upon call.
1465 ch_action_iofatal(fsm_instance * fi, int event, void *arg)
1467 struct channel *ch = (struct channel *) arg;
1468 struct net_device *dev = ch->netdev;
1470 DBF_TEXT(trace, 3, __FUNCTION__);
1471 fsm_deltimer(&ch->timer);
1472 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1473 ctc_pr_debug("%s: RX I/O error\n", dev->name);
1474 fsm_newstate(fi, CH_STATE_RXERR);
1475 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1476 DEV_EVENT_RXDOWN, dev);
1478 ctc_pr_debug("%s: TX I/O error\n", dev->name);
1479 fsm_newstate(fi, CH_STATE_TXERR);
1480 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1481 DEV_EVENT_TXDOWN, dev);
/* Treat the error as fatal (ch_action_iofatal), then schedule a device
 * restart in 1000 ticks via the private restart_timer. */
1486 ch_action_reinit(fsm_instance *fi, int event, void *arg)
1488 struct channel *ch = (struct channel *)arg;
1489 struct net_device *dev = ch->netdev;
1490 struct ctc_priv *privptr = dev->priv;
1492 DBF_TEXT(trace, 4, __FUNCTION__);
1493 ch_action_iofatal(fi, event, arg);
1494 fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
/* Channel statemachine transition table: one {state, event, action} entry
 * per valid transition; events not listed for a state are ignored by the
 * FSM framework.  CH_FSM_LEN below is the entry count passed to init_fsm. */
1498  * The statemachine for a channel.
1500 static const fsm_node ch_fsm[] = {
1501 {CH_STATE_STOPPED, CH_EVENT_STOP, fsm_action_nop },
1502 {CH_STATE_STOPPED, CH_EVENT_START, ch_action_start },
1503 {CH_STATE_STOPPED, CH_EVENT_FINSTAT, fsm_action_nop },
1504 {CH_STATE_STOPPED, CH_EVENT_MC_FAIL, fsm_action_nop },
1506 {CH_STATE_NOTOP, CH_EVENT_STOP, ch_action_stop },
1507 {CH_STATE_NOTOP, CH_EVENT_START, fsm_action_nop },
1508 {CH_STATE_NOTOP, CH_EVENT_FINSTAT, fsm_action_nop },
1509 {CH_STATE_NOTOP, CH_EVENT_MC_FAIL, fsm_action_nop },
1510 {CH_STATE_NOTOP, CH_EVENT_MC_GOOD, ch_action_start },
1512 {CH_STATE_STARTWAIT, CH_EVENT_STOP, ch_action_haltio },
1513 {CH_STATE_STARTWAIT, CH_EVENT_START, fsm_action_nop },
1514 {CH_STATE_STARTWAIT, CH_EVENT_FINSTAT, ch_action_setmode },
1515 {CH_STATE_STARTWAIT, CH_EVENT_TIMER, ch_action_setuperr },
1516 {CH_STATE_STARTWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1517 {CH_STATE_STARTWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1518 {CH_STATE_STARTWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1520 {CH_STATE_STARTRETRY, CH_EVENT_STOP, ch_action_haltio },
1521 {CH_STATE_STARTRETRY, CH_EVENT_TIMER, ch_action_setmode },
1522 {CH_STATE_STARTRETRY, CH_EVENT_FINSTAT, fsm_action_nop },
1523 {CH_STATE_STARTRETRY, CH_EVENT_MC_FAIL, ch_action_fail },
1525 {CH_STATE_SETUPWAIT, CH_EVENT_STOP, ch_action_haltio },
1526 {CH_STATE_SETUPWAIT, CH_EVENT_START, fsm_action_nop },
1527 {CH_STATE_SETUPWAIT, CH_EVENT_FINSTAT, ch_action_firstio },
1528 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RCRESET, ch_action_setuperr },
1529 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RSRESET, ch_action_setuperr },
1530 {CH_STATE_SETUPWAIT, CH_EVENT_TIMER, ch_action_setmode },
1531 {CH_STATE_SETUPWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1532 {CH_STATE_SETUPWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1533 {CH_STATE_SETUPWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1535 {CH_STATE_RXINIT, CH_EVENT_STOP, ch_action_haltio },
1536 {CH_STATE_RXINIT, CH_EVENT_START, fsm_action_nop },
1537 {CH_STATE_RXINIT, CH_EVENT_FINSTAT, ch_action_rxidle },
1538 {CH_STATE_RXINIT, CH_EVENT_UC_RCRESET, ch_action_rxiniterr },
1539 {CH_STATE_RXINIT, CH_EVENT_UC_RSRESET, ch_action_rxiniterr },
1540 {CH_STATE_RXINIT, CH_EVENT_TIMER, ch_action_rxiniterr },
1541 {CH_STATE_RXINIT, CH_EVENT_ATTNBUSY, ch_action_rxinitfail },
1542 {CH_STATE_RXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1543 {CH_STATE_RXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1544 {CH_STATE_RXINIT, CH_EVENT_UC_ZERO, ch_action_firstio },
1545 {CH_STATE_RXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1547 {CH_STATE_RXIDLE, CH_EVENT_STOP, ch_action_haltio },
1548 {CH_STATE_RXIDLE, CH_EVENT_START, fsm_action_nop },
1549 {CH_STATE_RXIDLE, CH_EVENT_FINSTAT, ch_action_rx },
1550 {CH_STATE_RXIDLE, CH_EVENT_UC_RCRESET, ch_action_rxdisc },
1551 //      {CH_STATE_RXIDLE,       CH_EVENT_UC_RSRESET,    ch_action_rxretry    },
1552 {CH_STATE_RXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1553 {CH_STATE_RXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1554 {CH_STATE_RXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1555 {CH_STATE_RXIDLE, CH_EVENT_UC_ZERO, ch_action_rx },
1557 {CH_STATE_TXINIT, CH_EVENT_STOP, ch_action_haltio },
1558 {CH_STATE_TXINIT, CH_EVENT_START, fsm_action_nop },
1559 {CH_STATE_TXINIT, CH_EVENT_FINSTAT, ch_action_txidle },
1560 {CH_STATE_TXINIT, CH_EVENT_UC_RCRESET, ch_action_txiniterr },
1561 {CH_STATE_TXINIT, CH_EVENT_UC_RSRESET, ch_action_txiniterr },
1562 {CH_STATE_TXINIT, CH_EVENT_TIMER, ch_action_txiniterr },
1563 {CH_STATE_TXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1564 {CH_STATE_TXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1565 {CH_STATE_TXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1567 {CH_STATE_TXIDLE, CH_EVENT_STOP, ch_action_haltio },
1568 {CH_STATE_TXIDLE, CH_EVENT_START, fsm_action_nop },
1569 {CH_STATE_TXIDLE, CH_EVENT_FINSTAT, ch_action_firstio },
1570 {CH_STATE_TXIDLE, CH_EVENT_UC_RCRESET, fsm_action_nop },
1571 {CH_STATE_TXIDLE, CH_EVENT_UC_RSRESET, fsm_action_nop },
1572 {CH_STATE_TXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1573 {CH_STATE_TXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1574 {CH_STATE_TXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1576 {CH_STATE_TERM, CH_EVENT_STOP, fsm_action_nop },
1577 {CH_STATE_TERM, CH_EVENT_START, ch_action_restart },
1578 {CH_STATE_TERM, CH_EVENT_FINSTAT, ch_action_stopped },
1579 {CH_STATE_TERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1580 {CH_STATE_TERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1581 {CH_STATE_TERM, CH_EVENT_MC_FAIL, ch_action_fail },
1583 {CH_STATE_DTERM, CH_EVENT_STOP, ch_action_haltio },
1584 {CH_STATE_DTERM, CH_EVENT_START, ch_action_restart },
1585 {CH_STATE_DTERM, CH_EVENT_FINSTAT, ch_action_setmode },
1586 {CH_STATE_DTERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1587 {CH_STATE_DTERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1588 {CH_STATE_DTERM, CH_EVENT_MC_FAIL, ch_action_fail },
1590 {CH_STATE_TX, CH_EVENT_STOP, ch_action_haltio },
1591 {CH_STATE_TX, CH_EVENT_START, fsm_action_nop },
1592 {CH_STATE_TX, CH_EVENT_FINSTAT, ch_action_txdone },
1593 {CH_STATE_TX, CH_EVENT_UC_RCRESET, ch_action_txretry },
1594 {CH_STATE_TX, CH_EVENT_UC_RSRESET, ch_action_txretry },
1595 {CH_STATE_TX, CH_EVENT_TIMER, ch_action_txretry },
1596 {CH_STATE_TX, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1597 {CH_STATE_TX, CH_EVENT_IO_EIO, ch_action_reinit },
1598 {CH_STATE_TX, CH_EVENT_MC_FAIL, ch_action_fail },
1600 {CH_STATE_RXERR, CH_EVENT_STOP, ch_action_haltio },
1601 {CH_STATE_TXERR, CH_EVENT_STOP, ch_action_haltio },
1602 {CH_STATE_TXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1603 {CH_STATE_RXERR, CH_EVENT_MC_FAIL, ch_action_fail },
/* Number of entries in ch_fsm (array-size idiom; valid because ch_fsm is
 * a true array in this translation unit). */
1606 static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node);
1609 * Functions related to setup and device detection.
1610 *****************************************************************************/
/* Order two channel ids ("ch-<busid>" strings): skip the first 5
 * characters, then compare the remaining hex device numbers numerically.
 * Used by add_channel to keep the channel list sorted.
 * NOTE(review): excerpt is missing lines (loop body advancing id1/id2 is
 * not visible); code kept byte-identical. */
1613 less_than(char *id1, char *id2)
1617 for (i = 0; i < 5; i++) {
1621 dev1 = simple_strtoul(id1, &id1, 16);
1622 dev2 = simple_strtoul(id2, &id2, 16);
1624 return (dev1 < dev2);
/* Allocate and initialize a new channel (struct channel, its 8 static
 * CCWs, its FSM and irb buffer) and insert it into the global sorted
 * channel list, reusing an existing entry if the id is already present.
 * NOTE(review): excerpt is missing lines (error-path frees/returns,
 * list-linking statements); code kept byte-identical. */
1628  * Add a new channel to the list of channels.
1629  * Keeps the channel list sorted.
1631  * @param cdev  The ccw_device to be added.
1632  * @param type  The type class of the new channel.
1634  * @return 0 on success, !0 on error.
1637 add_channel(struct ccw_device *cdev, enum channel_types type)
1639 struct channel **c = &channels;
1642 DBF_TEXT(trace, 2, __FUNCTION__);
1644 (struct channel *) kmalloc(sizeof (struct channel),
1645 GFP_KERNEL)) == NULL) {
1646 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1649 memset(ch, 0, sizeof (struct channel));
1650 if ((ch->ccw = kmalloc(8*sizeof(struct ccw1),
1651 GFP_KERNEL | GFP_DMA)) == NULL) {
1653 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1657 memset(ch->ccw, 0, 8*sizeof(struct ccw1)); // assure all flags and counters are reset
1660 * "static" ccws are used in the following way:
1662 * ccw[0..2] (Channel program for generic I/O):
1664 *            1: read or write (depending on direction) with fixed
1665 *               buffer (idal allocated once when buffer is allocated)
1667 * ccw[3..5] (Channel program for direct write of packets)
1669 *            4: write (idal allocated on every write).
1671 * ccw[6..7] (Channel program for initial channel setup):
1672 *            6: set extended mode
1675 * ch->ccw[0..5] are initialized in ch_action_start because
1676 * the channel's direction is yet unknown here.
1678 ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
1679 ch->ccw[6].flags = CCW_FLAG_SLI;
1681 ch->ccw[7].cmd_code = CCW_CMD_NOOP;
1682 ch->ccw[7].flags = CCW_FLAG_SLI;
1685 snprintf(ch->id, CTC_ID_SIZE, "ch-%s", cdev->dev.bus_id);
1687 ch->fsm = init_fsm(ch->id, ch_state_names,
1688 ch_event_names, NR_CH_STATES, NR_CH_EVENTS,
1689 ch_fsm, CH_FSM_LEN, GFP_KERNEL);
1690 if (ch->fsm == NULL) {
1691 ctc_pr_warn("ctc: Could not create FSM in add_channel\n");
1696 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1697 if ((ch->irb = kmalloc(sizeof (struct irb),
1698 GFP_KERNEL)) == NULL) {
1699 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1705 memset(ch->irb, 0, sizeof (struct irb));
/* Walk the sorted list to the insertion point for this id. */
1706 while (*c && less_than((*c)->id, ch->id))
1708 if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
1710 "ctc: add_channel: device %s already in list, "
1711 "using old entry\n", (*c)->id);
1719 spin_lock_init(&ch->collect_lock);
1721 fsm_settimer(ch->fsm, &ch->timer);
1722 skb_queue_head_init(&ch->io_queue);
1723 skb_queue_head_init(&ch->collect_queue);
/* Mark a channel as no longer in use and return its FSM to IDLE; the
 * channel stays on the global list (contrast with channel_remove). */
1730  * Release a specific channel in the channel list.
1732  * @param ch Pointer to channel struct to be released.
1735 channel_free(struct channel *ch)
1737 ch->flags &= ~CHANNEL_FLAGS_INUSE;
1738 fsm_newstate(ch->fsm, CH_STATE_IDLE);
/* Unlink a channel from the global list and release its resources
 * (timer, CCW data addresses, trans_skb).
 * NOTE(review): excerpt is missing lines (list unlinking, kfree calls);
 * code kept byte-identical. */
1742  * Remove a specific channel in the channel list.
1744  * @param ch Pointer to channel struct to be released.
1747 channel_remove(struct channel *ch)
1749 struct channel **c = &channels;
1751 DBF_TEXT(trace, 2, __FUNCTION__);
1759 fsm_deltimer(&ch->timer);
1761 clear_normalized_cda(&ch->ccw[4]);
1762 if (ch->trans_skb != NULL) {
1763 clear_normalized_cda(&ch->ccw[1]);
1764 dev_kfree_skb(ch->trans_skb);
1776 * Get a specific channel from the channel list.
1778 * @param type Type of channel we are interested in.
1779 * @param id Id of channel we are interested in.
1780 * @param direction Direction we want to use this channel for.
1782 * @return Pointer to a channel or NULL if no matching channel available.
1784 static struct channel
1786 channel_get(enum channel_types type, char *id, int direction)
1788 struct channel *ch = channels;
1790 DBF_TEXT(trace, 3, __FUNCTION__);
1792 ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
1793 __func__, id, type);
1796 while (ch && ((strncmp(ch->id, id, CTC_ID_SIZE)) || (ch->type != type))) {
1798 ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d\n",
1799 __func__, ch, ch->id, ch->type);
1804 ctc_pr_debug("ctc: %s(): ch=0x%pq (id=%s, type=%d\n",
1805 __func__, ch, ch->id, ch->type);
1808 ctc_pr_warn("ctc: %s(): channel with id %s "
1809 "and type %d not found in channel list\n",
1810 __func__, id, type);
1812 if (ch->flags & CHANNEL_FLAGS_INUSE)
1815 ch->flags |= CHANNEL_FLAGS_INUSE;
1816 ch->flags &= ~CHANNEL_FLAGS_RWMASK;
1817 ch->flags |= (direction == WRITE)
1818 ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
1819 fsm_newstate(ch->fsm, CH_STATE_STOPPED);
/* Map an interface-name prefix to a channel type class: "ctc*" ->
 * parallel, "escon*" -> escon, anything else -> channel_type_unknown.
 * NOTE(review): excerpt is missing lines (braces, return statement);
 * code kept byte-identical. */
1826  * Return the channel type by name.
1828  * @param name Name of network interface.
1830  * @return Type class of channel to be used for that interface.
1832 static enum channel_types inline
1833 extract_channel_media(char *name)
1835 enum channel_types ret = channel_type_unknown;
1838 if (strncmp(name, "ctc", 3) == 0)
1839 ret = channel_type_parallel;
1840 if (strncmp(name, "escon", 5) == 0)
1841 ret = channel_type_escon;
/* Check whether the irb pointer passed to the irq handler is actually an
 * ERR_PTR-encoded error; log -EIO / -ETIMEDOUT / unknown codes and return
 * the error value (callers treat non-zero as "drop this interrupt").
 * NOTE(review): excerpt is missing lines (the IS_ERR early-return and the
 * switch case labels are not visible); code kept byte-identical. */
1847 __ctc_check_irb_error(struct ccw_device *cdev, struct irb *irb)
1852 switch (PTR_ERR(irb)) {
1854 ctc_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
1855 //		CTC_DBF_TEXT(trace, 2, "ckirberr");
1856 //		CTC_DBF_TEXT_(trace, 2, "  rc%d", -EIO);
1859 ctc_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
1860 //		CTC_DBF_TEXT(trace, 2, "ckirberr");
1861 //		CTC_DBF_TEXT_(trace, 2, "  rc%d", -ETIMEDOUT);
1864 ctc_pr_warn("unknown error %ld on device %s\n", PTR_ERR(irb),
1866 //		CTC_DBF_TEXT(trace, 2, "ckirberr");
1867 //		CTC_DBF_TEXT(trace, 2, "  rc???");
1869 return PTR_ERR(irb);
/* Main interrupt handler: validate the irb, resolve the channel from the
 * ccwgroup driver data, copy the irb into the channel, then translate the
 * subchannel/device status bits into channel-FSM events (SC_UNKNOWN, unit
 * check, ATTNBUSY/BUSY, ATTN, FINSTAT, or generic IRQ).
 * NOTE(review): excerpt is missing lines (early returns after the
 * unsolicited-irq and dev==NULL branches); code kept byte-identical. */
1875  * @param cdev    The ccw_device the interrupt is for.
1876  * @param intparm interruption parameter.
1877  * @param irb     interruption response block.
1880 ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1883 struct net_device *dev;
1884 struct ctc_priv *priv;
1886 DBF_TEXT(trace, 5, __FUNCTION__);
1887 if (__ctc_check_irb_error(cdev, irb))
1890 /* Check for unsolicited interrupts. */
1891 if (!cdev->dev.driver_data) {
1892 ctc_pr_warn("ctc: Got unsolicited irq: %s c-%02x d-%02x\n",
1893 cdev->dev.bus_id, irb->scsw.cstat,
1898 priv = ((struct ccwgroup_device *)cdev->dev.driver_data)
1901 /* Try to extract channel from driver data. */
1902 if (priv->channel[READ]->cdev == cdev)
1903 ch = priv->channel[READ];
1904 else if (priv->channel[WRITE]->cdev == cdev)
1905 ch = priv->channel[WRITE];
1907 ctc_pr_err("ctc: Can't determine channel for interrupt, "
1908 "device %s\n", cdev->dev.bus_id);
1912 dev = (struct net_device *) (ch->netdev);
1914 ctc_pr_crit("ctc: ctc_irq_handler dev=NULL bus_id=%s, ch=0x%p\n",
1915 cdev->dev.bus_id, ch);
1920 ctc_pr_debug("%s: interrupt for device: %s received c-%02x d-%02x\n",
1921 dev->name, ch->id, irb->scsw.cstat, irb->scsw.dstat);
1924 /* Copy interruption response block. */
1925 memcpy(ch->irb, irb, sizeof(struct irb));
1927 /* Check for good subchannel return code, otherwise error message */
1928 if (ch->irb->scsw.cstat) {
1929 fsm_event(ch->fsm, CH_EVENT_SC_UNKNOWN, ch);
1930 ctc_pr_warn("%s: subchannel check for device: %s - %02x %02x\n",
1931 dev->name, ch->id, ch->irb->scsw.cstat,
1932 ch->irb->scsw.dstat);
1936 /* Check the reason-code of a unit check */
1937 if (ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
1938 ccw_unit_check(ch, ch->irb->ecw[0]);
1941 if (ch->irb->scsw.dstat & DEV_STAT_BUSY) {
1942 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION)
1943 fsm_event(ch->fsm, CH_EVENT_ATTNBUSY, ch);
1945 fsm_event(ch->fsm, CH_EVENT_BUSY, ch);
1948 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION) {
1949 fsm_event(ch->fsm, CH_EVENT_ATTN, ch);
1952 if ((ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
1953 (ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
1954 (ch->irb->scsw.stctl ==
1955 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
1956 fsm_event(ch->fsm, CH_EVENT_FINSTAT, ch);
1958 fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
/* Device FSM action: cancel any pending restart timer, enter
 * STARTWAIT_RXTX, and send CH_EVENT_START to both the READ and WRITE
 * channel statemachines. */
1963  * Actions for interface - statemachine.
1964  *****************************************************************************/
1967  * Startup channels by sending CH_EVENT_START to each channel.
1969  * @param fi    An instance of an interface statemachine.
1970  * @param event The event, just happened.
1971  * @param arg   Generic pointer, casted from struct net_device * upon call.
1974 dev_action_start(fsm_instance * fi, int event, void *arg)
1976 struct net_device *dev = (struct net_device *) arg;
1977 struct ctc_priv *privptr = dev->priv;
1980 DBF_TEXT(setup, 3, __FUNCTION__);
1981 fsm_deltimer(&privptr->restart_timer);
1982 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
1983 for (direction = READ; direction <= WRITE; direction++) {
1984 struct channel *ch = privptr->channel[direction];
1985 fsm_event(ch->fsm, CH_EVENT_START, ch);
/* Device FSM action: enter STOPWAIT_RXTX and send CH_EVENT_STOP to both
 * channel statemachines. */
1990  * Shutdown channels by sending CH_EVENT_STOP to each channel.
1992  * @param fi    An instance of an interface statemachine.
1993  * @param event The event, just happened.
1994  * @param arg   Generic pointer, casted from struct net_device * upon call.
1997 dev_action_stop(fsm_instance * fi, int event, void *arg)
1999 struct net_device *dev = (struct net_device *) arg;
2000 struct ctc_priv *privptr = dev->priv;
2003 DBF_TEXT(trace, 3, __FUNCTION__);
2004 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2005 for (direction = READ; direction <= WRITE; direction++) {
2006 struct channel *ch = privptr->channel[direction];
2007 fsm_event(ch->fsm, CH_EVENT_STOP, ch);
/* Restart the device: stop both channels now and schedule a delayed
 * DEV_EVENT_START (CTC_TIMEOUT_5SEC) via the restart timer. */
2011 dev_action_restart(fsm_instance *fi, int event, void *arg)
2013 struct net_device *dev = (struct net_device *)arg;
2014 struct ctc_priv *privptr = dev->priv;
2016 DBF_TEXT(trace, 3, __FUNCTION__);
2017 ctc_pr_debug("%s: Restarting\n", dev->name);
2018 dev_action_stop(fi, event, arg);
2019 fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
2020 fsm_addtimer(&privptr->restart_timer, CTC_TIMEOUT_5SEC,
2021 DEV_EVENT_START, dev);
/* A channel (RX or TX, per the event) came up: advance the device state;
 * when both directions are up, enter DEV_STATE_RUNNING and clear the
 * device busy flag.
 * NOTE(review): excerpt is missing lines (break statements, closing
 * braces); code kept byte-identical. */
2025  * Called from channel statemachine
2026  * when a channel is up and running.
2028  * @param fi    An instance of an interface statemachine.
2029  * @param event The event, just happened.
2030  * @param arg   Generic pointer, casted from struct net_device * upon call.
2033 dev_action_chup(fsm_instance * fi, int event, void *arg)
2035 struct net_device *dev = (struct net_device *) arg;
2037 DBF_TEXT(trace, 3, __FUNCTION__);
2038 switch (fsm_getstate(fi)) {
2039 case DEV_STATE_STARTWAIT_RXTX:
2040 if (event == DEV_EVENT_RXUP)
2041 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2043 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2045 case DEV_STATE_STARTWAIT_RX:
2046 if (event == DEV_EVENT_RXUP) {
2047 fsm_newstate(fi, DEV_STATE_RUNNING);
2048 ctc_pr_info("%s: connected with remote side\n",
2050 ctc_clear_busy(dev);
2053 case DEV_STATE_STARTWAIT_TX:
2054 if (event == DEV_EVENT_TXUP) {
2055 fsm_newstate(fi, DEV_STATE_RUNNING);
2056 ctc_pr_info("%s: connected with remote side\n",
2058 ctc_clear_busy(dev);
2061 case DEV_STATE_STOPWAIT_TX:
2062 if (event == DEV_EVENT_RXUP)
2063 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2065 case DEV_STATE_STOPWAIT_RX:
2066 if (event == DEV_EVENT_TXUP)
2067 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
/* A channel went down: walk the device state back toward STARTWAIT_* /
 * STOPWAIT_* / STOPPED depending on which direction (RX/TX) dropped.
 * NOTE(review): excerpt is missing lines (break statements, closing
 * braces); code kept byte-identical. */
2073  * Called from channel statemachine
2074  * when a channel has been shutdown.
2076  * @param fi    An instance of an interface statemachine.
2077  * @param event The event, just happened.
2078  * @param arg   Generic pointer, casted from struct net_device * upon call.
2081 dev_action_chdown(fsm_instance * fi, int event, void *arg)
2084 DBF_TEXT(trace, 3, __FUNCTION__);
2085 switch (fsm_getstate(fi)) {
2086 case DEV_STATE_RUNNING:
2087 if (event == DEV_EVENT_TXDOWN)
2088 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2090 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2092 case DEV_STATE_STARTWAIT_RX:
2093 if (event == DEV_EVENT_TXDOWN)
2094 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2096 case DEV_STATE_STARTWAIT_TX:
2097 if (event == DEV_EVENT_RXDOWN)
2098 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2100 case DEV_STATE_STOPWAIT_RXTX:
2101 if (event == DEV_EVENT_TXDOWN)
2102 fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2104 fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2106 case DEV_STATE_STOPWAIT_RX:
2107 if (event == DEV_EVENT_RXDOWN)
2108 fsm_newstate(fi, DEV_STATE_STOPPED);
2110 case DEV_STATE_STOPWAIT_TX:
2111 if (event == DEV_EVENT_TXDOWN)
2112 fsm_newstate(fi, DEV_STATE_STOPPED);
/* Device (interface) statemachine transition table; DEV_FSM_LEN below is
 * the entry count passed to init_fsm. */
2117 static const fsm_node dev_fsm[] = {
2118 {DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start},
2120 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
2121 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2122 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2123 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2125 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
2126 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2127 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2128 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
2129 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2131 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
2132 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2133 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2134 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
2135 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2137 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
2138 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
2139 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
2140 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2141 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2142 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2144 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
2145 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2146 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2147 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
2148 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2150 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
2151 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2152 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2153 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
2154 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2156 {DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
2157 {DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
2158 {DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
2159 {DEV_STATE_RUNNING, DEV_EVENT_TXUP, fsm_action_nop },
2160 {DEV_STATE_RUNNING, DEV_EVENT_RXUP, fsm_action_nop },
2161 {DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
/* Number of entries in dev_fsm (array-size idiom). */
2164 static const int DEV_FSM_LEN = sizeof (dev_fsm) / sizeof (fsm_node);
/* Send one skb on the WRITE channel.  If the channel is not TXIDLE the
 * skb is queued on collect_queue (bounded by max_bufsize) for a later
 * multi-write; otherwise a link-level header and 2-byte block length are
 * prepended and the skb is started immediately via ccw[3] (direct IDAL)
 * or, if IDAL allocation fails, copied into trans_skb and sent via the
 * fixed-buffer program.  The >2G check works around broken IDAL support.
 * NOTE(review): excerpt is missing many lines (returns, else branches,
 * ccw_idx/rc declarations); code kept byte-identical. */
2167  * Transmit a packet.
2168  * This is a helper function for ctc_tx().
2170  * @param ch Channel to be used for sending.
2171  * @param skb Pointer to struct sk_buff of packet to send.
2172  *            The linklevel header has already been set up
2175  * @return 0 on success, -ERRNO on failure. (Never fails.)
2178 transmit_skb(struct channel *ch, struct sk_buff *skb)
2180 unsigned long saveflags;
2181 struct ll_header header;
2184 DBF_TEXT(trace, 5, __FUNCTION__);
2185 /* we need to acquire the lock for testing the state
2186 * otherwise we can have an IRQ changing the state to
2187 * TXIDLE after the test but before acquiring the lock.
2189 spin_lock_irqsave(&ch->collect_lock, saveflags);
2190 if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
2191 int l = skb->len + LL_HEADER_LENGTH;
2193 if (ch->collect_len + l > ch->max_bufsize - 2) {
2194 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2197 atomic_inc(&skb->users);
2199 header.type = skb->protocol;
2201 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2203 skb_queue_tail(&ch->collect_queue, skb);
2204 ch->collect_len += l;
2206 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2210 struct sk_buff *nskb;
2212 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2214 * Protect skb against beeing free'd by upper
2217 atomic_inc(&skb->users);
2218 ch->prof.txlen += skb->len;
2219 header.length = skb->len + LL_HEADER_LENGTH;
2220 header.type = skb->protocol;
2222 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2224 block_len = skb->len + 2;
2225 *((__u16 *) skb_push(skb, 2)) = block_len;
2228 * IDAL support in CTC is broken, so we have to
2229 * care about skb's above 2G ourselves.
2231 hi = ((unsigned long)skb_tail_pointer(skb) +
2232 LL_HEADER_LENGTH) >> 31;
2234 nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
2236 atomic_dec(&skb->users);
2237 skb_pull(skb, LL_HEADER_LENGTH + 2);
2238 ctc_clear_busy(ch->netdev);
2241 memcpy(skb_put(nskb, skb->len),
2242 skb->data, skb->len);
2243 atomic_inc(&nskb->users);
2244 atomic_dec(&skb->users);
2245 dev_kfree_skb_irq(skb);
2250 ch->ccw[4].count = block_len;
2251 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
2253 * idal allocation failed, try via copying to
2254 * trans_skb. trans_skb usually has a pre-allocated
2257 if (ctc_checkalloc_buffer(ch, 1)) {
2259 * Remove our header. It gets added
2260 * again on retransmit.
2262 atomic_dec(&skb->users);
2263 skb_pull(skb, LL_HEADER_LENGTH + 2);
2264 ctc_clear_busy(ch->netdev);
2268 skb_reset_tail_pointer(ch->trans_skb);
2269 ch->trans_skb->len = 0;
2270 ch->ccw[1].count = skb->len;
2271 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
2273 atomic_dec(&skb->users);
2274 dev_kfree_skb_irq(skb);
2277 skb_queue_tail(&ch->io_queue, skb);
2281 fsm_newstate(ch->fsm, CH_STATE_TX);
2282 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
2283 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
2284 ch->prof.send_stamp = xtime;
2285 rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
2286 (unsigned long) ch, 0xff, 0);
2287 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
2289 ch->prof.doios_single++;
2291 fsm_deltimer(&ch->timer);
2292 ccw_check_return_code(ch, rc, "single skb TX");
2294 skb_dequeue_tail(&ch->io_queue);
2296 * Remove our header. It gets added
2297 * again on retransmit.
2299 skb_pull(skb, LL_HEADER_LENGTH + 2);
2302 struct net_device *dev = ch->netdev;
2303 struct ctc_priv *privptr = dev->priv;
2304 privptr->stats.tx_packets++;
2305 privptr->stats.tx_bytes +=
2306 skb->len - LL_HEADER_LENGTH;
2311 ctc_clear_busy(ch->netdev);
/* net_device open: just kick the device FSM with DEV_EVENT_START. */
2316  * Interface API for upper network layers
2317  *****************************************************************************/
2320  * Open an interface.
2321  * Called from generic network layer when ifconfig up is run.
2323  * @param dev Pointer to interface struct.
2325  * @return 0 on success, -ERRNO on failure. (Never fails.)
2328 ctc_open(struct net_device * dev)
2330 DBF_TEXT(trace, 5, __FUNCTION__);
2331 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
/* net_device stop: kick the device FSM with DEV_EVENT_STOP. */
2336  * Close an interface.
2337  * Called from generic network layer when ifconfig down is run.
2339  * @param dev Pointer to interface struct.
2341  * @return 0 on success, -ERRNO on failure. (Never fails.)
2344 ctc_close(struct net_device * dev)
2346 DBF_TEXT(trace, 5, __FUNCTION__);
2347 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
/* hard_start_xmit entry point: sanity-check the skb (non-NULL, enough
 * headroom for the LL header + 2-byte length), drop-and-restart if the
 * device FSM is not RUNNING, then hand the packet to transmit_skb on the
 * WRITE channel while holding the device busy flag.
 * NOTE(review): excerpt is missing lines (returns, kfree of bad skbs);
 * code kept byte-identical. */
2352  * Start transmission of a packet.
2353  * Called from generic network device layer.
2355  * @param skb Pointer to buffer containing the packet.
2356  * @param dev Pointer to interface struct.
2358  * @return 0 if packet consumed, !0 if packet rejected.
2359  *         Note: If we return !0, then the packet is free'd by
2360  *               the generic network layer.
2363 ctc_tx(struct sk_buff *skb, struct net_device * dev)
2366 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2368 DBF_TEXT(trace, 5, __FUNCTION__);
2370 * Some sanity checks ...
2373 ctc_pr_warn("%s: NULL sk_buff passed\n", dev->name);
2374 privptr->stats.tx_dropped++;
2377 if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
2378 ctc_pr_warn("%s: Got sk_buff with head room < %ld bytes\n",
2379 dev->name, LL_HEADER_LENGTH + 2);
2381 privptr->stats.tx_dropped++;
2386 * If channels are not running, try to restart them
2387 * and throw away packet.
2389 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
2390 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
2392 privptr->stats.tx_dropped++;
2393 privptr->stats.tx_errors++;
2394 privptr->stats.tx_carrier_errors++;
2398 if (ctc_test_and_set_busy(dev))
2401 dev->trans_start = jiffies;
2402 if (transmit_skb(privptr->channel[WRITE], skb) != 0)
/* Validate and set a new MTU; also bounded above by the READ channel's
 * buffer size minus LL header + 2-byte block length.
 * NOTE(review): excerpt is missing lines (-EINVAL return, dev->mtu
 * assignment, return 0); code kept byte-identical. */
2408  * Sets MTU of an interface.
2410  * @param dev     Pointer to interface struct.
2411  * @param new_mtu The new MTU to use for this interface.
2413  * @return 0 on success, -EINVAL if MTU is out of valid range.
2414  *         (valid range is 576 .. 65527). If VM is on the
2415  *         remote side, maximum MTU is 32760, however this is
2416  *         <em>not</em> checked here.
2419 ctc_change_mtu(struct net_device * dev, int new_mtu)
2421 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2423 DBF_TEXT(trace, 3, __FUNCTION__);
2424 if ((new_mtu < 576) || (new_mtu > 65527) ||
2425 (new_mtu > (privptr->channel[READ]->max_bufsize -
2426 LL_HEADER_LENGTH - 2)))
2429 dev->hard_header_len = LL_HEADER_LENGTH + 2;
/* net_device get_stats callback: return the driver-private stats struct. */
2434  * Returns interface statistics of a device.
2436  * @param dev Pointer to interface struct.
2438  * @return Pointer to stats struct of this interface.
2440 static struct net_device_stats *
2441 ctc_stats(struct net_device * dev)
2443 return &((struct ctc_priv *) dev->priv)->stats;
/* sysfs "buffer" show: print the configured buffer size.
 * NOTE(review): excerpt is missing lines (NULL-priv guard, the value
 * argument of sprintf); code kept byte-identical. */
2451 buffer_show(struct device *dev, struct device_attribute *attr, char *buf)
2453 struct ctc_priv *priv;
2455 priv = dev->driver_data;
2458 return sprintf(buf, "%d\n",
/* sysfs "buffer" store: parse a new buffer size, validate it against
 * CTC_BUFSIZE_LIMIT, the 576-byte minimum and (if the interface is
 * running) the current MTU, then apply it to both channels and flag them
 * BUFSIZE_CHANGED; the MTU is adjusted when the interface is down.
 * NOTE(review): excerpt is missing lines (goto einval targets, return
 * count, char buffer[] decl); code kept byte-identical. */
2463 buffer_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2465 struct ctc_priv *priv;
2466 struct net_device *ndev;
2470 DBF_TEXT(trace, 3, __FUNCTION__);
2471 DBF_TEXT(trace, 3, buf);
2472 priv = dev->driver_data;
2474 DBF_TEXT(trace, 3, "bfnopriv");
2478 sscanf(buf, "%u", &bs1);
2479 if (bs1 > CTC_BUFSIZE_LIMIT)
2481 if (bs1 < (576 + LL_HEADER_LENGTH + 2))
2483 priv->buffer_size = bs1;	// just to overwrite the default
2485 ndev = priv->channel[READ]->netdev;
2487 DBF_TEXT(trace, 3, "bfnondev");
2491 if ((ndev->flags & IFF_RUNNING) &&
2492 (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
2495 priv->channel[READ]->max_bufsize = bs1;
2496 priv->channel[WRITE]->max_bufsize = bs1;
2497 if (!(ndev->flags & IFF_RUNNING))
2498 ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
2499 priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2500 priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2502 sprintf(buffer, "%d",priv->buffer_size);
2503 DBF_TEXT(trace, 3, buffer);
2507 DBF_TEXT(trace, 3, "buff_err");
/* sysfs "loglevel" show/store: read or range-check-and-set the global
 * loglevel (0 .. CTC_LOGLEVEL_MAX).
 * NOTE(review): excerpt is missing lines (assignment of ll1 to loglevel,
 * return count); code kept byte-identical. */
2512 loglevel_show(struct device *dev, struct device_attribute *attr, char *buf)
2514 return sprintf(buf, "%d\n", loglevel);
2518 loglevel_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2522 DBF_TEXT(trace, 5, __FUNCTION__);
2523 sscanf(buf, "%i", &ll1);
2525 if ((ll1 > CTC_LOGLEVEL_MAX) || (ll1 < 0))
2532 ctc_print_statistics(struct ctc_priv *priv)
2537 DBF_TEXT(trace, 4, __FUNCTION__);
2540 sbuf = kmalloc(2048, GFP_KERNEL);
2545 p += sprintf(p, " Device FSM state: %s\n",
2546 fsm_getstate_str(priv->fsm));
2547 p += sprintf(p, " RX channel FSM state: %s\n",
2548 fsm_getstate_str(priv->channel[READ]->fsm));
2549 p += sprintf(p, " TX channel FSM state: %s\n",
2550 fsm_getstate_str(priv->channel[WRITE]->fsm));
2551 p += sprintf(p, " Max. TX buffer used: %ld\n",
2552 priv->channel[WRITE]->prof.maxmulti);
2553 p += sprintf(p, " Max. chained SKBs: %ld\n",
2554 priv->channel[WRITE]->prof.maxcqueue);
2555 p += sprintf(p, " TX single write ops: %ld\n",
2556 priv->channel[WRITE]->prof.doios_single);
2557 p += sprintf(p, " TX multi write ops: %ld\n",
2558 priv->channel[WRITE]->prof.doios_multi);
2559 p += sprintf(p, " Netto bytes written: %ld\n",
2560 priv->channel[WRITE]->prof.txlen);
2561 p += sprintf(p, " Max. TX IO-time: %ld\n",
2562 priv->channel[WRITE]->prof.tx_time);
2564 ctc_pr_debug("Statistics for %s:\n%s",
2565 priv->channel[WRITE]->netdev->name, sbuf);
2571 stats_show(struct device *dev, struct device_attribute *attr, char *buf)
2573 struct ctc_priv *priv = dev->driver_data;
2576 ctc_print_statistics(priv);
2577 return sprintf(buf, "0\n");
2581 stats_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2583 struct ctc_priv *priv = dev->driver_data;
2586 /* Reset statistics */
2587 memset(&priv->channel[WRITE]->prof, 0,
2588 sizeof(priv->channel[WRITE]->prof));
2593 ctc_netdev_unregister(struct net_device * dev)
2595 struct ctc_priv *privptr;
2599 privptr = (struct ctc_priv *) dev->priv;
2600 unregister_netdev(dev);
/* Register the interface with the network stack. */
static int
ctc_netdev_register(struct net_device *dev)
{
	return register_netdev(dev);
}
2610 ctc_free_netdevice(struct net_device * dev, int free_dev)
2612 struct ctc_priv *privptr;
2615 privptr = dev->priv;
2618 kfree_fsm(privptr->fsm);
2628 ctc_proto_show(struct device *dev, struct device_attribute *attr, char *buf)
2630 struct ctc_priv *priv;
2632 priv = dev->driver_data;
2636 return sprintf(buf, "%d\n", priv->protocol);
2640 ctc_proto_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2642 struct ctc_priv *priv;
2645 DBF_TEXT(trace, 3, __FUNCTION__);
2646 pr_debug("%s() called\n", __FUNCTION__);
2648 priv = dev->driver_data;
2651 sscanf(buf, "%u", &value);
2652 if (!((value == CTC_PROTO_S390) ||
2653 (value == CTC_PROTO_LINUX) ||
2654 (value == CTC_PROTO_OS390)))
2656 priv->protocol = value;
2662 ctc_type_show(struct device *dev, struct device_attribute *attr, char *buf)
2664 struct ccwgroup_device *cgdev;
2666 cgdev = to_ccwgroupdev(dev);
2670 return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
2673 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
2674 static DEVICE_ATTR(protocol, 0644, ctc_proto_show, ctc_proto_store);
2675 static DEVICE_ATTR(type, 0444, ctc_type_show, NULL);
2677 static DEVICE_ATTR(loglevel, 0644, loglevel_show, loglevel_write);
2678 static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
2680 static struct attribute *ctc_attr[] = {
2681 &dev_attr_protocol.attr,
2682 &dev_attr_type.attr,
2683 &dev_attr_buffer.attr,
2687 static struct attribute_group ctc_attr_group = {
2692 ctc_add_attributes(struct device *dev)
2696 rc = device_create_file(dev, &dev_attr_loglevel);
2699 rc = device_create_file(dev, &dev_attr_stats);
2702 device_remove_file(dev, &dev_attr_loglevel);
2708 ctc_remove_attributes(struct device *dev)
2710 device_remove_file(dev, &dev_attr_stats);
2711 device_remove_file(dev, &dev_attr_loglevel);
2715 ctc_add_files(struct device *dev)
2717 pr_debug("%s() called\n", __FUNCTION__);
2719 return sysfs_create_group(&dev->kobj, &ctc_attr_group);
2723 ctc_remove_files(struct device *dev)
2725 pr_debug("%s() called\n", __FUNCTION__);
2727 sysfs_remove_group(&dev->kobj, &ctc_attr_group);
2731 * Add ctc specific attributes.
2732 * Add ctc private data.
2734 * @param cgdev pointer to ccwgroup_device just added
2736 * @returns 0 on success, !0 on failure.
2739 ctc_probe_device(struct ccwgroup_device *cgdev)
2741 struct ctc_priv *priv;
2745 pr_debug("%s() called\n", __FUNCTION__);
2746 DBF_TEXT(setup, 3, __FUNCTION__);
2748 if (!get_device(&cgdev->dev))
2751 priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL);
2753 ctc_pr_err("%s: Out of memory\n", __func__);
2754 put_device(&cgdev->dev);
2758 memset(priv, 0, sizeof (struct ctc_priv));
2759 rc = ctc_add_files(&cgdev->dev);
2762 put_device(&cgdev->dev);
2765 priv->buffer_size = CTC_BUFSIZE_DEFAULT;
2766 cgdev->cdev[0]->handler = ctc_irq_handler;
2767 cgdev->cdev[1]->handler = ctc_irq_handler;
2768 cgdev->dev.driver_data = priv;
2770 sprintf(buffer, "%p", priv);
2771 DBF_TEXT(data, 3, buffer);
2773 sprintf(buffer, "%u", (unsigned int)sizeof(struct ctc_priv));
2774 DBF_TEXT(data, 3, buffer);
2776 sprintf(buffer, "%p", &channels);
2777 DBF_TEXT(data, 3, buffer);
2779 sprintf(buffer, "%u", (unsigned int)sizeof(struct channel));
2780 DBF_TEXT(data, 3, buffer);
2786 * Initialize everything of the net device except the name and the
2789 static struct net_device *
2790 ctc_init_netdevice(struct net_device * dev, int alloc_device,
2791 struct ctc_priv *privptr)
2796 DBF_TEXT(setup, 3, __FUNCTION__);
2799 dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
2802 memset(dev, 0, sizeof (struct net_device));
2805 dev->priv = privptr;
2806 privptr->fsm = init_fsm("ctcdev", dev_state_names,
2807 dev_event_names, CTC_NR_DEV_STATES, CTC_NR_DEV_EVENTS,
2808 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2809 if (privptr->fsm == NULL) {
2814 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2815 fsm_settimer(privptr->fsm, &privptr->restart_timer);
2817 dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
2818 dev->hard_start_xmit = ctc_tx;
2819 dev->open = ctc_open;
2820 dev->stop = ctc_close;
2821 dev->get_stats = ctc_stats;
2822 dev->change_mtu = ctc_change_mtu;
2823 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2825 dev->type = ARPHRD_SLIP;
2826 dev->tx_queue_len = 100;
2827 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2828 SET_MODULE_OWNER(dev);
2835 * Setup an interface.
2837 * @param cgdev Device to be setup.
2839 * @returns 0 on success, !0 on failure.
2842 ctc_new_device(struct ccwgroup_device *cgdev)
2844 char read_id[CTC_ID_SIZE];
2845 char write_id[CTC_ID_SIZE];
2847 enum channel_types type;
2848 struct ctc_priv *privptr;
2849 struct net_device *dev;
2853 pr_debug("%s() called\n", __FUNCTION__);
2854 DBF_TEXT(setup, 3, __FUNCTION__);
2856 privptr = cgdev->dev.driver_data;
2860 sprintf(buffer, "%d", privptr->buffer_size);
2861 DBF_TEXT(setup, 3, buffer);
2863 type = get_channel_type(&cgdev->cdev[0]->id);
2865 snprintf(read_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
2866 snprintf(write_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id);
2868 if (add_channel(cgdev->cdev[0], type))
2870 if (add_channel(cgdev->cdev[1], type))
2873 ret = ccw_device_set_online(cgdev->cdev[0]);
2876 "ccw_device_set_online (cdev[0]) failed with ret = %d\n", ret);
2879 ret = ccw_device_set_online(cgdev->cdev[1]);
2882 "ccw_device_set_online (cdev[1]) failed with ret = %d\n", ret);
2885 dev = ctc_init_netdevice(NULL, 1, privptr);
2888 ctc_pr_warn("ctc_init_netdevice failed\n");
2892 strlcpy(dev->name, "ctc%d", IFNAMSIZ);
2894 for (direction = READ; direction <= WRITE; direction++) {
2895 privptr->channel[direction] =
2896 channel_get(type, direction == READ ? read_id : write_id,
2898 if (privptr->channel[direction] == NULL) {
2899 if (direction == WRITE)
2900 channel_free(privptr->channel[READ]);
2902 ctc_free_netdevice(dev, 1);
2905 privptr->channel[direction]->netdev = dev;
2906 privptr->channel[direction]->protocol = privptr->protocol;
2907 privptr->channel[direction]->max_bufsize = privptr->buffer_size;
2910 SET_NETDEV_DEV(dev, &cgdev->dev);
2912 if (ctc_netdev_register(dev) != 0) {
2913 ctc_free_netdevice(dev, 1);
2917 if (ctc_add_attributes(&cgdev->dev)) {
2918 ctc_netdev_unregister(dev);
2920 ctc_free_netdevice(dev, 1);
2924 strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name));
2928 ctc_pr_info("%s: read: %s, write: %s, proto: %d\n",
2929 dev->name, privptr->channel[READ]->id,
2930 privptr->channel[WRITE]->id, privptr->protocol);
2934 ccw_device_set_offline(cgdev->cdev[1]);
2935 ccw_device_set_offline(cgdev->cdev[0]);
2941 * Shutdown an interface.
2943 * @param cgdev Device to be shut down.
2945 * @returns 0 on success, !0 on failure.
2948 ctc_shutdown_device(struct ccwgroup_device *cgdev)
2950 struct ctc_priv *priv;
2951 struct net_device *ndev;
2953 DBF_TEXT(setup, 3, __FUNCTION__);
2954 pr_debug("%s() called\n", __FUNCTION__);
2957 priv = cgdev->dev.driver_data;
2962 if (priv->channel[READ]) {
2963 ndev = priv->channel[READ]->netdev;
2965 /* Close the device */
2967 ndev->flags &=~IFF_RUNNING;
2969 ctc_remove_attributes(&cgdev->dev);
2971 channel_free(priv->channel[READ]);
2973 if (priv->channel[WRITE])
2974 channel_free(priv->channel[WRITE]);
2977 ctc_netdev_unregister(ndev);
2979 ctc_free_netdevice(ndev, 1);
2983 kfree_fsm(priv->fsm);
2985 ccw_device_set_offline(cgdev->cdev[1]);
2986 ccw_device_set_offline(cgdev->cdev[0]);
2988 if (priv->channel[READ])
2989 channel_remove(priv->channel[READ]);
2990 if (priv->channel[WRITE])
2991 channel_remove(priv->channel[WRITE]);
2992 priv->channel[READ] = priv->channel[WRITE] = NULL;
2999 ctc_remove_device(struct ccwgroup_device *cgdev)
3001 struct ctc_priv *priv;
3003 pr_debug("%s() called\n", __FUNCTION__);
3004 DBF_TEXT(setup, 3, __FUNCTION__);
3006 priv = cgdev->dev.driver_data;
3009 if (cgdev->state == CCWGROUP_ONLINE)
3010 ctc_shutdown_device(cgdev);
3011 ctc_remove_files(&cgdev->dev);
3012 cgdev->dev.driver_data = NULL;
3014 put_device(&cgdev->dev);
3017 static struct ccwgroup_driver ctc_group_driver = {
3018 .owner = THIS_MODULE,
3021 .driver_id = 0xC3E3C3,
3022 .probe = ctc_probe_device,
3023 .remove = ctc_remove_device,
3024 .set_online = ctc_new_device,
3025 .set_offline = ctc_shutdown_device,
3029 * Module related routines
3030 *****************************************************************************/
3033 * Prepare to be unloaded. Free IRQ's and release all resources.
3034 * This is called just before this module is unloaded. It is
3035 * <em>not</em> called, if the usage count is !0, so we don't need to check
3041 DBF_TEXT(setup, 3, __FUNCTION__);
3042 unregister_cu3088_discipline(&ctc_group_driver);
3043 ctc_unregister_dbf_views();
3044 ctc_pr_info("CTC driver unloaded\n");
3048 * Initialize module.
3049 * This is called just after the module is loaded.
3051 * @return 0 on success, !0 on error.
3058 loglevel = CTC_LOGLEVEL_DEFAULT;
3060 DBF_TEXT(setup, 3, __FUNCTION__);
3064 ret = ctc_register_dbf_views();
3066 ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
3069 ret = register_cu3088_discipline(&ctc_group_driver);
3071 ctc_unregister_dbf_views();
/* Module entry/exit points. */
module_init(ctc_init);
module_exit(ctc_exit);
3079 /* --- This is the END my friend --- */