]> bbs.cooldavid.org Git - net-next-2.6.git/blobdiff - drivers/staging/batman-adv/send.c
Staging: batman-adv: fix rogue packets on shutdown
[net-next-2.6.git] / drivers / staging / batman-adv / send.c
index 2a9fac8c240ed6e21812c46f1d49a17fc70452ef..ac69ed871a7614c36b8bfc1d2bda10cb1792b14b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -36,25 +36,17 @@ static uint8_t hop_penalty(const uint8_t tq)
 }
 
 /* when do we schedule our own packet to be sent */
-static unsigned long own_send_time(void)
+static unsigned long own_send_time(struct bat_priv *bat_priv)
 {
        return jiffies +
-               (((atomic_read(&originator_interval) - JITTER +
+               (((atomic_read(&bat_priv->orig_interval) - JITTER +
                   (random32() % 2*JITTER)) * HZ) / 1000);
 }
 
 /* when do we schedule a forwarded packet to be sent */
-static unsigned long forward_send_time(void)
+static unsigned long forward_send_time(struct bat_priv *bat_priv)
 {
-       unsigned long send_time = jiffies; /* Starting now plus... */
-
-       if (atomic_read(&aggregation_enabled))
-               send_time += (((MAX_AGGREGATION_MS - (JITTER/2) +
-                               (random32() % JITTER)) * HZ) / 1000);
-       else
-               send_time += (((random32() % (JITTER/2)) * HZ) / 1000);
-
-       return send_time;
+       return jiffies + (((random32() % (JITTER/2)) * HZ) / 1000);
 }
 
 /* send out an already prepared packet to the given address via the
@@ -65,7 +57,7 @@ int send_skb_packet(struct sk_buff *skb,
 {
        struct ethhdr *ethhdr;
 
-       if (batman_if->if_active != IF_ACTIVE)
+       if (batman_if->if_status != IF_ACTIVE)
                goto send_skb_err;
 
        if (unlikely(!batman_if->net_dev))
@@ -73,7 +65,8 @@ int send_skb_packet(struct sk_buff *skb,
 
        if (!(batman_if->net_dev->flags & IFF_UP)) {
                printk(KERN_WARNING
-                      "batman-adv:Interface %s is not up - can't send packet via that interface!\n",
+                      "batman-adv:Interface %s "
+                      "is not up - can't send packet via that interface!\n",
                       batman_if->dev);
                goto send_skb_err;
        }
@@ -131,10 +124,11 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
        int16_t buff_pos;
        struct batman_packet *batman_packet;
 
-       if (batman_if->if_active != IF_ACTIVE)
+       if (batman_if->if_status != IF_ACTIVE)
                return;
 
-       packet_num = buff_pos = 0;
+       packet_num = 0;
+       buff_pos = 0;
        batman_packet = (struct batman_packet *)
                (forw_packet->packet_buff);
 
@@ -155,9 +149,9 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
                                                            "Sending own" :
                                                            "Forwarding"));
                bat_dbg(DBG_BATMAN,
-                       "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d, IDF %s) on interface %s [%s]\n",
-                       fwd_str,
-                       (packet_num > 0 ? "aggregated " : ""),
+                       "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
+                       " IDF %s) on interface %s [%s]\n",
+                       fwd_str, (packet_num > 0 ? "aggregated " : ""),
                        batman_packet->orig, ntohs(batman_packet->seqno),
                        batman_packet->tq, batman_packet->ttl,
                        (batman_packet->flags & DIRECTLINK ?
@@ -185,11 +179,12 @@ static void send_packet(struct forw_packet *forw_packet)
        unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
 
        if (!forw_packet->if_incoming) {
-               printk(KERN_ERR "batman-adv: Error - can't forward packet: incoming iface not specified\n");
+               printk(KERN_ERR "batman-adv: Error - can't forward packet: "
+                      "incoming iface not specified\n");
                return;
        }
 
-       if (forw_packet->if_incoming->if_active != IF_ACTIVE)
+       if (forw_packet->if_incoming->if_status != IF_ACTIVE)
                return;
 
        /* multihomed peer assumed */
@@ -199,7 +194,8 @@ static void send_packet(struct forw_packet *forw_packet)
 
                /* FIXME: what about aggregated packets ? */
                bat_dbg(DBG_BATMAN,
-                       "%s packet (originator %pM, seqno %d, TTL %d) on interface %s [%s]\n",
+                       "%s packet (originator %pM, seqno %d, TTL %d) "
+                       "on interface %s [%s]\n",
                        (forw_packet->own ? "Sending own" : "Forwarding"),
                        batman_packet->orig, ntohs(batman_packet->seqno),
                        batman_packet->ttl, forw_packet->if_incoming->dev,
@@ -246,9 +242,17 @@ static void rebuild_batman_packet(struct batman_if *batman_if)
 
 void schedule_own_packet(struct batman_if *batman_if)
 {
+       /* FIXME: each batman_if will be attached to a softif */
+       struct bat_priv *bat_priv = netdev_priv(soft_device);
        unsigned long send_time;
        struct batman_packet *batman_packet;
-       int vis_server = atomic_read(&vis_mode);
+       int vis_server;
+
+       if ((batman_if->if_status == IF_NOT_IN_USE) ||
+           (batman_if->if_status == IF_TO_BE_REMOVED))
+               return;
+
+       vis_server = atomic_read(&bat_priv->vis_mode);
 
        /**
         * the interface gets activated here to avoid race conditions between
@@ -257,11 +261,12 @@ void schedule_own_packet(struct batman_if *batman_if)
         * outdated packets (especially uninitialized mac addresses) in the
         * packet queue
         */
-       if (batman_if->if_active == IF_TO_BE_ACTIVATED)
-               batman_if->if_active = IF_ACTIVE;
+       if (batman_if->if_status == IF_TO_BE_ACTIVATED)
+               batman_if->if_status = IF_ACTIVE;
 
        /* if local hna has changed and interface is a primary interface */
-       if ((atomic_read(&hna_local_changed)) && (batman_if->if_num == 0))
+       if ((atomic_read(&hna_local_changed)) &&
+           (batman_if == bat_priv->primary_if))
                rebuild_batman_packet(batman_if);
 
        /**
@@ -276,15 +281,17 @@ void schedule_own_packet(struct batman_if *batman_if)
        if (vis_server == VIS_TYPE_SERVER_SYNC)
                batman_packet->flags = VIS_SERVER;
        else
-               batman_packet->flags = 0;
+               batman_packet->flags &= ~VIS_SERVER;
 
        /* could be read by receive_bat_packet() */
        atomic_inc(&batman_if->seqno);
 
        slide_own_bcast_window(batman_if);
-       send_time = own_send_time();
-       add_bat_packet_to_list(batman_if->packet_buff,
-                              batman_if->packet_len, batman_if, 1, send_time);
+       send_time = own_send_time(bat_priv);
+       add_bat_packet_to_list(bat_priv,
+                              batman_if->packet_buff,
+                              batman_if->packet_len,
+                              batman_if, 1, send_time);
 }
 
 void schedule_forward_packet(struct orig_node *orig_node,
@@ -293,11 +300,13 @@ void schedule_forward_packet(struct orig_node *orig_node,
                             uint8_t directlink, int hna_buff_len,
                             struct batman_if *if_incoming)
 {
+       /* FIXME: each batman_if will be attached to a softif */
+       struct bat_priv *bat_priv = netdev_priv(soft_device);
        unsigned char in_tq, in_ttl, tq_avg = 0;
        unsigned long send_time;
 
        if (batman_packet->ttl <= 1) {
-               bat_dbg(DBG_BATMAN, "ttl exceeded \n");
+               bat_dbg(DBG_BATMAN, "ttl exceeded\n");
                return;
        }
 
@@ -316,7 +325,8 @@ void schedule_forward_packet(struct orig_node *orig_node,
                        batman_packet->tq = orig_node->router->tq_avg;
 
                        if (orig_node->router->last_ttl)
-                               batman_packet->ttl = orig_node->router->last_ttl - 1;
+                               batman_packet->ttl = orig_node->router->last_ttl
+                                                       - 1;
                }
 
                tq_avg = orig_node->router->tq_avg;
@@ -325,7 +335,8 @@ void schedule_forward_packet(struct orig_node *orig_node,
        /* apply hop penalty */
        batman_packet->tq = hop_penalty(batman_packet->tq);
 
-       bat_dbg(DBG_BATMAN, "Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i \n",
+       bat_dbg(DBG_BATMAN, "Forwarding packet: tq_orig: %i, tq_avg: %i, "
+               "tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
                in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
                batman_packet->ttl);
 
@@ -336,8 +347,9 @@ void schedule_forward_packet(struct orig_node *orig_node,
        else
                batman_packet->flags &= ~DIRECTLINK;
 
-       send_time = forward_send_time();
-       add_bat_packet_to_list((unsigned char *)batman_packet,
+       send_time = forward_send_time(bat_priv);
+       add_bat_packet_to_list(bat_priv,
+                              (unsigned char *)batman_packet,
                               sizeof(struct batman_packet) + hna_buff_len,
                               if_incoming, 0, send_time);
 }
@@ -368,19 +380,32 @@ static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
                           send_time);
 }
 
-void add_bcast_packet_to_list(struct sk_buff *skb)
+#define atomic_dec_not_zero(v)          atomic_add_unless((v), -1, 0)
+/* add a broadcast packet to the queue and setup timers. broadcast packets
+ * are sent multiple times to increase probability for beeing received.
+ *
+ * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
+ * errors.
+ *
+ * The skb is not consumed, so the caller should make sure that the
+ * skb is freed. */
+int add_bcast_packet_to_list(struct sk_buff *skb)
 {
        struct forw_packet *forw_packet;
 
+       if (!atomic_dec_not_zero(&bcast_queue_left)) {
+               bat_dbg(DBG_BATMAN, "bcast packet queue full\n");
+               goto out;
+       }
+
        forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
+
        if (!forw_packet)
-               return;
+               goto out_and_inc;
 
        skb = skb_copy(skb, GFP_ATOMIC);
-       if (!skb) {
-               kfree(forw_packet);
-               return;
-       }
+       if (!skb)
+               goto packet_free;
 
        skb_reset_mac_header(skb);
 
@@ -391,6 +416,14 @@ void add_bcast_packet_to_list(struct sk_buff *skb)
        forw_packet->num_packets = 0;
 
        _add_bcast_packet_to_list(forw_packet, 1);
+       return NETDEV_TX_OK;
+
+packet_free:
+       kfree(forw_packet);
+out_and_inc:
+       atomic_inc(&bcast_queue_left);
+out:
+       return NETDEV_TX_BUSY;
 }
 
 void send_outstanding_bcast_packet(struct work_struct *work)
@@ -407,6 +440,9 @@ void send_outstanding_bcast_packet(struct work_struct *work)
        hlist_del(&forw_packet->list);
        spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
 
+       if (atomic_read(&module_state) == MODULE_DEACTIVATING)
+               goto out;
+
        /* rebroadcast packet */
        rcu_read_lock();
        list_for_each_entry_rcu(batman_if, &if_list, list) {
@@ -420,13 +456,15 @@ void send_outstanding_bcast_packet(struct work_struct *work)
 
        forw_packet->num_packets++;
 
-       /* if we still have some more bcasts to send and we are not shutting
-        * down */
-       if ((forw_packet->num_packets < 3) &&
-           (atomic_read(&module_state) != MODULE_DEACTIVATING))
+       /* if we still have some more bcasts to send */
+       if (forw_packet->num_packets < 3) {
                _add_bcast_packet_to_list(forw_packet, ((5 * HZ) / 1000));
-       else
-               forw_packet_free(forw_packet);
+               return;
+       }
+
+out:
+       forw_packet_free(forw_packet);
+       atomic_inc(&bcast_queue_left);
 }
 
 void send_outstanding_bat_packet(struct work_struct *work)
@@ -441,6 +479,9 @@ void send_outstanding_bat_packet(struct work_struct *work)
        hlist_del(&forw_packet->list);
        spin_unlock_irqrestore(&forw_bat_list_lock, flags);
 
+       if (atomic_read(&module_state) == MODULE_DEACTIVATING)
+               goto out;
+
        send_packet(forw_packet);
 
        /**
@@ -448,26 +489,42 @@ void send_outstanding_bat_packet(struct work_struct *work)
         * to determine the queues wake up time unless we are
         * shutting down
         */
-       if ((forw_packet->own) &&
-           (atomic_read(&module_state) != MODULE_DEACTIVATING))
+       if (forw_packet->own)
                schedule_own_packet(forw_packet->if_incoming);
 
+out:
+       /* don't count own packet */
+       if (!forw_packet->own)
+               atomic_inc(&batman_queue_left);
+
        forw_packet_free(forw_packet);
 }
 
-void purge_outstanding_packets(void)
+void purge_outstanding_packets(struct batman_if *batman_if)
 {
        struct forw_packet *forw_packet;
        struct hlist_node *tmp_node, *safe_tmp_node;
        unsigned long flags;
 
-       bat_dbg(DBG_BATMAN, "purge_outstanding_packets()\n");
+       if (batman_if)
+               bat_dbg(DBG_BATMAN, "purge_outstanding_packets(): %s\n",
+                       batman_if->dev);
+       else
+               bat_dbg(DBG_BATMAN, "purge_outstanding_packets()\n");
 
        /* free bcast list */
        spin_lock_irqsave(&forw_bcast_list_lock, flags);
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &forw_bcast_list, list) {
 
+               /**
+                * if purge_outstanding_packets() was called with an argument
+                * we delete only packets belonging to the given interface
+                */
+               if ((batman_if) &&
+                   (forw_packet->if_incoming != batman_if))
+                       continue;
+
                spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
 
                /**
@@ -484,6 +541,14 @@ void purge_outstanding_packets(void)
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &forw_bat_list, list) {
 
+               /**
+                * if purge_outstanding_packets() was called with an argument
+                * we delete only packets belonging to the given interface
+                */
+               if ((batman_if) &&
+                   (forw_packet->if_incoming != batman_if))
+                       continue;
+
                spin_unlock_irqrestore(&forw_bat_list_lock, flags);
 
                /**