]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/staging/batman-adv/send.c
Staging: batman-adv: Fix aggregation direct-link bug
[net-next-2.6.git] / drivers / staging / batman-adv / send.c
CommitLineData
5beef3c9
AL
1/*
2 * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
3 *
4 * Marek Lindner, Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22#include "main.h"
23#include "send.h"
5beef3c9
AL
24#include "routing.h"
25#include "translation-table.h"
e7017195 26#include "soft-interface.h"
5beef3c9
AL
27#include "hard-interface.h"
28#include "types.h"
29#include "vis.h"
30#include "aggregation.h"
31
5beef3c9
AL
32/* apply hop penalty for a normal link */
33static uint8_t hop_penalty(const uint8_t tq)
34{
35 return (tq * (TQ_MAX_VALUE - TQ_HOP_PENALTY)) / (TQ_MAX_VALUE);
36}
37
38/* when do we schedule our own packet to be sent */
39static unsigned long own_send_time(void)
40{
41 return jiffies +
42 (((atomic_read(&originator_interval) - JITTER +
43 (random32() % 2*JITTER)) * HZ) / 1000);
44}
45
46/* when do we schedule a forwarded packet to be sent */
47static unsigned long forward_send_time(void)
48{
bd13b616 49 return jiffies + (((random32() % (JITTER/2)) * HZ) / 1000);
5beef3c9
AL
50}
51
e7017195
SW
/* send out an already prepared packet to the given address via the
 * specified batman interface.
 *
 * Takes ownership of the skb: on every error path the buffer is freed
 * here and NET_XMIT_DROP is returned; on success the return value is
 * whatever dev_queue_xmit() reports. */
int send_skb_packet(struct sk_buff *skb,
				struct batman_if *batman_if,
				uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (batman_if->if_active != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!batman_if->net_dev))
		goto send_skb_err;

	if (!(batman_if->net_dev->flags & IFF_UP)) {
		printk(KERN_WARNING
		       "batman-adv:Interface %s is not up - can't send packet via that interface!\n",
		       batman_if->dev);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (my_skb_push(skb, sizeof(struct ethhdr)) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	/* fill in the ethernet header: source is the sending interface,
	 * destination is the caller-supplied next hop / broadcast address */
	ethhdr = (struct ethhdr *) skb_mac_header(skb);
	memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = batman_if->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */

	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
99
100/* sends a raw packet. */
101void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
102 struct batman_if *batman_if, uint8_t *dst_addr)
103{
104 struct sk_buff *skb;
105 char *data;
106
107 skb = dev_alloc_skb(pack_buff_len + sizeof(struct ethhdr));
108 if (!skb)
109 return;
110 data = skb_put(skb, pack_buff_len + sizeof(struct ethhdr));
111 memcpy(data + sizeof(struct ethhdr), pack_buff, pack_buff_len);
112 /* pull back to the batman "network header" */
113 skb_pull(skb, sizeof(struct ethhdr));
114 send_skb_packet(skb, batman_if, dst_addr);
5beef3c9
AL
115}
116
/* Send a packet to a given interface.
 *
 * The packet buffer may hold several aggregated OGMs; walk them all,
 * fix up the DIRECTLINK flag per sub-packet for this outgoing
 * interface, log each one, then transmit the whole buffer as a single
 * broadcast frame. */
static void send_packet_to_if(struct forw_packet *forw_packet,
			      struct batman_if *batman_if)
{
	char *fwd_str;
	uint8_t packet_num;
	int16_t buff_pos;
	struct batman_packet *batman_packet;

	if (batman_if->if_active != IF_ACTIVE)
		return;

	packet_num = 0;
	buff_pos = 0;
	batman_packet = (struct batman_packet *)
		(forw_packet->packet_buff);

	/* adjust all flags and log packets */
	while (aggregated_packet(buff_pos,
				 forw_packet->packet_len,
				 batman_packet->num_hna)) {

		/* we might have aggregated direct link packets with an
		 * ordinary base packet: DIRECTLINK is only valid when this
		 * sub-packet was marked direct-link AND it goes back out on
		 * the interface it came in on */
		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
		    (forw_packet->if_incoming == batman_if))
			batman_packet->flags |= DIRECTLINK;
		else
			batman_packet->flags &= ~DIRECTLINK;

		/* only the first sub-packet of an own OGM is "Sending own";
		 * aggregated and foreign sub-packets are "Forwarding" */
		fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
							    "Sending own" :
							    "Forwarding"));
		bat_dbg(DBG_BATMAN,
			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d, IDF %s) on interface %s [%s]\n",
			fwd_str,
			(packet_num > 0 ? "aggregated " : ""),
			batman_packet->orig, ntohs(batman_packet->seqno),
			batman_packet->tq, batman_packet->ttl,
			(batman_packet->flags & DIRECTLINK ?
			 "on" : "off"),
			batman_if->dev, batman_if->addr_str);

		/* advance to the next aggregated OGM: fixed header plus
		 * the variable-length HNA list */
		buff_pos += sizeof(struct batman_packet) +
			(batman_packet->num_hna * ETH_ALEN);
		packet_num++;
		batman_packet = (struct batman_packet *)
			(forw_packet->packet_buff + buff_pos);
	}

	send_raw_packet(forw_packet->packet_buff,
			forw_packet->packet_len,
			batman_if, broadcastAddr);
}
171
/* send a batman packet: either restricted to the incoming interface
 * (direct-link with expiring TTL, or own OGMs of a non-primary
 * interface) or broadcast on every active interface. */
static void send_packet(struct forw_packet *forw_packet)
{
	struct batman_if *batman_if;
	struct batman_packet *batman_packet =
		(struct batman_packet *)(forw_packet->packet_buff);
	/* DIRECTLINK flag of the first (base) packet in the buffer */
	unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	if (!forw_packet->if_incoming) {
		printk(KERN_ERR "batman-adv: Error - can't forward packet: incoming iface not specified\n");
		return;
	}

	if (forw_packet->if_incoming->if_active != IF_ACTIVE)
		return;

	/* multihomed peer assumed */
	/* non-primary OGMs are only broadcasted on their interface */
	if ((directlink && (batman_packet->ttl == 1)) ||
	    (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {

		/* FIXME: what about aggregated packets ? */
		bat_dbg(DBG_BATMAN,
			"%s packet (originator %pM, seqno %d, TTL %d) on interface %s [%s]\n",
			(forw_packet->own ? "Sending own" : "Forwarding"),
			batman_packet->orig, ntohs(batman_packet->seqno),
			batman_packet->ttl, forw_packet->if_incoming->dev,
			forw_packet->if_incoming->addr_str);

		send_raw_packet(forw_packet->packet_buff,
				forw_packet->packet_len,
				forw_packet->if_incoming,
				broadcastAddr);
		return;
	}

	/* broadcast on every interface; the interface list is protected
	 * by RCU */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list)
		send_packet_to_if(forw_packet, batman_if);
	rcu_read_unlock();
}
214
215static void rebuild_batman_packet(struct batman_if *batman_if)
216{
217 int new_len;
218 unsigned char *new_buff;
219 struct batman_packet *batman_packet;
220
221 new_len = sizeof(struct batman_packet) + (num_hna * ETH_ALEN);
222 new_buff = kmalloc(new_len, GFP_ATOMIC);
223
224 /* keep old buffer if kmalloc should fail */
225 if (new_buff) {
226 memcpy(new_buff, batman_if->packet_buff,
227 sizeof(struct batman_packet));
228 batman_packet = (struct batman_packet *)new_buff;
229
230 batman_packet->num_hna = hna_local_fill_buffer(
231 new_buff + sizeof(struct batman_packet),
232 new_len - sizeof(struct batman_packet));
233
234 kfree(batman_if->packet_buff);
235 batman_if->packet_buff = new_buff;
236 batman_if->packet_len = new_len;
237 }
238}
239
/* prepare this interface's own OGM (sequence number, vis flags, HNA
 * list) and queue it for transmission at the next jittered interval */
void schedule_own_packet(struct batman_if *batman_if)
{
	unsigned long send_time;
	struct batman_packet *batman_packet;
	int vis_server = atomic_read(&vis_mode);

	/**
	 * the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (batman_if->if_active == IF_TO_BE_ACTIVATED)
		batman_if->if_active = IF_ACTIVE;

	/* if local hna has changed and interface is a primary interface */
	if ((atomic_read(&hna_local_changed)) && (batman_if->if_num == 0))
		rebuild_batman_packet(batman_if);

	/**
	 * NOTE: packet_buff might just have been re-allocated in
	 * rebuild_batman_packet()
	 */
	batman_packet = (struct batman_packet *)batman_if->packet_buff;

	/* change sequence number to network order */
	batman_packet->seqno = htons((uint16_t)atomic_read(&batman_if->seqno));

	/* advertise whether this node collects vis data for the whole mesh */
	if (vis_server == VIS_TYPE_SERVER_SYNC)
		batman_packet->flags = VIS_SERVER;
	else
		batman_packet->flags = 0;

	/* could be read by receive_bat_packet() */
	atomic_inc(&batman_if->seqno);

	slide_own_bcast_window(batman_if);
	send_time = own_send_time();
	/* own packet: last-but-one argument "1" marks forw_packet->own */
	add_bat_packet_to_list(batman_if->packet_buff,
			       batman_if->packet_len, batman_if, 1, send_time);
}
282
/* rewrite a received OGM (TTL, TQ, prev_sender, DIRECTLINK flag) and
 * queue it for rebroadcast after a short random delay.
 *
 * batman_packet fields arrive in host byte order (converted by the
 * receive path); seqno is flipped back to network order before the
 * packet is queued for the wire. */
void schedule_forward_packet(struct orig_node *orig_node,
			     struct ethhdr *ethhdr,
			     struct batman_packet *batman_packet,
			     uint8_t directlink, int hna_buff_len,
			     struct batman_if *if_incoming)
{
	unsigned char in_tq, in_ttl, tq_avg = 0;
	unsigned long send_time;

	if (batman_packet->ttl <= 1) {
		bat_dbg(DBG_BATMAN, "ttl exceeded \n");
		return;
	}

	/* remember incoming values for the debug output below */
	in_tq = batman_packet->tq;
	in_ttl = batman_packet->ttl;

	batman_packet->ttl--;
	memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

	/* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
	 * of our best tq value */
	if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {

		/* rebroadcast ogm of best ranking neighbor as is */
		if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) {
			batman_packet->tq = orig_node->router->tq_avg;

			if (orig_node->router->last_ttl)
				batman_packet->ttl = orig_node->router->last_ttl - 1;
		}

		tq_avg = orig_node->router->tq_avg;
	}

	/* apply hop penalty */
	batman_packet->tq = hop_penalty(batman_packet->tq);

	bat_dbg(DBG_BATMAN, "Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i \n",
		in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
		batman_packet->ttl);

	/* back to network order for transmission */
	batman_packet->seqno = htons(batman_packet->seqno);

	if (directlink)
		batman_packet->flags |= DIRECTLINK;
	else
		batman_packet->flags &= ~DIRECTLINK;

	send_time = forward_send_time();
	/* last-but-one argument "0" marks this as a forwarded packet */
	add_bat_packet_to_list((unsigned char *)batman_packet,
			       sizeof(struct batman_packet) + hna_buff_len,
			       if_incoming, 0, send_time);
}
337
338static void forw_packet_free(struct forw_packet *forw_packet)
339{
e7017195
SW
340 if (forw_packet->skb)
341 kfree_skb(forw_packet->skb);
5beef3c9
AL
342 kfree(forw_packet->packet_buff);
343 kfree(forw_packet);
344}
345
/* link a broadcast forw_packet into the pending list and arm its
 * delayed work; send_time is a relative delay in jiffies.
 * The packet must be on the list BEFORE the timer is started so that
 * purge_outstanding_packets() can always find and cancel it. */
static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	unsigned long flags;
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_irqsave(&forw_bcast_list_lock, flags);
	hlist_add_head(&forw_packet->list, &forw_bcast_list);
	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}
363
e7017195 364void add_bcast_packet_to_list(struct sk_buff *skb)
5beef3c9
AL
365{
366 struct forw_packet *forw_packet;
367
368 forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
369 if (!forw_packet)
370 return;
371
e7017195
SW
372 skb = skb_copy(skb, GFP_ATOMIC);
373 if (!skb) {
e281cf89 374 kfree(forw_packet);
5beef3c9 375 return;
e281cf89 376 }
5beef3c9 377
e7017195
SW
378 skb_reset_mac_header(skb);
379
380 forw_packet->skb = skb;
381 forw_packet->packet_buff = NULL;
5beef3c9
AL
382
383 /* how often did we send the bcast packet ? */
384 forw_packet->num_packets = 0;
385
386 _add_bcast_packet_to_list(forw_packet, 1);
387}
388
/* delayed-work handler: rebroadcast a queued packet on every interface,
 * then requeue it until it has been sent three times or the module is
 * shutting down */
void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batman_if *batman_if;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	unsigned long flags;
	struct sk_buff *skb1;

	/* unlink first; _add_bcast_packet_to_list() re-inserts below if
	 * another round is due */
	spin_lock_irqsave(&forw_bcast_list_lock, flags);
	hlist_del(&forw_packet->list);
	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		/* send a copy of the saved skb - send_skb_packet() consumes
		 * its skb, so the original must be kept for the next round */
		skb1 = skb_copy(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			send_skb_packet(skb1,
					batman_if, broadcastAddr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send and we are not shutting
	 * down */
	if ((forw_packet->num_packets < 3) &&
	    (atomic_read(&module_state) != MODULE_DEACTIVATING))
		/* requeue with a 5 ms delay */
		_add_bcast_packet_to_list(forw_packet, ((5 * HZ) / 1000));
	else
		forw_packet_free(forw_packet);
}
424
/* delayed-work handler: transmit a queued OGM; own OGMs immediately
 * schedule their successor so the queue never runs dry */
void send_outstanding_bat_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	unsigned long flags;

	spin_lock_irqsave(&forw_bat_list_lock, flags);
	hlist_del(&forw_packet->list);
	spin_unlock_irqrestore(&forw_bat_list_lock, flags);

	send_packet(forw_packet);

	/**
	 * we have to have at least one packet in the queue
	 * to determine the queues wake up time unless we are
	 * shutting down
	 */
	if ((forw_packet->own) &&
	    (atomic_read(&module_state) != MODULE_DEACTIVATING))
		schedule_own_packet(forw_packet->if_incoming);

	forw_packet_free(forw_packet);
}
450
/* cancel and free every queued broadcast and OGM packet.
 *
 * The lock must be dropped around cancel_delayed_work_sync() because
 * the work handlers themselves take the same lock to unlink the entry.
 *
 * NOTE(review): after re-acquiring the lock the saved iteration cursor
 * (tmp_node) points into memory the cancelled/completed work item may
 * already have freed - this looks racy; verify against later upstream
 * fixes of this function. */
void purge_outstanding_packets(void)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	unsigned long flags;

	bat_dbg(DBG_BATMAN, "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_irqsave(&forw_bcast_list_lock, flags);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &forw_bcast_list, list) {

		spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

		/**
		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_irqsave(&forw_bcast_list_lock, flags);
	}
	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

	/* free batman packet list */
	spin_lock_irqsave(&forw_bat_list_lock, flags);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &forw_bat_list, list) {

		spin_unlock_irqrestore(&forw_bat_list_lock, flags);

		/**
		 * send_outstanding_bat_packet() will lock the list to
		 * delete the item from the list
		 */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_irqsave(&forw_bat_list_lock, flags);
	}
	spin_unlock_irqrestore(&forw_bat_list_lock, flags);
}