]>
Commit | Line | Data |
---|---|---|
5beef3c9 AL |
1 | /* |
2 | * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors: | |
3 | * | |
4 | * Marek Lindner, Simon Wunderlich | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or | |
7 | * modify it under the terms of version 2 of the GNU General Public | |
8 | * License as published by the Free Software Foundation. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, but | |
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
13 | * General Public License for more details. | |
14 | * | |
15 | * You should have received a copy of the GNU General Public License | |
16 | * along with this program; if not, write to the Free Software | |
17 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | |
18 | * 02110-1301, USA | |
19 | * | |
20 | */ | |
21 | ||
22 | #include "main.h" | |
23 | #include "send.h" | |
24 | #include "log.h" | |
25 | #include "routing.h" | |
26 | #include "translation-table.h" | |
27 | #include "hard-interface.h" | |
28 | #include "types.h" | |
29 | #include "vis.h" | |
30 | #include "aggregation.h" | |
31 | ||
32 | #include "compat.h" | |
33 | ||
34 | /* apply hop penalty for a normal link */ | |
35 | static uint8_t hop_penalty(const uint8_t tq) | |
36 | { | |
37 | return (tq * (TQ_MAX_VALUE - TQ_HOP_PENALTY)) / (TQ_MAX_VALUE); | |
38 | } | |
39 | ||
40 | /* when do we schedule our own packet to be sent */ | |
41 | static unsigned long own_send_time(void) | |
42 | { | |
43 | return jiffies + | |
44 | (((atomic_read(&originator_interval) - JITTER + | |
45 | (random32() % 2*JITTER)) * HZ) / 1000); | |
46 | } | |
47 | ||
48 | /* when do we schedule a forwarded packet to be sent */ | |
49 | static unsigned long forward_send_time(void) | |
50 | { | |
51 | unsigned long send_time = jiffies; /* Starting now plus... */ | |
52 | ||
53 | if (atomic_read(&aggregation_enabled)) | |
54 | send_time += (((MAX_AGGREGATION_MS - (JITTER/2) + | |
55 | (random32() % JITTER)) * HZ) / 1000); | |
56 | else | |
57 | send_time += (((random32() % (JITTER/2)) * HZ) / 1000); | |
58 | ||
59 | return send_time; | |
60 | } | |
61 | ||
/* sends a raw packet.
 *
 * Allocates an skb, prepends an ethernet header (src = interface mac,
 * dst = dst_addr, proto = ETH_P_BATMAN), copies pack_buff behind it and
 * hands the frame to the network stack. The skb is consumed by
 * dev_queue_xmit() on both success and failure; pack_buff is only read
 * and remains owned by the caller.
 */
void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
		     struct batman_if *batman_if, uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;
	struct sk_buff *skb;
	int retval;
	char *data;

	/* never transmit on an interface that is not fully activated */
	if (batman_if->if_active != IF_ACTIVE)
		return;

	if (!(batman_if->net_dev->flags & IFF_UP)) {
		debug_log(LOG_TYPE_WARN,
			  "Interface %s is not up - can't send packet via that interface (IF_TO_BE_DEACTIVATED was here) !\n",
			  batman_if->dev);
		return;
	}

	/* room for the payload plus the ethernet header in front of it */
	skb = dev_alloc_skb(pack_buff_len + sizeof(struct ethhdr));
	if (!skb)
		return;
	data = skb_put(skb, pack_buff_len + sizeof(struct ethhdr));

	memcpy(data + sizeof(struct ethhdr), pack_buff, pack_buff_len);

	/* build the ethernet header in place at the start of the skb data */
	ethhdr = (struct ethhdr *) data;
	memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_reset_mac_header(skb);
	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);
	skb->dev = batman_if->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */
	retval = dev_queue_xmit(skb);
	if (retval < 0)
		debug_log(LOG_TYPE_CRIT,
			  "Can't write to raw socket (IF_TO_BE_DEACTIVATED was here): %i\n",
			  retval);
}
108 | ||
/* Send a packet to a given interface
 *
 * Walks every (possibly aggregated) batman packet inside
 * forw_packet->packet_buff, fixes up the per-packet DIRECTLINK flag for
 * this outgoing interface, logs each one and finally broadcasts the
 * whole buffer on batman_if. Note the flags are modified in the shared
 * buffer right before transmission, so the walk must complete before
 * send_raw_packet() is called.
 */
static void send_packet_to_if(struct forw_packet *forw_packet,
			      struct batman_if *batman_if)
{
	char *fwd_str;
	uint8_t packet_num;
	int16_t buff_pos;
	struct batman_packet *batman_packet;
	char orig_str[ETH_STR_LEN];

	if (batman_if->if_active != IF_ACTIVE)
		return;

	packet_num = buff_pos = 0;
	batman_packet = (struct batman_packet *)
		(forw_packet->packet_buff);

	/* adjust all flags and log packets */
	while (aggregated_packet(buff_pos,
				 forw_packet->packet_len,
				 batman_packet->num_hna)) {

		/* we might have aggregated direct link packets with an
		 * ordinary base packet */
		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
		    (forw_packet->if_incoming == batman_if))
			batman_packet->flags |= DIRECTLINK;
		else
			batman_packet->flags &= ~DIRECTLINK;

		addr_to_string(orig_str, batman_packet->orig);
		/* only the first packet in the buffer can be our own OGM */
		fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
							    "Sending own" :
							    "Forwarding"));
		debug_log(LOG_TYPE_BATMAN,
			  "%s %spacket (originator %s, seqno %d, TQ %d, TTL %d, IDF %s) on interface %s [%s]\n",
			  fwd_str,
			  (packet_num > 0 ? "aggregated " : ""),
			  orig_str, ntohs(batman_packet->seqno),
			  batman_packet->tq, batman_packet->ttl,
			  (batman_packet->flags & DIRECTLINK ?
			   "on" : "off"),
			  batman_if->dev, batman_if->addr_str);

		/* step over this packet: fixed header plus its HNA list */
		buff_pos += sizeof(struct batman_packet) +
			(batman_packet->num_hna * ETH_ALEN);
		packet_num++;
		batman_packet = (struct batman_packet *)
			(forw_packet->packet_buff + buff_pos);
	}

	/* transmit the whole aggregate in one frame */
	send_raw_packet(forw_packet->packet_buff,
			forw_packet->packet_len,
			batman_if, broadcastAddr);
}
164 | ||
/* send a batman packet
 *
 * Decides where a scheduled OGM goes: packets that must stay on their
 * incoming interface (direct-link with TTL 1, or own OGMs of a
 * non-primary interface) are sent there only; everything else is
 * broadcast on all interfaces via send_packet_to_if().
 */
static void send_packet(struct forw_packet *forw_packet)
{
	struct batman_if *batman_if;
	struct batman_packet *batman_packet =
		(struct batman_packet *)(forw_packet->packet_buff);
	char orig_str[ETH_STR_LEN];
	unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	if (!forw_packet->if_incoming) {
		debug_log(LOG_TYPE_CRIT,
			  "Error - can't forward packet: incoming iface not specified\n");
		return;
	}

	if (forw_packet->if_incoming->if_active != IF_ACTIVE)
		return;

	addr_to_string(orig_str, batman_packet->orig);

	/* multihomed peer assumed */
	/* non-primary OGMs are only broadcasted on their interface */
	if ((directlink && (batman_packet->ttl == 1)) ||
	    (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {

		/* FIXME: what about aggregated packets ? */
		debug_log(LOG_TYPE_BATMAN,
			  "%s packet (originator %s, seqno %d, TTL %d) on interface %s [%s]\n",
			  (forw_packet->own ? "Sending own" : "Forwarding"),
			  orig_str, ntohs(batman_packet->seqno),
			  batman_packet->ttl, forw_packet->if_incoming->dev,
			  forw_packet->if_incoming->addr_str);

		/* single-interface case: skip the all-interface loop below */
		send_raw_packet(forw_packet->packet_buff,
				forw_packet->packet_len,
				forw_packet->if_incoming,
				broadcastAddr);
		return;
	}

	/* broadcast on every interface; if_list is RCU-protected */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list)
		send_packet_to_if(forw_packet, batman_if);
	rcu_read_unlock();
}
211 | ||
212 | static void rebuild_batman_packet(struct batman_if *batman_if) | |
213 | { | |
214 | int new_len; | |
215 | unsigned char *new_buff; | |
216 | struct batman_packet *batman_packet; | |
217 | ||
218 | new_len = sizeof(struct batman_packet) + (num_hna * ETH_ALEN); | |
219 | new_buff = kmalloc(new_len, GFP_ATOMIC); | |
220 | ||
221 | /* keep old buffer if kmalloc should fail */ | |
222 | if (new_buff) { | |
223 | memcpy(new_buff, batman_if->packet_buff, | |
224 | sizeof(struct batman_packet)); | |
225 | batman_packet = (struct batman_packet *)new_buff; | |
226 | ||
227 | batman_packet->num_hna = hna_local_fill_buffer( | |
228 | new_buff + sizeof(struct batman_packet), | |
229 | new_len - sizeof(struct batman_packet)); | |
230 | ||
231 | kfree(batman_if->packet_buff); | |
232 | batman_if->packet_buff = new_buff; | |
233 | batman_if->packet_len = new_len; | |
234 | } | |
235 | } | |
236 | ||
/* prepare and queue this interface's own OGM for transmission
 *
 * Activates a pending interface, rebuilds the packet buffer if the
 * local HNA table changed, stamps the current sequence number and vis
 * flags, and enqueues the packet with a jittered send time.
 */
void schedule_own_packet(struct batman_if *batman_if)
{
	unsigned long send_time;
	struct batman_packet *batman_packet;

	/**
	 * the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (batman_if->if_active == IF_TO_BE_ACTIVATED)
		batman_if->if_active = IF_ACTIVE;

	/* if local hna has changed and interface is a primary interface */
	if ((atomic_read(&hna_local_changed)) && (batman_if->if_num == 0))
		rebuild_batman_packet(batman_if);

	/**
	 * NOTE: packet_buff might just have been re-allocated in
	 * rebuild_batman_packet()
	 */
	batman_packet = (struct batman_packet *)batman_if->packet_buff;

	/* change sequence number to network order */
	batman_packet->seqno = htons((uint16_t)atomic_read(&batman_if->seqno));

	if (is_vis_server())
		batman_packet->flags = VIS_SERVER;
	else
		batman_packet->flags = 0;

	/* could be read by receive_bat_packet() */
	atomic_inc(&batman_if->seqno);

	slide_own_bcast_window(batman_if);
	send_time = own_send_time();
	/* own == 1: this queue entry triggers rescheduling on send */
	add_bat_packet_to_list(batman_if->packet_buff,
			       batman_if->packet_len, batman_if, 1, send_time);
}
278 | ||
/* queue a received OGM for rebroadcast
 *
 * Drops packets whose TTL is exhausted, rewrites TQ/TTL based on our
 * best-ranking neighbor towards the originator, applies the hop
 * penalty, sets the DIRECTLINK flag and enqueues the packet with a
 * jittered send time on if_incoming.
 */
void schedule_forward_packet(struct orig_node *orig_node,
			     struct ethhdr *ethhdr,
			     struct batman_packet *batman_packet,
			     uint8_t directlink, int hna_buff_len,
			     struct batman_if *if_incoming)
{
	unsigned char in_tq, in_ttl, tq_avg = 0;
	unsigned long send_time;

	if (batman_packet->ttl <= 1) {
		debug_log(LOG_TYPE_BATMAN, "ttl exceeded \n");
		return;
	}

	/* remember incoming values for the debug log below */
	in_tq = batman_packet->tq;
	in_ttl = batman_packet->ttl;

	batman_packet->ttl--;
	memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

	/* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
	 * of our best tq value */
	if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {

		/* rebroadcast ogm of best ranking neighbor as is */
		if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) {
			batman_packet->tq = orig_node->router->tq_avg;

			if (orig_node->router->last_ttl)
				batman_packet->ttl = orig_node->router->last_ttl - 1;
		}

		tq_avg = orig_node->router->tq_avg;
	}

	/* apply hop penalty */
	batman_packet->tq = hop_penalty(batman_packet->tq);

	debug_log(LOG_TYPE_BATMAN, "Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i \n",
		  in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
		  batman_packet->ttl);

	/* NOTE(review): converts seqno back to network order; assumes the
	 * receive path converted it to host order earlier -- confirm in
	 * receive_bat_packet() */
	batman_packet->seqno = htons(batman_packet->seqno);

	if (directlink)
		batman_packet->flags |= DIRECTLINK;
	else
		batman_packet->flags &= ~DIRECTLINK;

	send_time = forward_send_time();
	/* own == 0: forwarded packets do not trigger rescheduling */
	add_bat_packet_to_list((unsigned char *)batman_packet,
			       sizeof(struct batman_packet) + hna_buff_len,
			       if_incoming, 0, send_time);
}
333 | ||
334 | static void forw_packet_free(struct forw_packet *forw_packet) | |
335 | { | |
336 | kfree(forw_packet->packet_buff); | |
337 | kfree(forw_packet); | |
338 | } | |
339 | ||
/* link a prepared broadcast packet into forw_bcast_list and arm its
 * delayed work; send_time is a delay in jiffies */
static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock(&forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &forw_bcast_list);
	spin_unlock(&forw_bcast_list_lock);

	/* start timer for this packet; the work item removes it from the
	 * list again when it fires */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}
356 | ||
357 | void add_bcast_packet_to_list(unsigned char *packet_buff, int packet_len) | |
358 | { | |
359 | struct forw_packet *forw_packet; | |
360 | ||
361 | forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC); | |
362 | if (!forw_packet) | |
363 | return; | |
364 | ||
365 | forw_packet->packet_buff = kmalloc(packet_len, GFP_ATOMIC); | |
e281cf89 JL |
366 | if (!forw_packet->packet_buff) { |
367 | kfree(forw_packet); | |
5beef3c9 | 368 | return; |
e281cf89 | 369 | } |
5beef3c9 AL |
370 | |
371 | forw_packet->packet_len = packet_len; | |
372 | memcpy(forw_packet->packet_buff, packet_buff, forw_packet->packet_len); | |
373 | ||
374 | /* how often did we send the bcast packet ? */ | |
375 | forw_packet->num_packets = 0; | |
376 | ||
377 | _add_bcast_packet_to_list(forw_packet, 1); | |
378 | } | |
379 | ||
/* delayed-work handler: (re)broadcast one queued packet on every
 * interface
 *
 * Unlinks the packet from forw_bcast_list, sends it on all interfaces
 * and requeues itself (5 ms later) until the packet has been sent 3
 * times or the module is shutting down, then frees it.
 */
void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batman_if *batman_if;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);

	spin_lock(&forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock(&forw_bcast_list_lock);

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		send_raw_packet(forw_packet->packet_buff,
				forw_packet->packet_len,
				batman_if, broadcastAddr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send and we are not shutting
	 * down */
	if ((forw_packet->num_packets < 3) &&
	    (atomic_read(&module_state) != MODULE_DEACTIVATING))
		_add_bcast_packet_to_list(forw_packet, ((5 * HZ) / 1000));
	else
		forw_packet_free(forw_packet);
}
411 | ||
/* delayed-work handler: transmit one queued batman packet
 *
 * Unlinks the packet from forw_bat_list, sends it, and — if it was our
 * own OGM — schedules the next one so the queue is never empty while
 * the module is running.
 */
void send_outstanding_bat_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);

	spin_lock(&forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock(&forw_bat_list_lock);

	send_packet(forw_packet);

	/**
	 * we have to have at least one packet in the queue
	 * to determine the queues wake up time unless we are
	 * shutting down
	 */
	if ((forw_packet->own) &&
	    (atomic_read(&module_state) != MODULE_DEACTIVATING))
		schedule_own_packet(forw_packet->if_incoming);

	forw_packet_free(forw_packet);
}
436 | ||
/* cancel all pending packet transmissions (module shutdown path)
 *
 * For each queued packet the list lock is dropped before
 * cancel_delayed_work_sync() because the work handler itself takes the
 * same lock to unlink the entry — holding it would deadlock.
 * NOTE(review): after the lock is dropped the _safe iterator's saved
 * next pointer may go stale if the handler frees entries concurrently —
 * presumably safe at shutdown when nothing enqueues; confirm.
 */
void purge_outstanding_packets(void)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;

	debug_log(LOG_TYPE_BATMAN, "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock(&forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &forw_bcast_list, list) {

		spin_unlock(&forw_bcast_list_lock);

		/**
		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock(&forw_bcast_list_lock);
	}
	spin_unlock(&forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock(&forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &forw_bat_list, list) {

		spin_unlock(&forw_bat_list_lock);

		/**
		 * send_outstanding_bat_packet() will lock the list to
		 * delete the item from the list
		 */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock(&forw_bat_list_lock);
	}
	spin_unlock(&forw_bat_list_lock);
}