]> bbs.cooldavid.org Git - net-next-2.6.git/blob - drivers/staging/batman-adv/translation-table.c
Staging: batman-adv: Mark locally used symbols as static
[net-next-2.6.git] / drivers / staging / batman-adv / translation-table.c
1 /*
2  * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
3  *
4  * Marek Lindner, Simon Wunderlich
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of version 2 of the GNU General Public
8  * License as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13  * General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18  * 02110-1301, USA
19  *
20  */
21
22 #include "main.h"
23 #include "translation-table.h"
24 #include "soft-interface.h"
25 #include "types.h"
26 #include "hash.h"
27
/* hash of locally announced (HNA) mac addresses; shared with other files */
struct hashtable_t *hna_local_hash;
/* hash of mac addresses announced by other originators in the mesh */
static struct hashtable_t *hna_global_hash;
/* set to 1 whenever the local hna set changed since the last packet fill */
atomic_t hna_local_changed;

/* protects hna_local_hash and num_hna */
DEFINE_SPINLOCK(hna_local_hash_lock);
/* protects hna_global_hash */
static DEFINE_SPINLOCK(hna_global_hash_lock);

static void hna_local_purge(struct work_struct *work);
/* periodic worker that expires timed-out local entries */
static DECLARE_DELAYED_WORK(hna_local_purge_wq, hna_local_purge);
static void _hna_global_del_orig(struct hna_global_entry *hna_global_entry,
				 char *message);
39
40 static void hna_local_start_timer(void)
41 {
42         queue_delayed_work(bat_event_workqueue, &hna_local_purge_wq, 10 * HZ);
43 }
44
45 int hna_local_init(void)
46 {
47         if (hna_local_hash)
48                 return 1;
49
50         hna_local_hash = hash_new(128, compare_orig, choose_orig);
51
52         if (!hna_local_hash)
53                 return 0;
54
55         atomic_set(&hna_local_changed, 0);
56         hna_local_start_timer();
57
58         return 1;
59 }
60
/**
 * hna_local_add - announce @addr as a host reachable through this node
 * @addr: mac address to announce
 *
 * If the address is already known, only its last_seen timestamp is
 * refreshed.  Otherwise a new entry is allocated and inserted, unless the
 * announcement would no longer fit into a batman packet.  The soft
 * interface's own address is marked never_purge so the purge worker keeps
 * it forever.  Finally, a matching global hna entry (if any) is removed,
 * since a locally reachable host must not also be routed via the mesh.
 *
 * NOTE(review): the local hash lock is dropped between the lookup and the
 * insert, so two concurrent callers could race — confirm callers serialize.
 */
void hna_local_add(uint8_t *addr)
{
	struct hna_local_entry *hna_local_entry;
	struct hna_global_entry *hna_global_entry;
	struct hashtable_t *swaphash;
	unsigned long flags;

	spin_lock_irqsave(&hna_local_hash_lock, flags);
	hna_local_entry =
		((struct hna_local_entry *)hash_find(hna_local_hash, addr));
	spin_unlock_irqrestore(&hna_local_hash_lock, flags);

	/* already announced - just refresh the timestamp */
	if (hna_local_entry != NULL) {
		hna_local_entry->last_seen = jiffies;
		return;
	}

	/* only announce as many hosts as possible in the batman-packet and
	   space in batman_packet->num_hna That also should give a limit to
	   MAC-flooding. */
	if ((num_hna + 1 > (ETH_DATA_LEN - BAT_PACKET_LEN) / ETH_ALEN) ||
	    (num_hna + 1 > 255)) {
		bat_dbg(DBG_ROUTES,
			"Can't add new local hna entry (%pM): "
			"number of local hna entries exceeds packet size\n",
			addr);
		return;
	}

	bat_dbg(DBG_ROUTES, "Creating new local hna entry: %pM\n",
		addr);

	/* GFP_ATOMIC: this path may be reached from non-sleeping context */
	hna_local_entry = kmalloc(sizeof(struct hna_local_entry), GFP_ATOMIC);
	if (!hna_local_entry)
		return;

	memcpy(hna_local_entry->addr, addr, ETH_ALEN);
	hna_local_entry->last_seen = jiffies;

	/* the batman interface mac address should never be purged */
	if (compare_orig(addr, soft_device->dev_addr))
		hna_local_entry->never_purge = 1;
	else
		hna_local_entry->never_purge = 0;

	spin_lock_irqsave(&hna_local_hash_lock, flags);

	hash_add(hna_local_hash, hna_local_entry);
	num_hna++;
	atomic_set(&hna_local_changed, 1);

	/* grow the table once it holds more than size/4 elements */
	if (hna_local_hash->elements * 4 > hna_local_hash->size) {
		swaphash = hash_resize(hna_local_hash,
				       hna_local_hash->size * 2);

		if (swaphash == NULL)
			printk(KERN_ERR "batman-adv:"
			       "Couldn't resize local hna hash table\n");
		else
			hna_local_hash = swaphash;
	}

	spin_unlock_irqrestore(&hna_local_hash_lock, flags);

	/* remove address from global hash if present */
	spin_lock_irqsave(&hna_global_hash_lock, flags);

	hna_global_entry =
		((struct hna_global_entry *)hash_find(hna_global_hash, addr));

	if (hna_global_entry != NULL)
		_hna_global_del_orig(hna_global_entry, "local hna received");

	spin_unlock_irqrestore(&hna_global_hash_lock, flags);
}
136
137 int hna_local_fill_buffer(unsigned char *buff, int buff_len)
138 {
139         struct hna_local_entry *hna_local_entry;
140         HASHIT(hashit);
141         int i = 0;
142         unsigned long flags;
143
144         spin_lock_irqsave(&hna_local_hash_lock, flags);
145
146         while (hash_iterate(hna_local_hash, &hashit)) {
147
148                 if (buff_len < (i + 1) * ETH_ALEN)
149                         break;
150
151                 hna_local_entry = hashit.bucket->data;
152                 memcpy(buff + (i * ETH_ALEN), hna_local_entry->addr, ETH_ALEN);
153
154                 i++;
155         }
156
157         /* if we did not get all new local hnas see you next time  ;-) */
158         if (i == num_hna)
159                 atomic_set(&hna_local_changed, 0);
160
161         spin_unlock_irqrestore(&hna_local_hash_lock, flags);
162
163         return i;
164 }
165
166 int hna_local_seq_print_text(struct seq_file *seq, void *offset)
167 {
168         struct net_device *net_dev = (struct net_device *)seq->private;
169         struct bat_priv *bat_priv = netdev_priv(net_dev);
170         struct hna_local_entry *hna_local_entry;
171         HASHIT(hashit);
172         HASHIT(hashit_count);
173         unsigned long flags;
174         size_t buf_size, pos;
175         char *buff;
176
177         if (!bat_priv->primary_if) {
178                 return seq_printf(seq, "BATMAN mesh %s disabled - "
179                                "please specify interfaces to enable it\n",
180                                net_dev->name);
181         }
182
183         seq_printf(seq, "Locally retrieved addresses (from %s) "
184                    "announced via HNA:\n",
185                    net_dev->name);
186
187         spin_lock_irqsave(&hna_local_hash_lock, flags);
188
189         buf_size = 1;
190         /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
191         while (hash_iterate(hna_local_hash, &hashit_count))
192                 buf_size += 21;
193
194         buff = kmalloc(buf_size, GFP_ATOMIC);
195         if (!buff) {
196                 spin_unlock_irqrestore(&hna_local_hash_lock, flags);
197                 return -ENOMEM;
198         }
199         buff[0] = '\0';
200         pos = 0;
201
202         while (hash_iterate(hna_local_hash, &hashit)) {
203                 hna_local_entry = hashit.bucket->data;
204
205                 pos += snprintf(buff + pos, 22, " * %pM\n",
206                                 hna_local_entry->addr);
207         }
208
209         spin_unlock_irqrestore(&hna_local_hash_lock, flags);
210
211         seq_printf(seq, "%s", buff);
212         kfree(buff);
213         return 0;
214 }
215
216 static void _hna_local_del(void *data)
217 {
218         kfree(data);
219         num_hna--;
220         atomic_set(&hna_local_changed, 1);
221 }
222
223 static void hna_local_del(struct hna_local_entry *hna_local_entry,
224                           char *message)
225 {
226         bat_dbg(DBG_ROUTES, "Deleting local hna entry (%pM): %s\n",
227                 hna_local_entry->addr, message);
228
229         hash_remove(hna_local_hash, hna_local_entry->addr);
230         _hna_local_del(hna_local_entry);
231 }
232
233 void hna_local_remove(uint8_t *addr, char *message)
234 {
235         struct hna_local_entry *hna_local_entry;
236         unsigned long flags;
237
238         spin_lock_irqsave(&hna_local_hash_lock, flags);
239
240         hna_local_entry = (struct hna_local_entry *)
241                 hash_find(hna_local_hash, addr);
242         if (hna_local_entry)
243                 hna_local_del(hna_local_entry, message);
244
245         spin_unlock_irqrestore(&hna_local_hash_lock, flags);
246 }
247
248 static void hna_local_purge(struct work_struct *work)
249 {
250         struct hna_local_entry *hna_local_entry;
251         HASHIT(hashit);
252         unsigned long flags;
253         unsigned long timeout;
254
255         spin_lock_irqsave(&hna_local_hash_lock, flags);
256
257         while (hash_iterate(hna_local_hash, &hashit)) {
258                 hna_local_entry = hashit.bucket->data;
259
260                 timeout = hna_local_entry->last_seen +
261                         ((LOCAL_HNA_TIMEOUT / 1000) * HZ);
262                 if ((!hna_local_entry->never_purge) &&
263                     time_after(jiffies, timeout))
264                         hna_local_del(hna_local_entry, "address timed out");
265         }
266
267         spin_unlock_irqrestore(&hna_local_hash_lock, flags);
268         hna_local_start_timer();
269 }
270
271 void hna_local_free(void)
272 {
273         if (!hna_local_hash)
274                 return;
275
276         cancel_delayed_work_sync(&hna_local_purge_wq);
277         hash_delete(hna_local_hash, _hna_local_del);
278         hna_local_hash = NULL;
279 }
280
281 int hna_global_init(void)
282 {
283         if (hna_global_hash)
284                 return 1;
285
286         hna_global_hash = hash_new(128, compare_orig, choose_orig);
287
288         if (!hna_global_hash)
289                 return 0;
290
291         return 1;
292 }
293
/**
 * hna_global_add_orig - record the hna announcements of an originator
 * @orig_node: the mesh node that announced the addresses
 * @hna_buff: buffer of announced mac addresses, ETH_ALEN bytes each
 * @hna_buff_len: length of @hna_buff in bytes
 *
 * For every announced address a global entry is created (or re-pointed at
 * @orig_node if it exists), and any matching local entry is dropped.  A
 * copy of the buffer is kept in @orig_node so the entries can be deleted
 * when the originator changes or times out.
 *
 * NOTE(review): the global hash lock is released while allocating a new
 * entry and reacquired before insertion; the existing-entry pointer is
 * then updated under the re-taken lock — statement order here is
 * deliberate, do not reorder.
 */
void hna_global_add_orig(struct orig_node *orig_node,
			 unsigned char *hna_buff, int hna_buff_len)
{
	struct hna_global_entry *hna_global_entry;
	struct hna_local_entry *hna_local_entry;
	struct hashtable_t *swaphash;
	int hna_buff_count = 0;
	unsigned long flags;
	unsigned char *hna_ptr;

	while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) {
		spin_lock_irqsave(&hna_global_hash_lock, flags);

		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
		hna_global_entry = (struct hna_global_entry *)
			hash_find(hna_global_hash, hna_ptr);

		if (hna_global_entry == NULL) {
			/* drop the lock: kmalloc may be slow even atomic */
			spin_unlock_irqrestore(&hna_global_hash_lock, flags);

			hna_global_entry =
				kmalloc(sizeof(struct hna_global_entry),
					GFP_ATOMIC);

			/* out of memory: give up on the remaining addresses */
			if (!hna_global_entry)
				break;

			memcpy(hna_global_entry->addr, hna_ptr, ETH_ALEN);

			bat_dbg(DBG_ROUTES,
				"Creating new global hna entry: "
				"%pM (via %pM)\n",
				hna_global_entry->addr, orig_node->orig);

			spin_lock_irqsave(&hna_global_hash_lock, flags);
			hash_add(hna_global_hash, hna_global_entry);

		}

		/* new or old, the entry now routes via this originator */
		hna_global_entry->orig_node = orig_node;
		spin_unlock_irqrestore(&hna_global_hash_lock, flags);

		/* remove address from local hash if present */
		spin_lock_irqsave(&hna_local_hash_lock, flags);

		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
		hna_local_entry = (struct hna_local_entry *)
			hash_find(hna_local_hash, hna_ptr);

		if (hna_local_entry != NULL)
			hna_local_del(hna_local_entry, "global hna received");

		spin_unlock_irqrestore(&hna_local_hash_lock, flags);

		hna_buff_count++;
	}

	/* initialize, and overwrite if malloc succeeds */
	orig_node->hna_buff = NULL;
	orig_node->hna_buff_len = 0;

	if (hna_buff_len > 0) {
		orig_node->hna_buff = kmalloc(hna_buff_len, GFP_ATOMIC);
		if (orig_node->hna_buff) {
			memcpy(orig_node->hna_buff, hna_buff, hna_buff_len);
			orig_node->hna_buff_len = hna_buff_len;
		}
	}

	spin_lock_irqsave(&hna_global_hash_lock, flags);

	/* grow the table once it holds more than size/4 elements */
	if (hna_global_hash->elements * 4 > hna_global_hash->size) {
		swaphash = hash_resize(hna_global_hash,
				       hna_global_hash->size * 2);

		if (swaphash == NULL)
			printk(KERN_ERR "batman-adv:"
			       "Couldn't resize global hna hash table\n");
		else
			hna_global_hash = swaphash;
	}

	spin_unlock_irqrestore(&hna_global_hash_lock, flags);
}
378
379 int hna_global_seq_print_text(struct seq_file *seq, void *offset)
380 {
381         struct net_device *net_dev = (struct net_device *)seq->private;
382         struct bat_priv *bat_priv = netdev_priv(net_dev);
383         struct hna_global_entry *hna_global_entry;
384         HASHIT(hashit);
385         HASHIT(hashit_count);
386         unsigned long flags;
387         size_t buf_size, pos;
388         char *buff;
389
390         if (!bat_priv->primary_if) {
391                 return seq_printf(seq, "BATMAN mesh %s disabled - "
392                                   "please specify interfaces to enable it\n",
393                                   net_dev->name);
394         }
395
396         seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
397                    net_dev->name);
398
399         spin_lock_irqsave(&hna_global_hash_lock, flags);
400
401         buf_size = 1;
402         /* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
403         while (hash_iterate(hna_global_hash, &hashit_count))
404                 buf_size += 43;
405
406         buff = kmalloc(buf_size, GFP_ATOMIC);
407         if (!buff) {
408                 spin_unlock_irqrestore(&hna_global_hash_lock, flags);
409                 return -ENOMEM;
410         }
411         buff[0] = '\0';
412         pos = 0;
413
414         while (hash_iterate(hna_global_hash, &hashit)) {
415                 hna_global_entry = hashit.bucket->data;
416
417                 pos += snprintf(buff + pos, 44,
418                                 " * %pM via %pM\n", hna_global_entry->addr,
419                                 hna_global_entry->orig_node->orig);
420         }
421
422         spin_unlock_irqrestore(&hna_global_hash_lock, flags);
423
424         seq_printf(seq, "%s", buff);
425         kfree(buff);
426         return 0;
427 }
428
429 static void _hna_global_del_orig(struct hna_global_entry *hna_global_entry,
430                                  char *message)
431 {
432         bat_dbg(DBG_ROUTES, "Deleting global hna entry %pM (via %pM): %s\n",
433                 hna_global_entry->addr, hna_global_entry->orig_node->orig,
434                 message);
435
436         hash_remove(hna_global_hash, hna_global_entry->addr);
437         kfree(hna_global_entry);
438 }
439
440 void hna_global_del_orig(struct orig_node *orig_node, char *message)
441 {
442         struct hna_global_entry *hna_global_entry;
443         int hna_buff_count = 0;
444         unsigned long flags;
445         unsigned char *hna_ptr;
446
447         if (orig_node->hna_buff_len == 0)
448                 return;
449
450         spin_lock_irqsave(&hna_global_hash_lock, flags);
451
452         while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
453                 hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
454                 hna_global_entry = (struct hna_global_entry *)
455                         hash_find(hna_global_hash, hna_ptr);
456
457                 if ((hna_global_entry != NULL) &&
458                     (hna_global_entry->orig_node == orig_node))
459                         _hna_global_del_orig(hna_global_entry, message);
460
461                 hna_buff_count++;
462         }
463
464         spin_unlock_irqrestore(&hna_global_hash_lock, flags);
465
466         orig_node->hna_buff_len = 0;
467         kfree(orig_node->hna_buff);
468         orig_node->hna_buff = NULL;
469 }
470
/* hash_delete() callback: free one global hna entry */
static void hna_global_del(void *data)
{
	kfree(data);
}
475
476 void hna_global_free(void)
477 {
478         if (!hna_global_hash)
479                 return;
480
481         hash_delete(hna_global_hash, hna_global_del);
482         hna_global_hash = NULL;
483 }
484
485 struct orig_node *transtable_search(uint8_t *addr)
486 {
487         struct hna_global_entry *hna_global_entry;
488         unsigned long flags;
489
490         spin_lock_irqsave(&hna_global_hash_lock, flags);
491         hna_global_entry = (struct hna_global_entry *)
492                 hash_find(hna_global_hash, addr);
493         spin_unlock_irqrestore(&hna_global_hash_lock, flags);
494
495         if (hna_global_entry == NULL)
496                 return NULL;
497
498         return hna_global_entry->orig_node;
499 }