/*
 * net/ax25/ax25_out.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(ax25_frag_lock);

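/*
 * ax25_send_frame - queue a PID-prefixed frame on the connection for the
 * given source/destination/digipeater/device tuple, creating the
 * connection and starting link establishment if none exists yet.
 * A paclen of zero selects the device default. Returns the ax25_cb in
 * use, or NULL on failure (in which case the skb is not consumed).
 */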
ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev)
{
	ax25_dev *ax25_dev;
	ax25_cb *ax25;

	/*
	 * Take the default packet length for the device if zero is
	 * specified.
	 */
	if (paclen == 0) {
		if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
			return NULL;

		paclen = ax25_dev->values[AX25_VALUES_PACLEN];
	}

	/*
	 * Look for an existing connection.
	 */
	if ((ax25 = ax25_find_cb(src, dest, digi, dev)) != NULL) {
		ax25_output(ax25, paclen, skb);
		return ax25;		/* It already existed */
	}

	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
		return NULL;

	if ((ax25 = ax25_create_cb()) == NULL)
		return NULL;

	ax25_fillin_cb(ax25, ax25_dev);

	ax25->source_addr = *src;
	ax25->dest_addr   = *dest;

	if (digi != NULL) {
		ax25->digipeat = kmemdup(digi, sizeof(*digi), GFP_ATOMIC);
		if (ax25->digipeat == NULL) {
			ax25_cb_put(ax25);
			return NULL;
		}
	}

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_std_establish_data_link(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	case AX25_PROTO_DAMA_SLAVE:
		if (ax25_dev->dama.slave)
			ax25_ds_establish_data_link(ax25);
		else
			ax25_std_establish_data_link(ax25);
		break;
#endif
	}

	ax25_cb_add(ax25);

	ax25->state = AX25_STATE_1;

	ax25_start_heartbeat(ax25);

	ax25_output(ax25, paclen, skb);

	return ax25;			/* We had to create it */
}

EXPORT_SYMBOL(ax25_send_frame);

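/*
 * Illustrative call pattern only (variable names are hypothetical, not
 * taken from an in-tree caller): the layer above pushes a PID byte in
 * front of its data and hands the skb over, keeping the skb on failure
 * since a NULL return means it was not consumed:
 *
 *	*skb_push(skb, 1) = AX25_P_TEXT;
 *	ax25 = ax25_send_frame(skb, 0, &src, &dest, digi, dev);
 *	if (ax25 == NULL)
 *		kfree_skb(skb);
 */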
/*
 * All outgoing AX.25 I frames pass via this routine. Therefore this is
 * where the fragmentation of frames takes place. Frames whose data part
 * is longer than paclen are split: AX25_P_TEXT frames are simply chopped
 * into several AX25_P_TEXT frames, anything else is segmented using the
 * AX25_P_SEGMENT (KA9Q) fragmentation scheme.
 */
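/*
 * With the KA9Q scheme each fragment goes out with PID AX25_P_SEGMENT
 * followed by a one byte fragment header: the number of fragments still
 * to follow, with AX25_SEG_FIRST or-ed into the first one. For example,
 * a 600 byte skb (PID plus 599 data bytes) with paclen 256 is sent as
 * three fragments of 254, 254 and 92 bytes whose headers count down
 * 2, 1, 0.
 */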
void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
{
	struct sk_buff *skbn;
	unsigned char *p;
	int frontlen, len, fragno, ka9qfrag, first = 1;

	if ((skb->len - 1) > paclen) {	/* the leading PID byte does not count against paclen */
		if (*skb->data == AX25_P_TEXT) {
			skb_pull(skb, 1);	/* skip PID */
			ka9qfrag = 0;
		} else {
			paclen -= 2;		/* Allow for fragment control info */
			ka9qfrag = 1;
		}

		fragno = skb->len / paclen;
		if (skb->len % paclen == 0) fragno--;

		frontlen = skb_headroom(skb);	/* Address space + CTRL */

		while (skb->len > 0) {
			spin_lock_bh(&ax25_frag_lock);
			if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
				spin_unlock_bh(&ax25_frag_lock);
				printk(KERN_CRIT "AX.25: ax25_output - out of memory\n");
				return;
			}

			if (skb->sk != NULL)
				skb_set_owner_w(skbn, skb->sk);

			spin_unlock_bh(&ax25_frag_lock);

			len = (paclen > skb->len) ? skb->len : paclen;

			if (ka9qfrag == 1) {
				skb_reserve(skbn, frontlen + 2);
				skb_set_network_header(skbn,
						       skb_network_offset(skb));
				memcpy(skb_put(skbn, len), skb->data, len);
				p = skb_push(skbn, 2);

				*p++ = AX25_P_SEGMENT;

				*p = fragno--;
				if (first) {
					*p |= AX25_SEG_FIRST;
					first = 0;
				}
			} else {
				skb_reserve(skbn, frontlen + 1);
				skb_set_network_header(skbn,
						       skb_network_offset(skb));
				memcpy(skb_put(skbn, len), skb->data, len);
				p = skb_push(skbn, 1);
				*p = AX25_P_TEXT;
			}

			skb_pull(skb, len);
			skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
		}

		kfree_skb(skb);
	} else {
		skb_queue_tail(&ax25->write_queue, skb);	  /* Throw it on the queue */
	}

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_kick(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	/*
	 * A DAMA slave is _required_ to work as normal AX.25L2V2
	 * if no DAMA master is available.
	 */
	case AX25_PROTO_DAMA_SLAVE:
		if (!ax25->ax25_dev->dama.slave) ax25_kick(ax25);
		break;
#endif
	}
}

/*
 * This procedure is passed a buffer descriptor for an iframe. It builds
 * the rest of the control part of the frame and then writes it out.
 */
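/*
 * The control field built below follows AX.25: with modulus 8 a single
 * octet holds N(R) in the top three bits, N(S) in bits 3-1 and the
 * AX25_PF poll bit; with modulus 128 two octets are used, the first
 * carrying N(S) and the second N(R) together with the AX25_EPF poll bit.
 */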
static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit)
{
	unsigned char *frame;

	if (skb == NULL)
		return;

	skb_reset_network_header(skb);

	if (ax25->modulus == AX25_MODULUS) {
		frame = skb_push(skb, 1);

		*frame = AX25_I;
		*frame |= (poll_bit) ? AX25_PF : 0;
		*frame |= (ax25->vr << 5);
		*frame |= (ax25->vs << 1);
	} else {
		frame = skb_push(skb, 2);

		frame[0] = AX25_I;
		frame[0] |= (ax25->vs << 1);
		frame[1] = (poll_bit) ? AX25_EPF : 0;
		frame[1] |= (ax25->vr << 1);
	}

	ax25_start_idletimer(ax25);

	ax25_transmit_buffer(ax25, skb, AX25_COMMAND);
}

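/*
 * ax25_kick fills the transmit window: va is the oldest unacknowledged
 * sequence number, vs the next one to send, and at most "window" frames
 * may be outstanding at once. Each frame is cloned before transmission
 * so that the original can sit on the ack_queue until it is acknowledged
 * or has to be retransmitted.
 */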
void ax25_kick(ax25_cb *ax25)
{
	struct sk_buff *skb, *skbn;
	int last = 1;
	unsigned short start, end, next;

	if (ax25->state != AX25_STATE_3 && ax25->state != AX25_STATE_4)
		return;

	if (ax25->condition & AX25_COND_PEER_RX_BUSY)
		return;

	if (skb_peek(&ax25->write_queue) == NULL)
		return;

	start = (skb_peek(&ax25->ack_queue) == NULL) ? ax25->va : ax25->vs;
	end   = (ax25->va + ax25->window) % ax25->modulus;

	if (start == end)
		return;

	ax25->vs = start;

	/*
	 * Transmit data until either we're out of data to send or
	 * the window is full. Send a poll on the final I frame if
	 * the window is filled.
	 */

	/*
	 * Dequeue the frame and copy it.
	 */
	skb = skb_dequeue(&ax25->write_queue);

	do {
		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
			skb_queue_head(&ax25->write_queue, skb);
			break;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);

		next = (ax25->vs + 1) % ax25->modulus;
		last = (next == end);

		/*
		 * Transmit the frame copy.
		 * bke 960114: do not set the Poll bit on the last frame
		 * in DAMA mode.
		 */
		switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
		case AX25_PROTO_STD_SIMPLEX:
		case AX25_PROTO_STD_DUPLEX:
			ax25_send_iframe(ax25, skbn, (last) ? AX25_POLLON : AX25_POLLOFF);
			break;

#ifdef CONFIG_AX25_DAMA_SLAVE
		case AX25_PROTO_DAMA_SLAVE:
			ax25_send_iframe(ax25, skbn, AX25_POLLOFF);
			break;
#endif
		}

		ax25->vs = next;

		/*
		 * Requeue the original data frame.
		 */
		skb_queue_tail(&ax25->ack_queue, skb);

	} while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL);

	ax25->condition &= ~AX25_COND_ACK_PENDING;

	if (!ax25_t1timer_running(ax25)) {
		ax25_stop_t3timer(ax25);
		ax25_calculate_t1(ax25);
		ax25_start_t1timer(ax25);
	}
}

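/*
 * ax25_transmit_buffer prepends the AX.25 address field (destination,
 * source and any digipeater path) to an otherwise complete frame and
 * hands it to ax25_queue_xmit, reallocating the skb first if it lacks
 * the necessary headroom.
 */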
void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
{
	struct sk_buff *skbn;
	unsigned char *ptr;
	int headroom;

	if (ax25->ax25_dev == NULL) {
		ax25_disconnect(ax25, ENETUNREACH);
		return;
	}

	headroom = ax25_addr_size(ax25->digipeat);

	if (skb_headroom(skb) < headroom) {
		if ((skbn = skb_realloc_headroom(skb, headroom)) == NULL) {
			printk(KERN_CRIT "AX.25: ax25_transmit_buffer - out of memory\n");
			kfree_skb(skb);
			return;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);

		kfree_skb(skb);
		skb = skbn;
	}

	ptr = skb_push(skb, headroom);

	ax25_addr_build(ptr, &ax25->source_addr, &ax25->dest_addr, ax25->digipeat, type, ax25->modulus);

	ax25_queue_xmit(skb, ax25->ax25_dev->dev);
}

/*
 * A small shim around dev_queue_xmit that adds the KISS control byte
 * and honours any packet forwarding that is in operation.
 */
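/*
 * The single byte pushed in front of the frame below is the KISS
 * command byte: 0x00 means "data frame" on KISS port 0, which is what
 * the KISS framing on the attached device expects.
 */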
void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned char *ptr;

	skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev));

	ptr  = skb_push(skb, 1);
	*ptr = 0x00;			/* KISS */

	dev_queue_xmit(skb);
}

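/*
 * Called with the N(R) taken from a received frame. If it acknowledges
 * everything outstanding (nr == vs) the retransmission timer T1 is
 * stopped and T3 restarted; if it acknowledges at least one new frame
 * (nr != va) T1 is recalculated and restarted. Returns 1 in either
 * case, 0 if nothing new was acknowledged.
 */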
int ax25_check_iframes_acked(ax25_cb *ax25, unsigned short nr)
{
	if (ax25->vs == nr) {
		ax25_frames_acked(ax25, nr);
		ax25_calculate_rtt(ax25);
		ax25_stop_t1timer(ax25);
		ax25_start_t3timer(ax25);
		return 1;
	} else {
		if (ax25->va != nr) {
			ax25_frames_acked(ax25, nr);
			ax25_calculate_t1(ax25);
			ax25_start_t1timer(ax25);
			return 1;
		}
	}
	return 0;
}