/*
 *  net/dccp/input.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/skbuff.h>

#include <net/sock.h>

#include "ccid.h"
#include "dccp.h"

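/*
 * Queue the packet like a TCP FIN: mark the receive side as shut down so
 * that dccp_recvmsg exits its loop, and wake up the reader.
 */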
static void dccp_fin(struct sock *sk, struct sk_buff *skb)
{
        sk->sk_shutdown |= RCV_SHUTDOWN;
        sock_set_flag(sk, SOCK_DONE);
        __skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        skb_set_owner_r(skb, sk);
        sk->sk_data_ready(sk, 0);
}

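/*
 * Handle a received Close: in the OPEN and PARTOPEN states, answer with a
 * Reset (code Closed), queue the packet like a FIN and move to CLOSED.
 */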
static void dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
{
        switch (sk->sk_state) {
        case DCCP_PARTOPEN:
        case DCCP_OPEN:
                dccp_v4_send_reset(sk, DCCP_RESET_CODE_CLOSED);
                dccp_fin(sk, skb);
                dccp_set_state(sk, DCCP_CLOSED);
                break;
        }
}

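/*
 * Handle a received CloseReq: only a client may act on it (see Step 7
 * below); in OPEN or PARTOPEN it answers with a Close and enters CLOSING.
 */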
static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
{
        /*
         *   Step 7: Check for unexpected packet types
         *      If (S.is_server and P.type == CloseReq)
         *         Send Sync packet acknowledging P.seqno
         *         Drop packet and return
         */
        if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
                dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq);
                return;
        }

        switch (sk->sk_state) {
        case DCCP_PARTOPEN:
        case DCCP_OPEN:
                dccp_set_state(sk, DCCP_CLOSING);
                dccp_send_close(sk);
                break;
        }
}

static inline void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
{
        struct dccp_sock *dp = dccp_sk(sk);

        if (dp->dccps_options.dccpo_send_ack_vector)
                dccp_ackpkts_check_rcv_ackno(dp->dccps_hc_rx_ackpkts, sk,
                                             DCCP_SKB_CB(skb)->dccpd_ack_seq);
}

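/*
 * Validate the sequence and acknowledgement numbers of an incoming packet
 * (Steps 5 and 6 of the packet processing pseudocode).  Returns 0 if the
 * packet is acceptable and -1 if it must be dropped.
 */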
static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
{
        const struct dccp_hdr *dh = dccp_hdr(skb);
        struct dccp_sock *dp = dccp_sk(sk);
        u64 lswl = dp->dccps_swl;
        u64 lawl = dp->dccps_awl;

        /*
         *   Step 5: Prepare sequence numbers for Sync
         *      If P.type == Sync or P.type == SyncAck,
         *         If S.AWL <= P.ackno <= S.AWH and P.seqno >= S.SWL,
         *            / * P is valid, so update sequence number variables
         *               accordingly.  After this update, P will pass the
         *               tests in Step 6.  A SyncAck is generated if
         *               necessary in Step 15 * /
         *            Update S.GSR, S.SWL, S.SWH
         *         Otherwise,
         *            Drop packet and return
         */
        if (dh->dccph_type == DCCP_PKT_SYNC ||
            dh->dccph_type == DCCP_PKT_SYNCACK) {
                if (between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
                              dp->dccps_awl, dp->dccps_awh) &&
                    !before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_swl))
                        dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);
                else
                        return -1;
        /*
         *   Step 6: Check sequence numbers
         *      Let LSWL = S.SWL and LAWL = S.AWL
         *      If P.type == CloseReq or P.type == Close or P.type == Reset,
         *         LSWL := S.GSR + 1, LAWL := S.GAR
         *      If LSWL <= P.seqno <= S.SWH
         *         and (P.ackno does not exist or LAWL <= P.ackno <= S.AWH),
         *         Update S.GSR, S.SWL, S.SWH
         *         If P.type != Sync,
         *            Update S.GAR
         *      Otherwise,
         *         Send Sync packet acknowledging P.seqno
         *         Drop packet and return
         */
        } else if (dh->dccph_type == DCCP_PKT_CLOSEREQ ||
                   dh->dccph_type == DCCP_PKT_CLOSE ||
                   dh->dccph_type == DCCP_PKT_RESET) {
                lswl = dp->dccps_gsr;
                dccp_inc_seqno(&lswl);
                lawl = dp->dccps_gar;
        }

        if (between48(DCCP_SKB_CB(skb)->dccpd_seq, lswl, dp->dccps_swh) &&
            (DCCP_SKB_CB(skb)->dccpd_ack_seq == DCCP_PKT_WITHOUT_ACK_SEQ ||
             between48(DCCP_SKB_CB(skb)->dccpd_ack_seq, lawl, dp->dccps_awh))) {
                dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);

                if (dh->dccph_type != DCCP_PKT_SYNC &&
                    DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
                        dp->dccps_gar = DCCP_SKB_CB(skb)->dccpd_ack_seq;
        } else {
                dccp_pr_debug("Step 6 failed, sending SYNC...\n");
                dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq);
                return -1;
        }

        return 0;
}

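/*
 * Packet receive path for an established connection: validates sequence
 * numbers, parses options, feeds the CCIDs, queues Data/DataAck payloads
 * on sk_receive_queue and dispatches Close, CloseReq and Reset handling.
 */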
int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
                         const struct dccp_hdr *dh, const unsigned len)
{
        struct dccp_sock *dp = dccp_sk(sk);

        if (dccp_check_seqno(sk, skb))
                goto discard;

        if (dccp_parse_options(sk, skb))
                goto discard;

        if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
                dccp_event_ack_recv(sk, skb);

        /*
         * FIXME: check ECN to see if we should use
         * DCCP_ACKPKTS_STATE_ECN_MARKED
         */
        if (dp->dccps_options.dccpo_send_ack_vector) {
                struct dccp_ackpkts *ap = dp->dccps_hc_rx_ackpkts;

                if (dccp_ackpkts_add(dp->dccps_hc_rx_ackpkts,
                                     DCCP_SKB_CB(skb)->dccpd_seq,
                                     DCCP_ACKPKTS_STATE_RECEIVED)) {
                        LIMIT_NETDEBUG(pr_info("DCCP: acknowledgeable packets buffer full!\n"));
                        ap->dccpap_ack_seqno = DCCP_MAX_SEQNO + 1;
                        inet_csk_schedule_ack(sk);
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  TCP_DELACK_MIN, TCP_RTO_MAX);
                        goto discard;
                }

                /*
                 * FIXME: this activation is probably wrong, have to study more
                 * TCP delack machinery and how it fits into DCCP draft, but
                 * for now it kinda "works" 8)
                 */
                if (!inet_csk_ack_scheduled(sk)) {
                        inet_csk_schedule_ack(sk);
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  5 * HZ, TCP_RTO_MAX);
                }
        }

        ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
        ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);

        switch (dccp_hdr(skb)->dccph_type) {
        case DCCP_PKT_DATAACK:
        case DCCP_PKT_DATA:
                /*
                 * FIXME: check if sk_receive_queue is full, schedule
                 * DATA_DROPPED option if it is.
                 */
                __skb_pull(skb, dh->dccph_doff * 4);
                __skb_queue_tail(&sk->sk_receive_queue, skb);
                skb_set_owner_r(skb, sk);
                sk->sk_data_ready(sk, 0);
                return 0;
        case DCCP_PKT_ACK:
                goto discard;
        case DCCP_PKT_RESET:
                /*
                 *   Step 9: Process Reset
                 *      If P.type == Reset,
                 *         Tear down connection
                 *         S.state := TIMEWAIT
                 *         Set TIMEWAIT timer
                 *         Drop packet and return
                 */
                dccp_fin(sk, skb);
                dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
                return 0;
        case DCCP_PKT_CLOSEREQ:
                dccp_rcv_closereq(sk, skb);
                goto discard;
        case DCCP_PKT_CLOSE:
                dccp_rcv_close(sk, skb);
                return 0;
        case DCCP_PKT_REQUEST:
                /*
                 *   Step 7
                 *      or (S.is_server and P.type == Response)
                 *      or (S.is_client and P.type == Request)
                 *      or (S.state >= OPEN and P.type == Request
                 *         and P.seqno >= S.OSR)
                 *      or (S.state >= OPEN and P.type == Response
                 *         and P.seqno >= S.OSR)
                 *      or (S.state == RESPOND and P.type == Data),
                 *         Send Sync packet acknowledging P.seqno
                 *         Drop packet and return
                 */
                if (dp->dccps_role != DCCP_ROLE_LISTEN)
                        goto send_sync;
                goto check_seq;
        case DCCP_PKT_RESPONSE:
                if (dp->dccps_role != DCCP_ROLE_CLIENT)
                        goto send_sync;
check_seq:
                if (!before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_osr)) {
send_sync:
                        dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq);
                }
                break;
        }

        DCCP_INC_STATS_BH(DCCP_MIB_INERRS);
discard:
        __kfree_skb(skb);
        return 0;
}

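/*
 * Process a packet received in the REQUEST state (client waiting for the
 * server's Response).  A valid Response moves the connection to PARTOPEN
 * and, unless a write is already pending, sends the Ack that completes
 * the three-way handshake.
 */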
static int dccp_rcv_request_sent_state_process(struct sock *sk,
                                               struct sk_buff *skb,
                                               const struct dccp_hdr *dh,
                                               const unsigned len)
{
        /*
         *  Step 4: Prepare sequence numbers in REQUEST
         *     If S.state == REQUEST,
         *        If (P.type == Response or P.type == Reset)
         *              and S.AWL <= P.ackno <= S.AWH,
         *           / * Set sequence number variables corresponding to the
         *              other endpoint, so P will pass the tests in Step 6 * /
         *           Set S.GSR, S.ISR, S.SWL, S.SWH
         *           / * Response processing continues in Step 10; Reset
         *              processing continues in Step 9 * /
         */
        if (dh->dccph_type == DCCP_PKT_RESPONSE) {
                const struct inet_connection_sock *icsk = inet_csk(sk);
                struct dccp_sock *dp = dccp_sk(sk);

                /* Stop the REQUEST timer */
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
                BUG_TRAP(sk->sk_send_head != NULL);
                __kfree_skb(sk->sk_send_head);
                sk->sk_send_head = NULL;

                if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
                               dp->dccps_awl, dp->dccps_awh)) {
                        dccp_pr_debug("invalid ackno: S.AWL=%llu, "
                                      "P.ackno=%llu, S.AWH=%llu\n",
                                      dp->dccps_awl,
                                      DCCP_SKB_CB(skb)->dccpd_ack_seq,
                                      dp->dccps_awh);
                        goto out_invalid_packet;
                }

                dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;
                dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);

                if (ccid_hc_rx_init(dp->dccps_hc_rx_ccid, sk) != 0 ||
                    ccid_hc_tx_init(dp->dccps_hc_tx_ccid, sk) != 0) {
                        ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
                        ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
                        /* FIXME: send appropriate RESET code */
                        goto out_invalid_packet;
                }

                dccp_sync_mss(sk, dp->dccps_pmtu_cookie);

                /*
                 *   Step 10: Process REQUEST state (second part)
                 *      If S.state == REQUEST,
                 *         / * If we get here, P is a valid Response from the
                 *            server (see Step 4), and we should move to
                 *            PARTOPEN state.  PARTOPEN means send an Ack,
                 *            don't send Data packets, retransmit Acks
                 *            periodically, and always include any Init Cookie
                 *            from the Response * /
                 *         S.state := PARTOPEN
                 *         Set PARTOPEN timer
                 *         Continue with S.state == PARTOPEN
                 *         / * Step 12 will send the Ack completing the
                 *            three-way handshake * /
                 */
                dccp_set_state(sk, DCCP_PARTOPEN);

                /* Make sure socket is routed, for correct metrics. */
                inet_sk_rebuild_header(sk);

                if (!sock_flag(sk, SOCK_DEAD)) {
                        sk->sk_state_change(sk);
                        sk_wake_async(sk, 0, POLL_OUT);
                }

                if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||
                    icsk->icsk_accept_queue.rskq_defer_accept) {
                        /* Save one ACK.  Data will be ready after
                         * several ticks, if write_pending is set.
                         *
                         * It may be deleted, but with this feature tcpdumps
                         * look so _wonderfully_ clever, that I was not able
                         * to stand against the temptation 8)     --ANK
                         */
                        /*
                         * OK, in DCCP we can as well do a similar trick, it's
                         * even in the draft, but there is no need for us to
                         * schedule an ack here, as dccp_sendmsg does this for
                         * us, also stated in the draft. -acme
                         */
                        __kfree_skb(skb);
                        return 0;
                }
                dccp_send_ack(sk);
                return -1;
        }

out_invalid_packet:
        return 1; /* dccp_v4_do_rcv will send a reset, but...
                     FIXME: the reset code should be
                     DCCP_RESET_CODE_PACKET_ERROR */
}

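/*
 * Process a packet received in the RESPOND or PARTOPEN states: an Ack or
 * DataAck completes the handshake and moves the connection to OPEN, while
 * a Reset just stops the delayed-ack timer.  Returns 1 if the packet was
 * queued by dccp_rcv_established, 0 otherwise.
 */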
static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
                                                   struct sk_buff *skb,
                                                   const struct dccp_hdr *dh,
                                                   const unsigned len)
{
        int queued = 0;

        switch (dh->dccph_type) {
        case DCCP_PKT_RESET:
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
                break;
        case DCCP_PKT_DATAACK:
        case DCCP_PKT_ACK:
                /*
                 * FIXME: we should be resetting the PARTOPEN (DELACK) timer
                 * here, but only if we haven't used the DELACK timer for
                 * something else, like sending a delayed ack for a TIMESTAMP
                 * echo, etc.  For now we're not clearing it, since sending an
                 * extra ACK when there is nothing else to do in DELACK is not
                 * a big deal after all.
                 */

                /* Stop the PARTOPEN timer */
                if (sk->sk_state == DCCP_PARTOPEN)
                        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);

                dccp_sk(sk)->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
                dccp_set_state(sk, DCCP_OPEN);

                if (dh->dccph_type == DCCP_PKT_DATAACK) {
                        dccp_rcv_established(sk, skb, dh, len);
                        queued = 1; /* packet was queued
                                       (by dccp_rcv_established) */
                }
                break;
        }

        return queued;
}

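/*
 * State machine for packets received while the connection is not (or not
 * yet) established, broadly the DCCP counterpart of TCP's state-machine
 * receive path.  A non-zero return tells the caller to send a Reset.
 */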
int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                           struct dccp_hdr *dh, unsigned len)
{
        struct dccp_sock *dp = dccp_sk(sk);
        const int old_state = sk->sk_state;
        int queued = 0;

        if (sk->sk_state != DCCP_LISTEN && sk->sk_state != DCCP_REQUESTING) {
                if (dccp_check_seqno(sk, skb))
                        goto discard;

                /*
                 * Step 8: Process options and mark acknowledgeable
                 */
                if (dccp_parse_options(sk, skb))
                        goto discard;

                if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
                        dccp_event_ack_recv(sk, skb);

                ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
                ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);

                /*
                 * FIXME: check ECN to see if we should use
                 * DCCP_ACKPKTS_STATE_ECN_MARKED
                 */
                if (dp->dccps_options.dccpo_send_ack_vector) {
                        if (dccp_ackpkts_add(dp->dccps_hc_rx_ackpkts,
                                             DCCP_SKB_CB(skb)->dccpd_seq,
                                             DCCP_ACKPKTS_STATE_RECEIVED))
                                goto discard;
                        /*
                         * FIXME: this activation is probably wrong, have to
                         * study more TCP delack machinery and how it fits
                         * into DCCP draft, but for now it kinda "works" 8)
                         */
                        if (dp->dccps_hc_rx_ackpkts->dccpap_ack_seqno ==
                            DCCP_MAX_SEQNO + 1 &&
                            !inet_csk_ack_scheduled(sk)) {
                                inet_csk_schedule_ack(sk);
                                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                          TCP_DELACK_MIN,
                                                          TCP_RTO_MAX);
                        }
                }
        }

        /*
         *   Step 9: Process Reset
         *      If P.type == Reset,
         *         Tear down connection
         *         S.state := TIMEWAIT
         *         Set TIMEWAIT timer
         *         Drop packet and return
         */
        if (dh->dccph_type == DCCP_PKT_RESET) {
                /*
                 * Queue the equivalent of TCP fin so that dccp_recvmsg
                 * exits the loop
                 */
                dccp_fin(sk, skb);
                dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
                return 0;
        /*
         *   Step 7: Check for unexpected packet types
         *      If (S.is_server and P.type == CloseReq)
         *          or (S.is_server and P.type == Response)
         *          or (S.is_client and P.type == Request)
         *          or (S.state == RESPOND and P.type == Data),
         *         Send Sync packet acknowledging P.seqno
         *         Drop packet and return
         */
        } else if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
                    (dh->dccph_type == DCCP_PKT_RESPONSE ||
                     dh->dccph_type == DCCP_PKT_CLOSEREQ)) ||
                   (dp->dccps_role == DCCP_ROLE_CLIENT &&
                    dh->dccph_type == DCCP_PKT_REQUEST) ||
                   (sk->sk_state == DCCP_RESPOND &&
                    dh->dccph_type == DCCP_PKT_DATA)) {
                dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq);
                goto discard;
        }

        switch (sk->sk_state) {
        case DCCP_CLOSED:
                return 1;

        case DCCP_LISTEN:
                if (dh->dccph_type == DCCP_PKT_ACK ||
                    dh->dccph_type == DCCP_PKT_DATAACK)
                        return 1;

                if (dh->dccph_type == DCCP_PKT_RESET)
                        goto discard;

                if (dh->dccph_type == DCCP_PKT_REQUEST) {
                        if (dccp_v4_conn_request(sk, skb) < 0)
                                return 1;

                        /* FIXME: do congestion control initialization */
                        goto discard;
                }
                goto discard;

        case DCCP_REQUESTING:
                /* FIXME: do congestion control initialization */

                queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
                if (queued >= 0)
                        return queued;

                __kfree_skb(skb);
                return 0;

        case DCCP_RESPOND:
        case DCCP_PARTOPEN:
                queued = dccp_rcv_respond_partopen_state_process(sk, skb,
                                                                 dh, len);
                break;
        }

        if (dh->dccph_type == DCCP_PKT_ACK ||
            dh->dccph_type == DCCP_PKT_DATAACK) {
                switch (old_state) {
                case DCCP_PARTOPEN:
                        sk->sk_state_change(sk);
                        sk_wake_async(sk, 0, POLL_OUT);
                        break;
                }
        }

        if (!queued) {
discard:
                __kfree_skb(skb);
        }
        return 0;
}