/*
 * net/dccp/proto.c
 *
 * An implementation of the DCCP protocol
 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/semaphore.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"

DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

atomic_t dccp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo __cacheline_aligned dccp_hashinfo = {
        .lhash_lock = RW_LOCK_UNLOCKED,
        .lhash_users = ATOMIC_INIT(0),
        .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(dccp_hashinfo.lhash_wait),
};

EXPORT_SYMBOL_GPL(dccp_hashinfo);

void dccp_set_state(struct sock *sk, const int state)
{
        const int oldstate = sk->sk_state;

        dccp_pr_debug("%s(%p) %-10.10s -> %s\n",
                      dccp_role(sk), sk,
                      dccp_state_name(oldstate), dccp_state_name(state));
        WARN_ON(state == oldstate);

        switch (state) {
        case DCCP_OPEN:
                if (oldstate != DCCP_OPEN)
                        DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
                break;

        case DCCP_CLOSED:
                if (oldstate == DCCP_CLOSING || oldstate == DCCP_OPEN)
                        DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

                sk->sk_prot->unhash(sk);
                if (inet_csk(sk)->icsk_bind_hash != NULL &&
                    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
                        inet_put_port(&dccp_hashinfo, sk);
                /* fall through */
        default:
                if (oldstate == DCCP_OPEN)
                        DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
        }

        /* Change state AFTER socket is unhashed to avoid closed
         * socket sitting in hash tables.
         */
        sk->sk_state = state;
}

EXPORT_SYMBOL_GPL(dccp_set_state);

void dccp_done(struct sock *sk)
{
        dccp_set_state(sk, DCCP_CLOSED);
        dccp_clear_xmit_timers(sk);

        sk->sk_shutdown = SHUTDOWN_MASK;

        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_state_change(sk);
        else
                inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);

const char *dccp_packet_name(const int type)
{
        static const char *dccp_packet_names[] = {
                [DCCP_PKT_REQUEST] = "REQUEST",
                [DCCP_PKT_RESPONSE] = "RESPONSE",
                [DCCP_PKT_DATA] = "DATA",
                [DCCP_PKT_ACK] = "ACK",
                [DCCP_PKT_DATAACK] = "DATAACK",
                [DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
                [DCCP_PKT_CLOSE] = "CLOSE",
                [DCCP_PKT_RESET] = "RESET",
                [DCCP_PKT_SYNC] = "SYNC",
                [DCCP_PKT_SYNCACK] = "SYNCACK",
        };

        if (type >= DCCP_NR_PKT_TYPES)
                return "INVALID";
        else
                return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);

const char *dccp_state_name(const int state)
{
        static const char *dccp_state_names[] = {
                [DCCP_OPEN] = "OPEN",
                [DCCP_REQUESTING] = "REQUESTING",
                [DCCP_PARTOPEN] = "PARTOPEN",
                [DCCP_LISTEN] = "LISTEN",
                [DCCP_RESPOND] = "RESPOND",
                [DCCP_CLOSING] = "CLOSING",
                [DCCP_TIME_WAIT] = "TIME_WAIT",
                [DCCP_CLOSED] = "CLOSED",
        };

        if (state >= DCCP_MAX_STATES)
                return "INVALID STATE!";
        else
                return dccp_state_names[state];
}

EXPORT_SYMBOL_GPL(dccp_state_name);

void dccp_hash(struct sock *sk)
{
        inet_hash(&dccp_hashinfo, sk);
}

EXPORT_SYMBOL_GPL(dccp_hash);

void dccp_unhash(struct sock *sk)
{
        inet_unhash(&dccp_hashinfo, sk);
}

EXPORT_SYMBOL_GPL(dccp_unhash);

int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        dccp_options_init(&dp->dccps_options);
        do_gettimeofday(&dp->dccps_epoch);

        /*
         * FIXME: We're hardcoding the CCID, and doing this at this point makes
         * the listening (master) sock get CCID control blocks, which is not
         * necessary, but for now, to not mess with the test userspace apps,
         * let's leave it here; later the real solution is to do this in a
         * setsockopt(CCIDs-I-want/accept). -acme
         */
        if (likely(ctl_sock_initialized)) {
                int rc = dccp_feat_init(sk);

                if (rc)
                        return rc;

                if (dp->dccps_options.dccpo_send_ack_vector) {
                        dp->dccps_hc_rx_ackvec = dccp_ackvec_alloc(GFP_KERNEL);
                        if (dp->dccps_hc_rx_ackvec == NULL)
                                return -ENOMEM;
                }
                dp->dccps_hc_rx_ccid =
                        ccid_hc_rx_new(dp->dccps_options.dccpo_rx_ccid,
                                       sk, GFP_KERNEL);
                dp->dccps_hc_tx_ccid =
                        ccid_hc_tx_new(dp->dccps_options.dccpo_tx_ccid,
                                       sk, GFP_KERNEL);
                if (unlikely(dp->dccps_hc_rx_ccid == NULL ||
                             dp->dccps_hc_tx_ccid == NULL)) {
                        ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
                        ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
                        if (dp->dccps_options.dccpo_send_ack_vector) {
                                dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
                                dp->dccps_hc_rx_ackvec = NULL;
                        }
                        dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
                        return -ENOMEM;
                }
        } else {
                /* The control socket doesn't need feature negotiation. */
                INIT_LIST_HEAD(&dp->dccps_options.dccpo_pending);
                INIT_LIST_HEAD(&dp->dccps_options.dccpo_conf);
        }

        dccp_init_xmit_timers(sk);
        icsk->icsk_rto = DCCP_TIMEOUT_INIT;
        sk->sk_state = DCCP_CLOSED;
        sk->sk_write_space = dccp_write_space;
        icsk->icsk_sync_mss = dccp_sync_mss;
        dp->dccps_mss_cache = 536;
        dp->dccps_role = DCCP_ROLE_UNDEFINED;
        dp->dccps_service = DCCP_SERVICE_INVALID_VALUE;
        dp->dccps_l_ack_ratio = dp->dccps_r_ack_ratio = 1;

        return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);

int dccp_destroy_sock(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);

        /*
         * DCCP doesn't use sk_write_queue, just sk_send_head
         * for retransmissions
         */
        if (sk->sk_send_head != NULL) {
                kfree_skb(sk->sk_send_head);
                sk->sk_send_head = NULL;
        }

        /* Clean up a referenced DCCP bind bucket. */
        if (inet_csk(sk)->icsk_bind_hash != NULL)
                inet_put_port(&dccp_hashinfo, sk);

        kfree(dp->dccps_service_list);
        dp->dccps_service_list = NULL;

        if (dp->dccps_options.dccpo_send_ack_vector) {
                dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
                dp->dccps_hc_rx_ackvec = NULL;
        }
        ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
        ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
        dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;

        /* Clean up feature negotiation state. */
        dccp_feat_clean(sk);

        return 0;
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);

static inline int dccp_listen_start(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);

        dp->dccps_role = DCCP_ROLE_LISTEN;
        /*
         * Apps need to use setsockopt(DCCP_SOCKOPT_SERVICE)
         * before calling listen().
         */
        if (dccp_service_not_initialized(sk))
                return -EPROTO;
        return inet_csk_listen_start(sk, TCP_SYNQ_HSIZE);
}
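
/*
 * Hedged userspace sketch (illustration only, not part of this file's
 * build): the -EPROTO above means an application must set a service code
 * before listen(), roughly as follows, error handling elided:
 *
 *	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
 *	__be32 service = htonl(42);	<-- example service code
 *
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
 *		   &service, sizeof(service));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, 5);			<-- would fail with EPROTO had the
 *					    service code not been set
 */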

int dccp_disconnect(struct sock *sk, int flags)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
        int err = 0;
        const int old_state = sk->sk_state;

        if (old_state != DCCP_CLOSED)
                dccp_set_state(sk, DCCP_CLOSED);

        /* ABORT function of RFC 793 */
        if (old_state == DCCP_LISTEN) {
                inet_csk_listen_stop(sk);
                /* FIXME: do the active reset thing */
        } else if (old_state == DCCP_REQUESTING)
                sk->sk_err = ECONNRESET;

        dccp_clear_xmit_timers(sk);
        __skb_queue_purge(&sk->sk_receive_queue);
        if (sk->sk_send_head != NULL) {
                __kfree_skb(sk->sk_send_head);
                sk->sk_send_head = NULL;
        }

        inet->dport = 0;

        if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
                inet_reset_saddr(sk);

        sk->sk_shutdown = 0;
        sock_reset_flag(sk, SOCK_DONE);

        icsk->icsk_backoff = 0;
        inet_csk_delack_init(sk);
        __sk_dst_reset(sk);

        BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

        sk->sk_error_report(sk);
        return err;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);

/*
 * Wait for a DCCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
unsigned int dccp_poll(struct file *file, struct socket *sock,
                       poll_table *wait)
{
        unsigned int mask;
        struct sock *sk = sock->sk;

        poll_wait(file, sk->sk_sleep, wait);
        if (sk->sk_state == DCCP_LISTEN)
                return inet_csk_listen_poll(sk);

        /* Socket is not locked. We are protected from async events
         * by poll logic and correct handling of state changes
         * made by other threads is impossible in any case.
         */

        mask = 0;
        if (sk->sk_err)
                mask = POLLERR;

        if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
                mask |= POLLHUP;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLIN | POLLRDNORM;

        /* Connected? */
        if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
                if (atomic_read(&sk->sk_rmem_alloc) > 0)
                        mask |= POLLIN | POLLRDNORM;

                if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
                        if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
                                mask |= POLLOUT | POLLWRNORM;
                        } else { /* send SIGIO later */
                                set_bit(SOCK_ASYNC_NOSPACE,
                                        &sk->sk_socket->flags);
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

                                /* Race breaker. If space is freed after
                                 * wspace test but before the flags are set,
                                 * IO signal will be lost.
                                 */
                                if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
                                        mask |= POLLOUT | POLLWRNORM;
                        }
                }
        }
        return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);

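/*
 * Hedged usage note: dccp_poll() backs the ordinary poll(2)/select(2)
 * interface, so nothing DCCP-specific is needed in applications, e.g.:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		... a DCCP-Data/DCCP-DataAck payload (or EOF) is readable
 */
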
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
        dccp_pr_debug("entry\n");
        return -ENOIOCTLCMD;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);

static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
                                   char __user *optval, int optlen)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct dccp_service_list *sl = NULL;

        if (service == DCCP_SERVICE_INVALID_VALUE ||
            optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
                return -EINVAL;

        if (optlen > sizeof(service)) {
                sl = kmalloc(optlen, GFP_KERNEL);
                if (sl == NULL)
                        return -ENOMEM;

                sl->dccpsl_nr = optlen / sizeof(u32) - 1;
                if (copy_from_user(sl->dccpsl_list,
                                   optval + sizeof(service),
                                   optlen - sizeof(service)) ||
                    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
                        kfree(sl);
                        return -EFAULT;
                }
        }

        lock_sock(sk);
        dp->dccps_service = service;

        kfree(dp->dccps_service_list);

        dp->dccps_service_list = sl;
        release_sock(sk);
        return 0;
}

/* Byte 1 is the feature number; the rest is the preference list. */
static int dccp_setsockopt_change(struct sock *sk, int type,
                                  struct dccp_so_feat __user *optval)
{
        struct dccp_so_feat opt;
        u8 *val;
        int rc;

        if (copy_from_user(&opt, optval, sizeof(opt)))
                return -EFAULT;

        val = kmalloc(opt.dccpsf_len, GFP_KERNEL);
        if (!val)
                return -ENOMEM;

        if (copy_from_user(val, opt.dccpsf_val, opt.dccpsf_len)) {
                rc = -EFAULT;
                goto out_free_val;
        }

        rc = dccp_feat_change(sk, type, opt.dccpsf_feat, val, opt.dccpsf_len,
                              GFP_KERNEL);
        if (rc)
                goto out_free_val;

out:
        return rc;

out_free_val:
        kfree(val);
        goto out;
}

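/*
 * Hedged sketch of the caller side (assumes struct dccp_so_feat and the
 * DCCPF_* feature numbers from this tree's <linux/dccp.h>): to propose,
 * say, CCID 2 then CCID 3 in preference order, userspace could do:
 *
 *	unsigned char ccids[] = { 2, 3 };
 *	struct dccp_so_feat feat = {
 *		.dccpsf_feat = DCCPF_CCID,
 *		.dccpsf_val  = ccids,
 *		.dccpsf_len  = sizeof(ccids),
 *	};
 *
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CHANGE_L, &feat, sizeof(feat));
 */
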
int dccp_setsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int optlen)
{
        struct dccp_sock *dp;
        int err;
        int val;

        if (level != SOL_DCCP)
                return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
                                                             optname, optval,
                                                             optlen);

        if (optlen < sizeof(int))
                return -EINVAL;

        if (get_user(val, (int __user *)optval))
                return -EFAULT;

        if (optname == DCCP_SOCKOPT_SERVICE)
                return dccp_setsockopt_service(sk, val, optval, optlen);

        lock_sock(sk);
        dp = dccp_sk(sk);
        err = 0;

        switch (optname) {
        case DCCP_SOCKOPT_PACKET_SIZE:
                dp->dccps_packet_size = val;
                break;

        case DCCP_SOCKOPT_CHANGE_L:
                if (optlen != sizeof(struct dccp_so_feat))
                        err = -EINVAL;
                else
                        err = dccp_setsockopt_change(sk, DCCPO_CHANGE_L,
                                                     (struct dccp_so_feat __user *)
                                                     optval);
                break;

        case DCCP_SOCKOPT_CHANGE_R:
                if (optlen != sizeof(struct dccp_so_feat))
                        err = -EINVAL;
                else
                        err = dccp_setsockopt_change(sk, DCCPO_CHANGE_R,
                                                     (struct dccp_so_feat __user *)
                                                     optval);
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

        release_sock(sk);
        return err;
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);

static int dccp_getsockopt_service(struct sock *sk, int len,
                                   __be32 __user *optval,
                                   int __user *optlen)
{
        const struct dccp_sock *dp = dccp_sk(sk);
        const struct dccp_service_list *sl;
        int err = -ENOENT, slen = 0, total_len = sizeof(u32);

        lock_sock(sk);
        if (dccp_service_not_initialized(sk))
                goto out;

        if ((sl = dp->dccps_service_list) != NULL) {
                slen = sl->dccpsl_nr * sizeof(u32);
                total_len += slen;
        }

        err = -EINVAL;
        if (total_len > len)
                goto out;

        err = 0;
        if (put_user(total_len, optlen) ||
            put_user(dp->dccps_service, optval) ||
            (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
                err = -EFAULT;
out:
        release_sock(sk);
        return err;
}

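/*
 * Hedged read-back sketch: the layout produced above is one __be32 service
 * code followed by the optional service list, so userspace can do roughly:
 *
 *	__be32 buf[DCCP_SERVICE_LIST_MAX_LEN + 1];
 *	socklen_t len = sizeof(buf);
 *
 *	getsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE, buf, &len);
 *
 * after which buf[0] is the service code and buf[1..] the list. Note also
 * the dispatch convention in dccp_getsockopt() below: option numbers
 * 128..191 are forwarded to the RX CCID and 192..255 to the TX CCID,
 * leaving the lower numbers for generic DCCP options.
 */
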
int dccp_getsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int __user *optlen)
{
        struct dccp_sock *dp;
        int val, len;

        if (level != SOL_DCCP)
                return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
                                                             optname, optval,
                                                             optlen);
        if (get_user(len, optlen))
                return -EFAULT;

        if (len < sizeof(int))
                return -EINVAL;

        dp = dccp_sk(sk);

        switch (optname) {
        case DCCP_SOCKOPT_PACKET_SIZE:
                val = dp->dccps_packet_size;
                len = sizeof(dp->dccps_packet_size);
                break;
        case DCCP_SOCKOPT_SERVICE:
                return dccp_getsockopt_service(sk, len,
                                               (__be32 __user *)optval, optlen);
        case 128 ... 191:
                return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
                                             len, (u32 __user *)optval, optlen);
        case 192 ... 255:
                return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
                                             len, (u32 __user *)optval, optlen);
        default:
                return -ENOPROTOOPT;
        }

        if (put_user(len, optlen) || copy_to_user(optval, &val, len))
                return -EFAULT;

        return 0;
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);

int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 size_t len)
{
        const struct dccp_sock *dp = dccp_sk(sk);
        const int flags = msg->msg_flags;
        const int noblock = flags & MSG_DONTWAIT;
        struct sk_buff *skb;
        int rc, size;
        long timeo;

        if (len > dp->dccps_mss_cache)
                return -EMSGSIZE;

        lock_sock(sk);
        timeo = sock_sndtimeo(sk, noblock);

        /*
         * We have to use sk_stream_wait_connect here to set sk_write_pending,
         * so that the trick in dccp_rcv_request_sent_state_process works.
         */
        /* Wait for a connection to finish. */
        if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN | DCCPF_CLOSING))
                if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
                        goto out_release;

        size = sk->sk_prot->max_header + len;
        release_sock(sk);
        skb = sock_alloc_send_skb(sk, size, noblock, &rc);
        lock_sock(sk);
        if (skb == NULL)
                goto out_release;

        skb_reserve(skb, sk->sk_prot->max_header);
        rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
        if (rc != 0)
                goto out_discard;

        rc = dccp_write_xmit(sk, skb, &timeo);
        /*
         * XXX we don't use sk_write_queue, so just discard the packet.
         * Current plan however is to _use_ sk_write_queue with
         * an algorithm similar to tcp_sendmsg, where the main difference
         * is that in DCCP we have to respect packet boundaries, so
         * no coalescing of skbs.
         *
         * This bug was _quickly_ found & fixed by just looking at an OSTRA
         * generated callgraph 8) -acme
         */
out_release:
        release_sock(sk);
        return rc ? : len;
out_discard:
        kfree_skb(skb);
        goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);

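/*
 * Hedged usage note: unlike TCP, each dccp_sendmsg() call maps to exactly
 * one DCCP-Data packet, so a write larger than the current MSS is refused
 * up front rather than fragmented or coalesced:
 *
 *	if (send(fd, buf, 64 * 1024, 0) < 0 && errno == EMSGSIZE)
 *		... payload exceeded dccps_mss_cache; send smaller records
 */
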
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 size_t len, int nonblock, int flags, int *addr_len)
{
        const struct dccp_hdr *dh;
        long timeo;

        lock_sock(sk);

        if (sk->sk_state == DCCP_LISTEN) {
                len = -ENOTCONN;
                goto out;
        }

        timeo = sock_rcvtimeo(sk, nonblock);

        do {
                struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

                if (skb == NULL)
                        goto verify_sock_status;

                dh = dccp_hdr(skb);

                if (dh->dccph_type == DCCP_PKT_DATA ||
                    dh->dccph_type == DCCP_PKT_DATAACK)
                        goto found_ok_skb;

                if (dh->dccph_type == DCCP_PKT_RESET ||
                    dh->dccph_type == DCCP_PKT_CLOSE) {
                        dccp_pr_debug("found fin ok!\n");
                        len = 0;
                        goto found_fin_ok;
                }
                dccp_pr_debug("packet_type=%s\n",
                              dccp_packet_name(dh->dccph_type));
                sk_eat_skb(sk, skb);
verify_sock_status:
                if (sock_flag(sk, SOCK_DONE)) {
                        len = 0;
                        break;
                }

                if (sk->sk_err) {
                        len = sock_error(sk);
                        break;
                }

                if (sk->sk_shutdown & RCV_SHUTDOWN) {
                        len = 0;
                        break;
                }

                if (sk->sk_state == DCCP_CLOSED) {
                        if (!sock_flag(sk, SOCK_DONE)) {
                                /* This occurs when the user tries to read
                                 * from a socket that was never connected.
                                 */
                                len = -ENOTCONN;
                                break;
                        }
                        len = 0;
                        break;
                }

                if (!timeo) {
                        len = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        len = sock_intr_errno(timeo);
                        break;
                }

                sk_wait_data(sk, &timeo);
                continue;
found_ok_skb:
                if (len > skb->len)
                        len = skb->len;
                else if (len < skb->len)
                        msg->msg_flags |= MSG_TRUNC;

                if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
                        /* Exception. Bailout! */
                        len = -EFAULT;
                        break;
                }
found_fin_ok:
                if (!(flags & MSG_PEEK))
                        sk_eat_skb(sk, skb);
                break;
        } while (1);
out:
        release_sock(sk);
        return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);

int inet_dccp_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        unsigned char old_state;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
                goto out;

        old_state = sk->sk_state;
        if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
                goto out;

        /* Really, if the socket is already in listen state
         * we can only allow the backlog to be adjusted.
         */
        if (old_state != DCCP_LISTEN) {
                /*
                 * FIXME: here it probably should be sk->sk_prot->listen_start
                 * see tcp_listen_start
                 */
                err = dccp_listen_start(sk);
                if (err)
                        goto out;
        }
        sk->sk_max_ack_backlog = backlog;
        err = 0;

out:
        release_sock(sk);
        return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);

static const unsigned char dccp_new_state[] = {
        /* current state:       new state:        action: */
        [0]               = DCCP_CLOSED,
        [DCCP_OPEN]       = DCCP_CLOSING | DCCP_ACTION_FIN,
        [DCCP_REQUESTING] = DCCP_CLOSED,
        [DCCP_PARTOPEN]   = DCCP_CLOSING | DCCP_ACTION_FIN,
        [DCCP_LISTEN]     = DCCP_CLOSED,
        [DCCP_RESPOND]    = DCCP_CLOSED,
        [DCCP_CLOSING]    = DCCP_CLOSED,
        [DCCP_TIME_WAIT]  = DCCP_CLOSED,
        [DCCP_CLOSED]     = DCCP_CLOSED,
};

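/*
 * Each entry above packs two things: the state to enter on close() in the
 * low bits (masked by DCCP_STATE_MASK) and the DCCP_ACTION_FIN flag when a
 * CLOSE packet must be sent first. For example, closing in DCCP_OPEN
 * yields DCCP_CLOSING | DCCP_ACTION_FIN: move to CLOSING and transmit a
 * CLOSE; dccp_close_state() below decodes this.
 */
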
static int dccp_close_state(struct sock *sk)
{
        const int next = dccp_new_state[sk->sk_state];
        const int ns = next & DCCP_STATE_MASK;

        if (ns != sk->sk_state)
                dccp_set_state(sk, ns);

        return next & DCCP_ACTION_FIN;
}

void dccp_close(struct sock *sk, long timeout)
{
        struct sk_buff *skb;

        lock_sock(sk);

        sk->sk_shutdown = SHUTDOWN_MASK;

        if (sk->sk_state == DCCP_LISTEN) {
                dccp_set_state(sk, DCCP_CLOSED);

                /* Special case. */
                inet_csk_listen_stop(sk);

                goto adjudge_to_death;
        }

        /*
         * We need to flush the recv. buffs. We do this only on the
         * descriptor close, not protocol-sourced closes, because the
         * reader process may not have drained the data yet!
         */
        /* FIXME: check for unread data */
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL)
                __kfree_skb(skb);

        if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
                /* Check zero linger _after_ checking for unread data. */
                sk->sk_prot->disconnect(sk, 0);
        } else if (dccp_close_state(sk)) {
                dccp_send_close(sk, 1);
        }

        sk_stream_wait_close(sk, timeout);

adjudge_to_death:
        /*
         * It is the last release_sock in its life. It will remove backlog.
         */
        release_sock(sk);
        /*
         * Now socket is owned by kernel and we acquire BH lock
         * to finish close. No need to check for user refs.
         */
        local_bh_disable();
        bh_lock_sock(sk);
        BUG_TRAP(!sock_owned_by_user(sk));

        sock_hold(sk);
        sock_orphan(sk);

        /*
         * The last release_sock may have processed the CLOSE or RESET
         * packet moving sock to CLOSED state, if not we have to fire
         * the CLOSE/CLOSEREQ retransmission timer, see "8.3. Termination"
         * in draft-ietf-dccp-spec-11. -acme
         */
        if (sk->sk_state == DCCP_CLOSING) {
                /* FIXME: should start at 2 * RTT */
                /* Timer for repeating the CLOSE/CLOSEREQ until an answer. */
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          inet_csk(sk)->icsk_rto,
                                          DCCP_RTO_MAX);
#if 0
                /* Yeah, we should use sk->sk_prot->orphan_count, etc */
                dccp_set_state(sk, DCCP_CLOSED);
#endif
        }

        atomic_inc(sk->sk_prot->orphan_count);
        if (sk->sk_state == DCCP_CLOSED)
                inet_csk_destroy_sock(sk);

        /* Otherwise, socket is reprieved until protocol close. */

        bh_unlock_sock(sk);
        local_bh_enable();
        sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);

void dccp_shutdown(struct sock *sk, int how)
{
        dccp_pr_debug("entry\n");
}

EXPORT_SYMBOL_GPL(dccp_shutdown);

static int __init dccp_mib_init(void)
{
        int rc = -ENOMEM;

        dccp_statistics[0] = alloc_percpu(struct dccp_mib);
        if (dccp_statistics[0] == NULL)
                goto out;

        dccp_statistics[1] = alloc_percpu(struct dccp_mib);
        if (dccp_statistics[1] == NULL)
                goto out_free_one;

        rc = 0;
out:
        return rc;
out_free_one:
        free_percpu(dccp_statistics[0]);
        dccp_statistics[0] = NULL;
        goto out;
}

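/*
 * Note on the two allocations above: dccp_statistics follows the stack's
 * SNMP MIB convention of keeping two per-CPU copies per MIB; the usual
 * scheme (see the SNMP_INC_STATS macros) picks one copy for softirq
 * context and the other for process context, so both must be allocated
 * before any counter is bumped.
 */
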
static void dccp_mib_exit(void)
{
        free_percpu(dccp_statistics[0]);
        free_percpu(dccp_statistics[1]);
        dccp_statistics[0] = dccp_statistics[1] = NULL;
}

static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
int dccp_debug;
module_param(dccp_debug, int, 0444);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif

static int __init dccp_init(void)
{
        unsigned long goal;
        int ehash_order, bhash_order, i;
        int rc = -ENOBUFS;

        dccp_hashinfo.bind_bucket_cachep =
                kmem_cache_create("dccp_bind_bucket",
                                  sizeof(struct inet_bind_bucket), 0,
                                  SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (!dccp_hashinfo.bind_bucket_cachep)
                goto out;

        /*
         * Size and allocate the main established and bind bucket
         * hash tables.
         *
         * The methodology is similar to that of the buffer cache.
         */
        if (num_physpages >= (128 * 1024))
                goal = num_physpages >> (21 - PAGE_SHIFT);
        else
                goal = num_physpages >> (23 - PAGE_SHIFT);

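        /*
         * Worked example of the sizing above (assuming 4KB pages, i.e.
         * PAGE_SHIFT == 12): with 512MB of RAM, num_physpages == 128 * 1024,
         * so goal = 131072 >> (21 - 12) = 256 pages for the established hash
         * table; a smaller machine takes the >> (23 - 12) branch, e.g. 256MB
         * gives goal = 65536 >> 11 = 32 pages.
         */
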
        if (thash_entries)
                goal = (thash_entries *
                        sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
        for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
                ;
        do {
                dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
                                           sizeof(struct inet_ehash_bucket);
                dccp_hashinfo.ehash_size >>= 1;
                while (dccp_hashinfo.ehash_size &
                       (dccp_hashinfo.ehash_size - 1))
                        dccp_hashinfo.ehash_size--;
                dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
                        __get_free_pages(GFP_ATOMIC, ehash_order);
        } while (!dccp_hashinfo.ehash && --ehash_order > 0);

        if (!dccp_hashinfo.ehash) {
                printk(KERN_CRIT "Failed to allocate DCCP "
                                 "established hash table\n");
                goto out_free_bind_bucket_cachep;
        }

        for (i = 0; i < (dccp_hashinfo.ehash_size << 1); i++) {
                rwlock_init(&dccp_hashinfo.ehash[i].lock);
                INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
        }

        bhash_order = ehash_order;

        do {
                dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
                                           sizeof(struct inet_bind_hashbucket);
                if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
                    bhash_order > 0)
                        continue;
                dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
                        __get_free_pages(GFP_ATOMIC, bhash_order);
        } while (!dccp_hashinfo.bhash && --bhash_order >= 0);

        if (!dccp_hashinfo.bhash) {
                printk(KERN_CRIT "Failed to allocate DCCP bind hash table\n");
                goto out_free_dccp_ehash;
        }

        for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
                spin_lock_init(&dccp_hashinfo.bhash[i].lock);
                INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
        }

        rc = dccp_mib_init();
        if (rc)
                goto out_free_dccp_bhash;

        rc = dccp_ackvec_init();
        if (rc)
                goto out_free_dccp_mib;

        rc = dccp_sysctl_init();
        if (rc)
                goto out_ackvec_exit;
out:
        return rc;
out_ackvec_exit:
        dccp_ackvec_exit();
out_free_dccp_mib:
        dccp_mib_exit();
out_free_dccp_bhash:
        free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
        dccp_hashinfo.bhash = NULL;
out_free_dccp_ehash:
        free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
        dccp_hashinfo.ehash = NULL;
out_free_bind_bucket_cachep:
        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
        dccp_hashinfo.bind_bucket_cachep = NULL;
        goto out;
}

static void __exit dccp_fini(void)
{
        dccp_mib_exit();
        free_pages((unsigned long)dccp_hashinfo.bhash,
                   get_order(dccp_hashinfo.bhash_size *
                             sizeof(struct inet_bind_hashbucket)));
        free_pages((unsigned long)dccp_hashinfo.ehash,
                   get_order(dccp_hashinfo.ehash_size *
                             sizeof(struct inet_ehash_bucket)));
        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
        dccp_ackvec_exit();
        dccp_sysctl_exit();
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");