/*
 *  net/dccp/proto.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License version 2 as
 *	published by the Free Software Foundation.
 */

#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/semaphore.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"

DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

atomic_t dccp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo __cacheline_aligned dccp_hashinfo = {
	.lhash_lock	= RW_LOCK_UNLOCKED,
	.lhash_users	= ATOMIC_INIT(0),
	.lhash_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(dccp_hashinfo.lhash_wait),
};

EXPORT_SYMBOL_GPL(dccp_hashinfo);

void dccp_set_state(struct sock *sk, const int state)
{
	const int oldstate = sk->sk_state;

	dccp_pr_debug("%s(%p) %-10.10s -> %s\n",
		      dccp_role(sk), sk,
		      dccp_state_name(oldstate), dccp_state_name(state));
	WARN_ON(state == oldstate);

	switch (state) {
	case DCCP_OPEN:
		if (oldstate != DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
		break;

	case DCCP_CLOSED:
		if (oldstate == DCCP_CLOSING || oldstate == DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash != NULL &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(&dccp_hashinfo, sk);
		/* fall through */
	default:
		if (oldstate == DCCP_OPEN)
			DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;
}

EXPORT_SYMBOL_GPL(dccp_set_state);

void dccp_done(struct sock *sk)
{
	dccp_set_state(sk, DCCP_CLOSED);
	dccp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);

const char *dccp_packet_name(const int type)
{
	static const char *dccp_packet_names[] = {
		[DCCP_PKT_REQUEST]  = "REQUEST",
		[DCCP_PKT_RESPONSE] = "RESPONSE",
		[DCCP_PKT_DATA]	    = "DATA",
		[DCCP_PKT_ACK]	    = "ACK",
		[DCCP_PKT_DATAACK]  = "DATAACK",
		[DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
		[DCCP_PKT_CLOSE]    = "CLOSE",
		[DCCP_PKT_RESET]    = "RESET",
		[DCCP_PKT_SYNC]	    = "SYNC",
		[DCCP_PKT_SYNCACK]  = "SYNCACK",
	};

	if (type >= DCCP_NR_PKT_TYPES)
		return "INVALID";
	else
		return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);

const char *dccp_state_name(const int state)
{
	static const char *dccp_state_names[] = {
		[DCCP_OPEN]	  = "OPEN",
		[DCCP_REQUESTING] = "REQUESTING",
		[DCCP_PARTOPEN]	  = "PARTOPEN",
		[DCCP_LISTEN]	  = "LISTEN",
		[DCCP_RESPOND]	  = "RESPOND",
		[DCCP_CLOSING]	  = "CLOSING",
		[DCCP_TIME_WAIT]  = "TIME_WAIT",
		[DCCP_CLOSED]	  = "CLOSED",
	};

	if (state >= DCCP_MAX_STATES)
		return "INVALID STATE!";
	else
		return dccp_state_names[state];
}

EXPORT_SYMBOL_GPL(dccp_state_name);

void dccp_hash(struct sock *sk)
{
	inet_hash(&dccp_hashinfo, sk);
}

EXPORT_SYMBOL_GPL(dccp_hash);

void dccp_unhash(struct sock *sk)
{
	inet_unhash(&dccp_hashinfo, sk);
}

EXPORT_SYMBOL_GPL(dccp_unhash);

int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_minisock *dmsk = dccp_msk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	dccp_minisock_init(&dp->dccps_minisock);
	do_gettimeofday(&dp->dccps_epoch);

	/*
	 * FIXME: We're hardcoding the CCID, and doing this at this point makes
	 * the listening (master) sock get CCID control blocks, which is not
	 * necessary, but for now, to not mess with the test userspace apps,
	 * let's leave it here; later the real solution is to do this in a
	 * setsockopt(CCIDs-I-want/accept). -acme
	 */
	if (likely(ctl_sock_initialized)) {
		int rc = dccp_feat_init(dmsk);

		if (rc)
			return rc;

		if (dmsk->dccpms_send_ack_vector) {
			dp->dccps_hc_rx_ackvec = dccp_ackvec_alloc(GFP_KERNEL);
			if (dp->dccps_hc_rx_ackvec == NULL)
				return -ENOMEM;
		}
		dp->dccps_hc_rx_ccid = ccid_hc_rx_new(dmsk->dccpms_rx_ccid,
						      sk, GFP_KERNEL);
		dp->dccps_hc_tx_ccid = ccid_hc_tx_new(dmsk->dccpms_tx_ccid,
						      sk, GFP_KERNEL);
		if (unlikely(dp->dccps_hc_rx_ccid == NULL ||
			     dp->dccps_hc_tx_ccid == NULL)) {
			ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
			ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
			if (dmsk->dccpms_send_ack_vector) {
				dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
				dp->dccps_hc_rx_ackvec = NULL;
			}
			dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
			return -ENOMEM;
		}
	} else {
		/* control socket doesn't need feat nego */
		INIT_LIST_HEAD(&dmsk->dccpms_pending);
		INIT_LIST_HEAD(&dmsk->dccpms_conf);
	}

	dccp_init_xmit_timers(sk);
	icsk->icsk_rto = DCCP_TIMEOUT_INIT;
	sk->sk_state = DCCP_CLOSED;
	sk->sk_write_space = dccp_write_space;
	icsk->icsk_sync_mss = dccp_sync_mss;
	dp->dccps_mss_cache = 536;
	dp->dccps_role = DCCP_ROLE_UNDEFINED;
	dp->dccps_service = DCCP_SERVICE_INVALID_VALUE;
	dp->dccps_l_ack_ratio = dp->dccps_r_ack_ratio = 1;

	return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);

int dccp_destroy_sock(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_minisock *dmsk = dccp_msk(sk);

	/*
	 * DCCP doesn't use sk_write_queue, just sk_send_head
	 * for retransmissions
	 */
	if (sk->sk_send_head != NULL) {
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	/* Clean up a referenced DCCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash != NULL)
		inet_put_port(&dccp_hashinfo, sk);

	kfree(dp->dccps_service_list);
	dp->dccps_service_list = NULL;

	if (dmsk->dccpms_send_ack_vector) {
		dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
		dp->dccps_hc_rx_ackvec = NULL;
	}
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;

	/* clean up feature negotiation state */
	dccp_feat_clean(dmsk);

	return 0;
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);

static inline int dccp_listen_start(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	dp->dccps_role = DCCP_ROLE_LISTEN;
	/*
	 * Apps need to use setsockopt(DCCP_SOCKOPT_SERVICE)
	 * before calling listen()
	 */
	if (dccp_service_not_initialized(sk))
		return -EPROTO;
	return inet_csk_listen_start(sk, TCP_SYNQ_HSIZE);
}

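/*
 * Illustrative sketch (not part of the original file): how a userspace
 * server might satisfy the requirement above before listen(). The
 * service code 42 is made up; service codes are __be32 values, so they
 * are passed in network byte order.
 */
#if 0	/* example only, never compiled */
	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
	const uint32_t service = htonl(42);

	if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
		       &service, sizeof(service)) < 0)
		perror("setsockopt");
	/* bind() to a local port as usual, then: */
	if (listen(fd, 5) < 0)
		perror("listen");	/* -EPROTO if the service was not set */
#endif
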
int dccp_disconnect(struct sock *sk, int flags)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = 0;
	const int old_state = sk->sk_state;

	if (old_state != DCCP_CLOSED)
		dccp_set_state(sk, DCCP_CLOSED);

	/* ABORT function of RFC793 */
	if (old_state == DCCP_LISTEN) {
		inet_csk_listen_stop(sk);
		/* FIXME: do the active reset thing */
	} else if (old_state == DCCP_REQUESTING)
		sk->sk_err = ECONNRESET;

	dccp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	if (sk->sk_send_head != NULL) {
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	inet->dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);

	icsk->icsk_backoff = 0;
	inet_csk_delack_init(sk);
	__sk_dst_reset(sk);

	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);

/*
 * Wait for a DCCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
unsigned int dccp_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;

	poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == DCCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
		if (atomic_read(&sk->sk_rmem_alloc) > 0)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else { /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}
	}
	return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);

int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	dccp_pr_debug("entry\n");
	return -ENOIOCTLCMD;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);

static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
				   char __user *optval, int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_service_list *sl = NULL;

	if (service == DCCP_SERVICE_INVALID_VALUE ||
	    optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
		return -EINVAL;

	if (optlen > sizeof(service)) {
		sl = kmalloc(optlen, GFP_KERNEL);
		if (sl == NULL)
			return -ENOMEM;

		sl->dccpsl_nr = optlen / sizeof(u32) - 1;
		if (copy_from_user(sl->dccpsl_list,
				   optval + sizeof(service),
				   optlen - sizeof(service)) ||
		    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
			kfree(sl);
			return -EFAULT;
		}
	}

	lock_sock(sk);
	dp->dccps_service = service;

	kfree(dp->dccps_service_list);

	dp->dccps_service_list = sl;
	release_sock(sk);
	return 0;
}

/* Byte 1 is the feature number; the rest is the preference list. */
static int dccp_setsockopt_change(struct sock *sk, int type,
				  struct dccp_so_feat __user *optval)
{
	struct dccp_so_feat opt;
	u8 *val;
	int rc;

	if (copy_from_user(&opt, optval, sizeof(opt)))
		return -EFAULT;

	val = kmalloc(opt.dccpsf_len, GFP_KERNEL);
	if (!val)
		return -ENOMEM;

	if (copy_from_user(val, opt.dccpsf_val, opt.dccpsf_len)) {
		rc = -EFAULT;
		goto out_free_val;
	}

	rc = dccp_feat_change(dccp_msk(sk), type, opt.dccpsf_feat,
			      val, opt.dccpsf_len, GFP_KERNEL);
	if (rc)
		goto out_free_val;

out:
	return rc;

out_free_val:
	kfree(val);
	goto out;
}

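/*
 * Illustrative sketch (not part of the original file): requesting a CCID
 * preference list from userspace through the CHANGE_L handling above,
 * most-preferred value first. "fd" is assumed to be an unconnected
 * SOCK_DCCP socket.
 */
#if 0	/* example only, never compiled */
	uint8_t ccids[] = { 3, 2 };	/* prefer CCID3, fall back to CCID2 */
	struct dccp_so_feat feat = {
		.dccpsf_feat	= DCCPF_CCID,
		.dccpsf_val	= ccids,
		.dccpsf_len	= sizeof(ccids),
	};

	if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CHANGE_L,
		       &feat, sizeof(feat)) < 0)
		perror("setsockopt");
#endif
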
static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, int optlen)
{
	struct dccp_sock *dp;
	int err;
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	if (optname == DCCP_SOCKOPT_SERVICE)
		return dccp_setsockopt_service(sk, val, optval, optlen);

	lock_sock(sk);
	dp = dccp_sk(sk);
	err = 0;

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		dp->dccps_packet_size = val;
		break;

	case DCCP_SOCKOPT_CHANGE_L:
		if (optlen != sizeof(struct dccp_so_feat))
			err = -EINVAL;
		else
			err = dccp_setsockopt_change(sk, DCCPO_CHANGE_L,
						     (struct dccp_so_feat __user *)
						     optval);
		break;

	case DCCP_SOCKOPT_CHANGE_R:
		if (optlen != sizeof(struct dccp_so_feat))
			err = -EINVAL;
		else
			err = dccp_setsockopt_change(sk, DCCPO_CHANGE_R,
						     (struct dccp_so_feat __user *)
						     optval);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

int dccp_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
#endif

static int dccp_getsockopt_service(struct sock *sk, int len,
				   __be32 __user *optval,
				   int __user *optlen)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const struct dccp_service_list *sl;
	int err = -ENOENT, slen = 0, total_len = sizeof(u32);

	lock_sock(sk);
	if (dccp_service_not_initialized(sk))
		goto out;

	if ((sl = dp->dccps_service_list) != NULL) {
		slen = sl->dccpsl_nr * sizeof(u32);
		total_len += slen;
	}

	err = -EINVAL;
	if (total_len > len)
		goto out;

	err = 0;
	if (put_user(total_len, optlen) ||
	    put_user(dp->dccps_service, optval) ||
	    (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
		err = -EFAULT;
out:
	release_sock(sk);
	return err;
}

static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		val = dp->dccps_packet_size;
		len = sizeof(dp->dccps_packet_size);
		break;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case 128 ... 191:
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
#endif

int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, noblock);

	/*
	 * We have to use sk_stream_wait_connect here to set sk_write_pending,
	 * so that the trick in dccp_rcv_request_sent_state_process works.
	 */
	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN | DCCPF_CLOSING))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (rc != 0)
		goto out_discard;

	rc = dccp_write_xmit(sk, skb, &timeo);
	/*
	 * XXX we don't use sk_write_queue, so just discard the packet.
	 * Current plan however is to _use_ sk_write_queue with
	 * an algorithm similar to tcp_sendmsg, where the main difference
	 * is that in DCCP we have to respect packet boundaries, so
	 * no coalescing of skbs.
	 *
	 * This bug was _quickly_ found & fixed by just looking at an OSTRA
	 * generated callgraph 8) -acme
	 */
out_release:
	release_sock(sk);
	return rc ? : len;
out_discard:
	kfree_skb(skb);
	goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);

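/*
 * Illustrative sketch (not part of the original file): DCCP preserves
 * packet boundaries, so each send() is at most one DCCP-Data packet and
 * dccp_sendmsg() above rejects anything larger than the current MSS
 * with -EMSGSIZE; a single recv() likewise returns at most one packet,
 * with MSG_TRUNC set if the buffer was too small. "fd" is assumed to be
 * a connected SOCK_DCCP socket and the buffer sizes are made up.
 */
#if 0	/* example only, never compiled */
	char req[128] = "hello", resp[1500];
	ssize_t n;

	if (send(fd, req, sizeof(req), 0) < 0)
		perror("send");			/* EMSGSIZE if 128 > MSS */

	n = recv(fd, resp, sizeof(resp), 0);	/* at most one packet */
#endif
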
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len, int nonblock, int flags, int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		if (dh->dccph_type == DCCP_PKT_DATA ||
		    dh->dccph_type == DCCP_PKT_DATAACK)
			goto found_ok_skb;

		if (dh->dccph_type == DCCP_PKT_RESET ||
		    dh->dccph_type == DCCP_PKT_CLOSE) {
			dccp_pr_debug("found fin ok!\n");
			len = 0;
			goto found_fin_ok;
		}
		dccp_pr_debug("packet_type=%s\n",
			      dccp_packet_name(dh->dccph_type));
		sk_eat_skb(sk, skb, 0);
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when the user tries to read
				 * from a socket that was never connected.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		sk_wait_data(sk, &timeo);
		continue;
found_ok_skb:
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb, 0);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);

int inet_dccp_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
		goto out;

	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != DCCP_LISTEN) {
		/*
		 * FIXME: here it probably should be sk->sk_prot->listen_start
		 * see tcp_listen_start
		 */
		err = dccp_listen_start(sk);
		if (err)
			goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	err = 0;

out:
	release_sock(sk);
	return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);

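/*
 * Each entry below packs the successor state into the low bits
 * (DCCP_STATE_MASK) and ORs in DCCP_ACTION_FIN when reaching it
 * requires sending a CLOSE/CLOSEREQ; dccp_close_state() splits the
 * two halves again for dccp_close().
 */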
static const unsigned char dccp_new_state[] = {
	/* current state:	new state:		action: */
	[0]		  = DCCP_CLOSED,
	[DCCP_OPEN]	  = DCCP_CLOSING | DCCP_ACTION_FIN,
	[DCCP_REQUESTING] = DCCP_CLOSED,
	[DCCP_PARTOPEN]	  = DCCP_CLOSING | DCCP_ACTION_FIN,
	[DCCP_LISTEN]	  = DCCP_CLOSED,
	[DCCP_RESPOND]	  = DCCP_CLOSED,
	[DCCP_CLOSING]	  = DCCP_CLOSED,
	[DCCP_TIME_WAIT]  = DCCP_CLOSED,
	[DCCP_CLOSED]	  = DCCP_CLOSED,
};

static int dccp_close_state(struct sock *sk)
{
	const int next = dccp_new_state[sk->sk_state];
	const int ns = next & DCCP_STATE_MASK;

	if (ns != sk->sk_state)
		dccp_set_state(sk, ns);

	return next & DCCP_ACTION_FIN;
}

void dccp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	/*
	 * We need to flush the receive buffers. We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	/* FIXME: check for unread data */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		__kfree_skb(skb);
	}

	if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (dccp_close_state(sk)) {
		dccp_send_close(sk, 1);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);
	atomic_inc(sk->sk_prot->orphan_count);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	BUG_TRAP(!sock_owned_by_user(sk));

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	/*
	 * The last release_sock may have processed the CLOSE or RESET
	 * packet moving sock to CLOSED state, if not we have to fire
	 * the CLOSE/CLOSEREQ retransmission timer, see "8.3. Termination"
	 * in draft-ietf-dccp-spec-11. -acme
	 */
	if (sk->sk_state == DCCP_CLOSING) {
		/* FIXME: should start at 2 * RTT */
		/* Timer for repeating the CLOSE/CLOSEREQ until an answer. */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto,
					  DCCP_RTO_MAX);
#if 0
		/* Yeah, we should use sk->sk_prot->orphan_count, etc */
		dccp_set_state(sk, DCCP_CLOSED);
#endif
	}

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);

void dccp_shutdown(struct sock *sk, int how)
{
	dccp_pr_debug("entry\n");
}

EXPORT_SYMBOL_GPL(dccp_shutdown);

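/*
 * DEFINE_SNMP_STAT above declares dccp_statistics as a pair of per-CPU
 * tables: by the SNMP helper convention index 0 is updated from softirq
 * (BH) context and index 1 from process context, hence the two
 * allocations below.
 */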
static int __init dccp_mib_init(void)
{
	int rc = -ENOMEM;

	dccp_statistics[0] = alloc_percpu(struct dccp_mib);
	if (dccp_statistics[0] == NULL)
		goto out;

	dccp_statistics[1] = alloc_percpu(struct dccp_mib);
	if (dccp_statistics[1] == NULL)
		goto out_free_one;

	rc = 0;
out:
	return rc;
out_free_one:
	free_percpu(dccp_statistics[0]);
	dccp_statistics[0] = NULL;
	goto out;
}

static void dccp_mib_exit(void)
{
	free_percpu(dccp_statistics[0]);
	free_percpu(dccp_statistics[1]);
	dccp_statistics[0] = dccp_statistics[1] = NULL;
}

static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
int dccp_debug;
module_param(dccp_debug, int, 0444);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif

static int __init dccp_init(void)
{
	unsigned long goal;
	int ehash_order, bhash_order, i;
	int rc = -ENOBUFS;

	dccp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("dccp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!dccp_hashinfo.bind_bucket_cachep)
		goto out;

	/*
	 * Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (num_physpages >= (128 * 1024))
		goal = num_physpages >> (21 - PAGE_SHIFT);
	else
		goal = num_physpages >> (23 - PAGE_SHIFT);

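	/*
	 * Independent of PAGE_SHIFT this works out to roughly one page
	 * of hash table per 2MB of memory on larger boxes (the 128K-page
	 * threshold is 512MB with 4KB pages) and one page per 8MB on
	 * smaller ones, unless overridden by thash_entries below.
	 */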
	if (thash_entries)
		goal = (thash_entries *
			sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
	for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
		;
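	/*
	 * Halve ehash_size and round it down to a power of two: the
	 * table is initialized as ehash_size << 1 buckets below, the
	 * upper half of which is used for TIME_WAIT sockets.
	 */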
	do {
		dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
					sizeof(struct inet_ehash_bucket);
		dccp_hashinfo.ehash_size >>= 1;
		while (dccp_hashinfo.ehash_size &
		       (dccp_hashinfo.ehash_size - 1))
			dccp_hashinfo.ehash_size--;
		dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC, ehash_order);
	} while (!dccp_hashinfo.ehash && --ehash_order > 0);

	if (!dccp_hashinfo.ehash) {
		printk(KERN_CRIT "Failed to allocate DCCP "
				 "established hash table\n");
		goto out_free_bind_bucket_cachep;
	}

	for (i = 0; i < (dccp_hashinfo.ehash_size << 1); i++) {
		rwlock_init(&dccp_hashinfo.ehash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
	}

	bhash_order = ehash_order;

	do {
		dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
					sizeof(struct inet_bind_hashbucket);
		if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
		    bhash_order > 0)
			continue;
		dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC, bhash_order);
	} while (!dccp_hashinfo.bhash && --bhash_order >= 0);

	if (!dccp_hashinfo.bhash) {
		printk(KERN_CRIT "Failed to allocate DCCP bind hash table\n");
		goto out_free_dccp_ehash;
	}

	for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
		spin_lock_init(&dccp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
	}

	rc = dccp_mib_init();
	if (rc)
		goto out_free_dccp_bhash;

	rc = dccp_ackvec_init();
	if (rc)
		goto out_free_dccp_mib;

	rc = dccp_sysctl_init();
	if (rc)
		goto out_ackvec_exit;
out:
	return rc;
out_ackvec_exit:
	dccp_ackvec_exit();
out_free_dccp_mib:
	dccp_mib_exit();
out_free_dccp_bhash:
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
	dccp_hashinfo.bhash = NULL;
out_free_dccp_ehash:
	free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
	dccp_hashinfo.ehash = NULL;
out_free_bind_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_hashinfo.bind_bucket_cachep = NULL;
	goto out;
}

static void __exit dccp_fini(void)
{
	dccp_mib_exit();
	free_pages((unsigned long)dccp_hashinfo.bhash,
		   get_order(dccp_hashinfo.bhash_size *
			     sizeof(struct inet_bind_hashbucket)));
	free_pages((unsigned long)dccp_hashinfo.ehash,
		   get_order(dccp_hashinfo.ehash_size *
			     sizeof(struct inet_ehash_bucket)));
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_ackvec_exit();
	dccp_sysctl_exit();
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Control Protocol");