/*
 *  net/dccp/proto.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License version 2 as
 *	published by the Free Software Foundation.
 */

#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/semaphore.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"

DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

atomic_t dccp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo __cacheline_aligned dccp_hashinfo = {
	.lhash_lock	= RW_LOCK_UNLOCKED,
	.lhash_users	= ATOMIC_INIT(0),
	.lhash_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(dccp_hashinfo.lhash_wait),
};

EXPORT_SYMBOL_GPL(dccp_hashinfo);

void dccp_set_state(struct sock *sk, const int state)
{
	const int oldstate = sk->sk_state;

	dccp_pr_debug("%s(%p) %-10.10s -> %s\n",
		      dccp_role(sk), sk,
		      dccp_state_name(oldstate), dccp_state_name(state));
	WARN_ON(state == oldstate);

	switch (state) {
	case DCCP_OPEN:
		if (oldstate != DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
		break;

	case DCCP_CLOSED:
		if (oldstate == DCCP_CLOSING || oldstate == DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash != NULL &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(&dccp_hashinfo, sk);
		/* fall through */
	default:
		if (oldstate == DCCP_OPEN)
			DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;
}

EXPORT_SYMBOL_GPL(dccp_set_state);

void dccp_done(struct sock *sk)
{
	dccp_set_state(sk, DCCP_CLOSED);
	dccp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);

const char *dccp_packet_name(const int type)
{
	static const char *dccp_packet_names[] = {
		[DCCP_PKT_REQUEST]  = "REQUEST",
		[DCCP_PKT_RESPONSE] = "RESPONSE",
		[DCCP_PKT_DATA]	    = "DATA",
		[DCCP_PKT_ACK]	    = "ACK",
		[DCCP_PKT_DATAACK]  = "DATAACK",
		[DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
		[DCCP_PKT_CLOSE]    = "CLOSE",
		[DCCP_PKT_RESET]    = "RESET",
		[DCCP_PKT_SYNC]	    = "SYNC",
		[DCCP_PKT_SYNCACK]  = "SYNCACK",
	};

	if (type >= DCCP_NR_PKT_TYPES)
		return "INVALID";
	else
		return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);

const char *dccp_state_name(const int state)
{
	static char *dccp_state_names[] = {
		[DCCP_OPEN]	  = "OPEN",
		[DCCP_REQUESTING] = "REQUESTING",
		[DCCP_PARTOPEN]	  = "PARTOPEN",
		[DCCP_LISTEN]	  = "LISTEN",
		[DCCP_RESPOND]	  = "RESPOND",
		[DCCP_CLOSING]	  = "CLOSING",
		[DCCP_TIME_WAIT]  = "TIME_WAIT",
		[DCCP_CLOSED]	  = "CLOSED",
	};

	if (state >= DCCP_MAX_STATES)
		return "INVALID STATE!";
	else
		return dccp_state_names[state];
}

EXPORT_SYMBOL_GPL(dccp_state_name);

void dccp_hash(struct sock *sk)
{
	inet_hash(&dccp_hashinfo, sk);
}

EXPORT_SYMBOL_GPL(dccp_hash);

void dccp_unhash(struct sock *sk)
{
	inet_unhash(&dccp_hashinfo, sk);
}

EXPORT_SYMBOL_GPL(dccp_unhash);

int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_minisock *dmsk = dccp_msk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	dccp_minisock_init(&dp->dccps_minisock);
	do_gettimeofday(&dp->dccps_epoch);

	/*
	 * FIXME: We're hardcoding the CCID, and doing this at this point makes
	 * the listening (master) sock get CCID control blocks, which is not
	 * necessary, but for now, to not mess with the test userspace apps,
	 * let's leave it here, later the real solution is to do this in a
	 * setsockopt(CCIDs-I-want/accept). -acme
	 */
	if (likely(ctl_sock_initialized)) {
		int rc = dccp_feat_init(dmsk);

		if (rc)
			return rc;

		if (dmsk->dccpms_send_ack_vector) {
			dp->dccps_hc_rx_ackvec = dccp_ackvec_alloc(GFP_KERNEL);
			if (dp->dccps_hc_rx_ackvec == NULL)
				return -ENOMEM;
		}
		dp->dccps_hc_rx_ccid = ccid_hc_rx_new(dmsk->dccpms_rx_ccid,
						      sk, GFP_KERNEL);
		dp->dccps_hc_tx_ccid = ccid_hc_tx_new(dmsk->dccpms_tx_ccid,
						      sk, GFP_KERNEL);
		if (unlikely(dp->dccps_hc_rx_ccid == NULL ||
			     dp->dccps_hc_tx_ccid == NULL)) {
			ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
			ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
			if (dmsk->dccpms_send_ack_vector) {
				dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
				dp->dccps_hc_rx_ackvec = NULL;
			}
			dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
			return -ENOMEM;
		}
	} else {
		/* control socket doesn't need feat nego */
		INIT_LIST_HEAD(&dmsk->dccpms_pending);
		INIT_LIST_HEAD(&dmsk->dccpms_conf);
	}

	dccp_init_xmit_timers(sk);
	icsk->icsk_rto		= DCCP_TIMEOUT_INIT;
	sk->sk_state		= DCCP_CLOSED;
	sk->sk_write_space	= dccp_write_space;
	icsk->icsk_sync_mss	= dccp_sync_mss;
	dp->dccps_mss_cache	= 536;
	dp->dccps_role		= DCCP_ROLE_UNDEFINED;
	dp->dccps_service	= DCCP_SERVICE_INVALID_VALUE;
	dp->dccps_l_ack_ratio	= dp->dccps_r_ack_ratio = 1;

	return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);

int dccp_destroy_sock(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_minisock *dmsk = dccp_msk(sk);

	/*
	 * DCCP doesn't use sk_write_queue, just sk_send_head
	 * for retransmissions
	 */
	if (sk->sk_send_head != NULL) {
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	/* Clean up a referenced DCCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash != NULL)
		inet_put_port(&dccp_hashinfo, sk);

	kfree(dp->dccps_service_list);
	dp->dccps_service_list = NULL;

	if (dmsk->dccpms_send_ack_vector) {
		dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
		dp->dccps_hc_rx_ackvec = NULL;
	}
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;

	/* clean up feature negotiation state */
	dccp_feat_clean(dmsk);

	return 0;
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);

static inline int dccp_listen_start(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	dp->dccps_role = DCCP_ROLE_LISTEN;
	/*
	 * Apps need to use setsockopt(DCCP_SOCKOPT_SERVICE)
	 * before calling listen()
	 */
	if (dccp_service_not_initialized(sk))
		return -EPROTO;
	return inet_csk_listen_start(sk, TCP_SYNQ_HSIZE);
}

int dccp_disconnect(struct sock *sk, int flags)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = 0;
	const int old_state = sk->sk_state;

	if (old_state != DCCP_CLOSED)
		dccp_set_state(sk, DCCP_CLOSED);

	/* ABORT function of RFC793 */
	if (old_state == DCCP_LISTEN) {
		inet_csk_listen_stop(sk);
	/* FIXME: do the active reset thing */
	} else if (old_state == DCCP_REQUESTING)
		sk->sk_err = ECONNRESET;

	dccp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	if (sk->sk_send_head != NULL) {
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	inet->dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);

	icsk->icsk_backoff = 0;
	inet_csk_delack_init(sk);
	__sk_dst_reset(sk);

	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);

/*
 * Wait for a DCCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
unsigned int dccp_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;

	poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == DCCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	   by poll logic and correct handling of state changes
	   made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
		if (atomic_read(&sk->sk_rmem_alloc) > 0)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}
	}
	return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);

int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	dccp_pr_debug("entry\n");
	return -ENOIOCTLCMD;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);

static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
				   char __user *optval, int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_service_list *sl = NULL;

	if (service == DCCP_SERVICE_INVALID_VALUE ||
	    optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
		return -EINVAL;

	if (optlen > sizeof(service)) {
		sl = kmalloc(optlen, GFP_KERNEL);
		if (sl == NULL)
			return -ENOMEM;

		sl->dccpsl_nr = optlen / sizeof(u32) - 1;
		if (copy_from_user(sl->dccpsl_list,
				   optval + sizeof(service),
				   optlen - sizeof(service)) ||
		    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
			kfree(sl);
			return -EFAULT;
		}
	}

	lock_sock(sk);
	dp->dccps_service = service;

	kfree(dp->dccps_service_list);

	dp->dccps_service_list = sl;
	release_sock(sk);
	return 0;
}

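/*
 * Illustrative sketch (userspace, not part of this file): the optval
 * buffer for DCCP_SOCKOPT_SERVICE is one __be32 service code, optionally
 * followed by further __be32 codes, e.g.:
 *
 *	__be32 codes[2] = { htonl(42), htonl(43) };
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
 *		   codes, sizeof(codes));
 *
 * The first code becomes dccps_service, the remainder dccps_service_list,
 * and this must happen before listen(), cf. dccp_listen_start().
 */
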
/* byte 1 is the feature number, the rest is the preference list */
static int dccp_setsockopt_change(struct sock *sk, int type,
				  struct dccp_so_feat __user *optval)
{
	struct dccp_so_feat opt;
	u8 *val;
	int rc;

	if (copy_from_user(&opt, optval, sizeof(opt)))
		return -EFAULT;

	val = kmalloc(opt.dccpsf_len, GFP_KERNEL);
	if (!val)
		return -ENOMEM;

	if (copy_from_user(val, opt.dccpsf_val, opt.dccpsf_len)) {
		rc = -EFAULT;
		goto out_free_val;
	}

	rc = dccp_feat_change(dccp_msk(sk), type, opt.dccpsf_feat,
			      val, opt.dccpsf_len, GFP_KERNEL);
	if (rc)
		goto out_free_val;

out:
	return rc;

out_free_val:
	kfree(val);
	goto out;
}

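/*
 * Illustrative sketch (feature number is an assumption, see
 * include/linux/dccp.h): to propose values for the local half-connection,
 * userspace would fill a struct dccp_so_feat, e.g. the CCID feature with
 * preference list { 3, 2 } and dccpsf_len = 2, and pass it via
 * DCCP_SOCKOPT_CHANGE_L; dccp_setsockopt_change() then hands it to
 * dccp_feat_change() above for negotiation with the peer.
 */
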
static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, int optlen)
{
	struct dccp_sock *dp;
	int err;
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	if (optname == DCCP_SOCKOPT_SERVICE)
		return dccp_setsockopt_service(sk, val, optval, optlen);

	lock_sock(sk);
	dp = dccp_sk(sk);
	err = 0;

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		dp->dccps_packet_size = val;
		break;

	case DCCP_SOCKOPT_CHANGE_L:
		if (optlen != sizeof(struct dccp_so_feat))
			err = -EINVAL;
		else
			err = dccp_setsockopt_change(sk, DCCPO_CHANGE_L,
						     (struct dccp_so_feat *)
						     optval);
		break;

	case DCCP_SOCKOPT_CHANGE_R:
		if (optlen != sizeof(struct dccp_so_feat))
			err = -EINVAL;
		else
			err = dccp_setsockopt_change(sk, DCCPO_CHANGE_R,
						     (struct dccp_so_feat *)
						     optval);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

int dccp_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
#endif

static int dccp_getsockopt_service(struct sock *sk, int len,
				   __be32 __user *optval,
				   int __user *optlen)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const struct dccp_service_list *sl;
	int err = -ENOENT, slen = 0, total_len = sizeof(u32);

	lock_sock(sk);
	if (dccp_service_not_initialized(sk))
		goto out;

	if ((sl = dp->dccps_service_list) != NULL) {
		slen = sl->dccpsl_nr * sizeof(u32);
		total_len += slen;
	}

	err = -EINVAL;
	if (total_len > len)
		goto out;

	err = 0;
	if (put_user(total_len, optlen) ||
	    put_user(dp->dccps_service, optval) ||
	    (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
		err = -EFAULT;
out:
	release_sock(sk);
	return err;
}

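/*
 * Note: the buffer filled in above mirrors the setsockopt layout: the
 * primary service code in the first __be32, then the dccpsl_nr codes of
 * the service list, so total_len = (1 + dccpsl_nr) * sizeof(u32).
 */
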
static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		val = dp->dccps_packet_size;
		len = sizeof(dp->dccps_packet_size);
		break;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case 128 ... 191:
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

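/*
 * Note on the ranges above: socket option names 128..191 are passed
 * through to the RX CCID and 192..255 to the TX CCID, so each
 * congestion-control module can expose its own options without this
 * switch having to know about them.
 */
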
int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
#endif

int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, noblock);

	/*
	 * We have to use sk_stream_wait_connect here to set sk_write_pending,
	 * so that the trick in dccp_rcv_request_sent_state_process works.
	 */
	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN | DCCPF_CLOSING))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (rc != 0)
		goto out_discard;

	rc = dccp_write_xmit(sk, skb, &timeo);
	/*
	 * XXX we don't use sk_write_queue, so just discard the packet.
	 * Current plan however is to _use_ sk_write_queue with
	 * an algorithm similar to tcp_sendmsg, where the main difference
	 * is that in DCCP we have to respect packet boundaries, so
	 * no coalescing of skbs.
	 *
	 * This bug was _quickly_ found & fixed by just looking at an OSTRA
	 * generated callgraph 8) -acme
	 */
out_release:
	release_sock(sk);
	return rc ? : len;
out_discard:
	kfree_skb(skb);
	goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);

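/*
 * Illustrative only: DCCP preserves datagram boundaries, so each send()
 * handled above maps to at most one packet, and oversized writes fail up
 * front instead of being segmented:
 *
 *	n = send(fd, buf, len, 0);	// -EMSGSIZE if len > dccps_mss_cache
 */
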
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len, int nonblock, int flags, int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		if (dh->dccph_type == DCCP_PKT_DATA ||
		    dh->dccph_type == DCCP_PKT_DATAACK)
			goto found_ok_skb;

		if (dh->dccph_type == DCCP_PKT_RESET ||
		    dh->dccph_type == DCCP_PKT_CLOSE) {
			dccp_pr_debug("found fin ok!\n");
			len = 0;
			goto found_fin_ok;
		}
		dccp_pr_debug("packet_type=%s\n",
			      dccp_packet_name(dh->dccph_type));
		sk_eat_skb(sk, skb);
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when user tries to read
				 * from never connected socket.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		sk_wait_data(sk, &timeo);
		continue;
	found_ok_skb:
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
	found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);

int inet_dccp_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
		goto out;

	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != DCCP_LISTEN) {
		/*
		 * FIXME: here it probably should be sk->sk_prot->listen_start
		 * see tcp_listen_start
		 */
		err = dccp_listen_start(sk);
		if (err)
			goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	err = 0;

out:
	release_sock(sk);
	return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);

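/*
 * Illustrative server-side sequence (userspace sketch, constants as per
 * include/linux/dccp.h):
 *
 *	fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE, &code, sizeof(code));
 *	bind(fd, ...);
 *	listen(fd, 5);	// fails with EPROTO if no service code was set
 */
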
static const unsigned char dccp_new_state[] = {
	/* current state:    new state:       action:	*/
	[0]		  = DCCP_CLOSED,
	[DCCP_OPEN]	  = DCCP_CLOSING | DCCP_ACTION_FIN,
	[DCCP_REQUESTING] = DCCP_CLOSED,
	[DCCP_PARTOPEN]	  = DCCP_CLOSING | DCCP_ACTION_FIN,
	[DCCP_LISTEN]	  = DCCP_CLOSED,
	[DCCP_RESPOND]	  = DCCP_CLOSED,
	[DCCP_CLOSING]	  = DCCP_CLOSED,
	[DCCP_TIME_WAIT]  = DCCP_CLOSED,
	[DCCP_CLOSED]	  = DCCP_CLOSED,
};

static int dccp_close_state(struct sock *sk)
{
	const int next = dccp_new_state[sk->sk_state];
	const int ns = next & DCCP_STATE_MASK;

	if (ns != sk->sk_state)
		dccp_set_state(sk, ns);

	return next & DCCP_ACTION_FIN;
}

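/*
 * Example: closing a socket in DCCP_OPEN looks up
 * DCCP_CLOSING | DCCP_ACTION_FIN in the table above, so dccp_close_state()
 * moves it to DCCP_CLOSING and returns non-zero, which makes dccp_close()
 * below emit a CLOSE packet via dccp_send_close().
 */
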
void dccp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	/*
	 * We need to flush the recv. buffs. We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	/* FIXME: check for unread data */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		__kfree_skb(skb);
	}

	if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (dccp_close_state(sk)) {
		dccp_send_close(sk, 1);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	BUG_TRAP(!sock_owned_by_user(sk));

	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * The last release_sock may have processed the CLOSE or RESET
	 * packet moving sock to CLOSED state, if not we have to fire
	 * the CLOSE/CLOSEREQ retransmission timer, see "8.3. Termination"
	 * in draft-ietf-dccp-spec-11. -acme
	 */
	if (sk->sk_state == DCCP_CLOSING) {
		/* FIXME: should start at 2 * RTT */
		/* Timer for repeating the CLOSE/CLOSEREQ until an answer. */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto,
					  DCCP_RTO_MAX);
#if 0
		/* Yeah, we should use sk->sk_prot->orphan_count, etc */
		dccp_set_state(sk, DCCP_CLOSED);
#endif
	}

	atomic_inc(sk->sk_prot->orphan_count);
	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);

void dccp_shutdown(struct sock *sk, int how)
{
	dccp_pr_debug("entry\n");
}

EXPORT_SYMBOL_GPL(dccp_shutdown);

static int __init dccp_mib_init(void)
{
	int rc = -ENOMEM;

	dccp_statistics[0] = alloc_percpu(struct dccp_mib);
	if (dccp_statistics[0] == NULL)
		goto out;

	dccp_statistics[1] = alloc_percpu(struct dccp_mib);
	if (dccp_statistics[1] == NULL)
		goto out_free_one;

	rc = 0;
out:
	return rc;
out_free_one:
	free_percpu(dccp_statistics[0]);
	dccp_statistics[0] = NULL;
	goto out;
}

static void dccp_mib_exit(void)
{
	free_percpu(dccp_statistics[0]);
	free_percpu(dccp_statistics[1]);
	dccp_statistics[0] = dccp_statistics[1] = NULL;
}

static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
int dccp_debug;
module_param(dccp_debug, int, 0444);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif

static int __init dccp_init(void)
{
	unsigned long goal;
	int ehash_order, bhash_order, i;
	int rc = -ENOBUFS;

	dccp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("dccp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!dccp_hashinfo.bind_bucket_cachep)
		goto out;

	/*
	 * Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (num_physpages >= (128 * 1024))
		goal = num_physpages >> (21 - PAGE_SHIFT);
	else
		goal = num_physpages >> (23 - PAGE_SHIFT);

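	/*
	 * Worked example (illustrative): with 4KB pages and 512MB of RAM
	 * (num_physpages = 128 * 1024 = 2^17), goal = 2^17 >> 9 = 256
	 * pages, so ehash_order below becomes 8 and the established hash
	 * table gets 2^8 pages = 1MB.
	 */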
	if (thash_entries)
		goal = (thash_entries *
			sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
	for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
		;
	do {
		dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
					sizeof(struct inet_ehash_bucket);
		dccp_hashinfo.ehash_size >>= 1;
		while (dccp_hashinfo.ehash_size &
		       (dccp_hashinfo.ehash_size - 1))
			dccp_hashinfo.ehash_size--;
		dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC, ehash_order);
	} while (!dccp_hashinfo.ehash && --ehash_order > 0);

	if (!dccp_hashinfo.ehash) {
		printk(KERN_CRIT "Failed to allocate DCCP "
				 "established hash table\n");
		goto out_free_bind_bucket_cachep;
	}

	for (i = 0; i < (dccp_hashinfo.ehash_size << 1); i++) {
		rwlock_init(&dccp_hashinfo.ehash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
	}

	bhash_order = ehash_order;

	do {
		dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
					sizeof(struct inet_bind_hashbucket);
		if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
		    bhash_order > 0)
			continue;
		dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC, bhash_order);
	} while (!dccp_hashinfo.bhash && --bhash_order >= 0);

	if (!dccp_hashinfo.bhash) {
		printk(KERN_CRIT "Failed to allocate DCCP bind hash table\n");
		goto out_free_dccp_ehash;
	}

	for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
		spin_lock_init(&dccp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
	}

	rc = dccp_mib_init();
	if (rc)
		goto out_free_dccp_bhash;

	rc = dccp_ackvec_init();
	if (rc)
		goto out_free_dccp_mib;

	rc = dccp_sysctl_init();
	if (rc)
		goto out_ackvec_exit;
out:
	return rc;
out_ackvec_exit:
	dccp_ackvec_exit();
out_free_dccp_mib:
	dccp_mib_exit();
out_free_dccp_bhash:
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
	dccp_hashinfo.bhash = NULL;
out_free_dccp_ehash:
	free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
	dccp_hashinfo.ehash = NULL;
out_free_bind_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_hashinfo.bind_bucket_cachep = NULL;
	goto out;
}

static void __exit dccp_fini(void)
{
	dccp_mib_exit();
	free_pages((unsigned long)dccp_hashinfo.bhash,
		   get_order(dccp_hashinfo.bhash_size *
			     sizeof(struct inet_bind_hashbucket)));
	free_pages((unsigned long)dccp_hashinfo.ehash,
		   get_order(dccp_hashinfo.ehash_size *
			     sizeof(struct inet_ehash_bucket)));
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_ackvec_exit();
	dccp_sysctl_exit();
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");