/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>

#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif

/*
 * This array holds the first and last local port number.
 */
int sysctl_local_port_range[2] = { 32768, 61000 };
DEFINE_SEQLOCK(sysctl_port_range_lock);

void inet_get_local_port_range(int *low, int *high)
{
	unsigned seq;
	do {
		seq = read_seqbegin(&sysctl_port_range_lock);

		*low = sysctl_local_port_range[0];
		*high = sysctl_local_port_range[1];
	} while (read_seqretry(&sysctl_port_range_lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);
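
/*
 * Readers above retry if they race with an update.  A writer (the sysctl
 * handler) must take the write side of the seqlock; a minimal sketch of
 * the update, assuming hypothetical new values new_low/new_high:
 *
 *	write_seqlock(&sysctl_port_range_lock);
 *	sysctl_local_port_range[0] = new_low;
 *	sysctl_local_port_range[1] = new_high;
 *	write_sequnlock(&sysctl_port_range_lock);
 */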

int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb)
{
	const __be32 sk_rcv_saddr = inet_rcv_saddr(sk);
	struct sock *sk2;
	struct hlist_node *node;
	int reuse = sk->sk_reuse;

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners list belong to the same net - the
	 * one this bucket belongs to.
	 */

	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if (!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) {
				const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
				if (!sk2_rcv_saddr || !sk_rcv_saddr ||
				    sk2_rcv_saddr == sk_rcv_saddr)
					break;
			}
		}
	}
	return node != NULL;
}

EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);

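/*
 * Seen from user space, the conflict surfaces as EADDRINUSE.  A hedged
 * sketch of the simplest failing case (two TCP sockets, same port, no
 * SO_REUSEADDR):
 *
 *	int a = socket(AF_INET, SOCK_STREAM, 0);
 *	int b = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in addr = { .sin_family = AF_INET,
 *				    .sin_port   = htons(8080) };
 *	bind(a, (struct sockaddr *)&addr, sizeof(addr));	// ok
 *	bind(b, (struct sockaddr *)&addr, sizeof(addr));	// -1, EADDRINUSE
 */
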
/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_bind_hashbucket *head;
	struct hlist_node *node;
	struct inet_bind_bucket *tb;
	int ret;
	struct net *net = sock_net(sk);

	local_bh_disable();
	if (!snum) {
		int remaining, rover, low, high;

		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;
		rover = net_random() % remaining + low;

		do {
			head = &hashinfo->bhash[inet_bhashfn(net, rover,
					hashinfo->bhash_size)];
			spin_lock(&head->lock);
			inet_bind_bucket_for_each(tb, node, &head->chain)
				if (tb->ib_net == net && tb->port == rover)
					goto next;
			break;
		next:
			spin_unlock(&head->lock);
			if (++rover > high)
				rover = low;
		} while (--remaining > 0);

		/* Exhausted local port range during search?  It is not
		 * possible for us to be holding one of the bind hash
		 * locks if this test triggers, because if 'remaining'
		 * drops to zero, we broke out of the do/while loop at
		 * the top level, not from the 'break;' statement.
		 */
		ret = 1;
		if (remaining <= 0)
			goto fail;

		/* OK, here is the one we will use.  HEAD is
		 * non-NULL and we hold its mutex.
		 */
		snum = rover;
	} else {
		head = &hashinfo->bhash[inet_bhashfn(net, snum,
				hashinfo->bhash_size)];
		spin_lock(&head->lock);
		inet_bind_bucket_for_each(tb, node, &head->chain)
			if (tb->ib_net == net && tb->port == snum)
				goto tb_found;
	}
	tb = NULL;
	goto tb_not_found;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (tb->fastreuse > 0 &&
		    sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
			goto success;
		} else {
			ret = 1;
			if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb))
				goto fail_unlock;
		}
	}
tb_not_found:
	ret = 1;
	if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
					net, head, snum)) == NULL)
		goto fail_unlock;
	if (hlist_empty(&tb->owners)) {
		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
	} else if (tb->fastreuse &&
		   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
		tb->fastreuse = 0;
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, snum);
	BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}

EXPORT_SYMBOL_GPL(inet_csk_get_port);

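/*
 * Transport protocols reach this through the ->get_port hook in their
 * struct proto.  TCP, for instance, wires it up roughly as (sketch, see
 * net/ipv4/tcp_ipv4.c for the full initializer):
 *
 *	struct proto tcp_prot = {
 *		...
 *		.get_port	= inet_csk_get_port,
 *		...
 *	};
 */
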
/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk->sk_sleep, &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk->sk_sleep, &wait);
	return err;
}

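/*
 * Hedged note on the wake-up side: because the waiter above is exclusive,
 * a plain wake_up_interruptible() on sk->sk_sleep when a new child is
 * queued rouses only the first exclusive waiter, which is what makes the
 * wake-one promise in the comment above hold.
 */
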
/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non-blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}

	newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
	BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
out:
	release_sock(sk);
	return newsk;
out_err:
	newsk = NULL;
	*err = error;
	goto out;
}

EXPORT_SYMBOL(inet_csk_accept);

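/*
 * Hedged user-space view of the -EAGAIN path above: with O_NONBLOCK set
 * and an empty accept queue, sock_rcvtimeo() yields a zero timeout and
 * accept(2) fails immediately:
 *
 *	fcntl(lfd, F_SETFL, O_NONBLOCK);
 *	int cfd = accept(lfd, NULL, NULL);	// -1, errno == EAGAIN
 */
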
/*
 * Using different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
			(unsigned long)sk);
	setup_timer(&icsk->icsk_delack_timer, delack_handler,
			(unsigned long)sk);
	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}

EXPORT_SYMBOL(inet_csk_init_xmit_timers);

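/*
 * TCP is the canonical caller; its tcp_init_xmit_timers() is essentially
 * just (sketch, see net/ipv4/tcp_timer.c):
 *
 *	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
 *				  &tcp_keepalive_timer);
 */
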
void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}

EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}

EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

struct dst_entry *inet_csk_route_req(struct sock *sk,
				     const struct request_sock *req)
{
	struct rtable *rt;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct ip_options *opt = inet_rsk(req)->opt;
	struct flowi fl = { .oif = sk->sk_bound_dev_if,
			    .nl_u = { .ip4_u =
				      { .daddr = ((opt && opt->srr) ?
						  opt->faddr :
						  ireq->rmt_addr),
					.saddr = ireq->loc_addr,
					.tos = RT_CONN_FLAGS(sk) } },
			    .proto = sk->sk_protocol,
			    .uli_u = { .ports =
				       { .sport = inet_sk(sk)->sport,
					 .dport = ireq->rmt_port } } };

	security_req_classify_flow(req, &fl);
	if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0)) {
		IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
		return NULL;
	}
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
		ip_rt_put(rt);
		IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
		return NULL;
	}
	return &rt->u.dst;
}

EXPORT_SYMBOL_GPL(inet_csk_route_req);

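/*
 * The flowi above is just a routing key.  A minimal standalone lookup
 * follows the same shape (hedged sketch, error handling elided; dip is a
 * hypothetical destination address):
 *
 *	struct rtable *rt;
 *	struct flowi fl = { .nl_u = { .ip4_u = { .daddr = dip } } };
 *
 *	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0) == 0) {
 *		... use rt->u.dst, then ip_rt_put(rt) ...
 *	}
 */
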
/* The syn-queue hash table size is always a power of two
 * (reqsk_queue_alloc rounds it up), so the jhash result can be
 * masked down with (synq_hsize - 1).
 */
static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
				 const u32 rnd, const u32 synq_hsize)
{
	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) &
	       (synq_hsize - 1);
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
#endif

struct request_sock *inet_csk_search_req(const struct sock *sk,
					 struct request_sock ***prevp,
					 const __be16 rport, const __be32 raddr,
					 const __be32 laddr)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
						    lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->rmt_port == rport &&
		    ireq->rmt_addr == raddr &&
		    ireq->loc_addr == laddr &&
		    AF_INET_FAMILY(req->rsk_ops->family)) {
			BUG_TRAP(!req->sk);
			*prevp = prev;
			break;
		}
	}

	return req;
}

EXPORT_SYMBOL_GPL(inet_csk_search_req);

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
				     lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}

EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

/* Only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;

void inet_csk_reqsk_queue_prune(struct sock *parent,
				const unsigned long interval,
				const unsigned long timeout,
				const unsigned long max_rto)
{
	struct inet_connection_sock *icsk = inet_csk(parent);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct listen_sock *lopt = queue->listen_opt;
	int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
	int thresh = max_retries;
	unsigned long now = jiffies;
	struct request_sock **reqp, *req;
	int i, budget;

	if (lopt == NULL || lopt->qlen == 0)
		return;

	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) within the first timeout.
	 * If synack was not acknowledged for 3 seconds, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of room for young
	 * embryos; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	if (lopt->qlen >> (lopt->max_qlen_log - 1)) {
		int young = (lopt->qlen_young << 1);

		while (thresh > 2) {
			if (lopt->qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}

	if (queue->rskq_defer_accept)
		max_retries = queue->rskq_defer_accept;

	budget = 2 * (lopt->nr_table_entries / (timeout / interval));
	i = lopt->clock_hand;

	do {
		reqp = &lopt->syn_table[i];
		while ((req = *reqp) != NULL) {
			if (time_after_eq(now, req->expires)) {
				if ((req->retrans < thresh ||
				     (inet_rsk(req)->acked && req->retrans < max_retries))
				    && !req->rsk_ops->rtx_syn_ack(parent, req)) {
					unsigned long timeo;

					if (req->retrans++ == 0)
						lopt->qlen_young--;
					timeo = min((timeout << req->retrans), max_rto);
					req->expires = now + timeo;
					reqp = &req->dl_next;
					continue;
				}

				/* Drop this request */
				inet_csk_reqsk_queue_unlink(parent, req, reqp);
				reqsk_queue_removed(queue, req);
				reqsk_free(req);
				continue;
			}
			reqp = &req->dl_next;
		}

		i = (i + 1) & (lopt->nr_table_entries - 1);

	} while (--budget > 0);

	lopt->clock_hand = i;

	if (lopt->qlen)
		inet_csk_reset_keepalive_timer(parent, interval);
}

EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);

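/*
 * Worked example of the threshold heuristic above (numbers illustrative):
 * with max_qlen_log = 8 the table holds 256 requests and pruning kicks in
 * once qlen reaches 128.  Say qlen = 200, qlen_young = 40 and
 * max_retries = 5: young starts at 80, so thresh drops to 4 (200 >= 80),
 * young doubles to 160, thresh drops to 3 (200 >= 160), young doubles to
 * 320 and the loop stops.  Old entries are now retired after only
 * 3 retransmits instead of 5.
 */
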
struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
			    const gfp_t priority)
{
	struct sock *newsk = sk_clone(sk, priority);

	if (newsk != NULL) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->dport = inet_rsk(req)->rmt_port;
		newsk->sk_write_space = sk_stream_write_space;

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff = 0;
		newicsk->icsk_probes_out = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}

EXPORT_SYMBOL_GPL(inet_csk_clone);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all. Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	BUG_TRAP(sk->sk_state == TCP_CLOSE);
	BUG_TRAP(sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	BUG_TRAP(sk_unhashed(sk));

	/* If it has a non-zero inet_sk(sk)->num, it must be bound */
	BUG_TRAP(!inet_sk(sk)->num || inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	atomic_dec(sk->sk_prot->orphan_count);
	sock_put(sk);
}

EXPORT_SYMBOL(inet_csk_destroy_sock);

int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

	if (rc != 0)
		return rc;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is a race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters the hash table only
	 * after validation is complete.
	 */
	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->num)) {
		inet->sport = htons(inet->num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
	return -EADDRINUSE;
}

EXPORT_SYMBOL_GPL(inet_csk_listen_start);

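/*
 * This is the kernel half of listen(2); inet_listen() in
 * net/ipv4/af_inet.c calls it roughly like (sketch):
 *
 *	if (old_state != TCP_LISTEN) {
 *		err = inet_csk_listen_start(sk, backlog);
 *		if (err)
 *			goto out;
 *	}
 */
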
/*
 * This routine closes sockets which have been at least partially
 * opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *acc_req;
	struct request_sock *req;

	inet_csk_delete_keepalive_timer(sk);

	/* make all the listen_opt local to us */
	acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous while synflood, but it is
	 * bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now. --ANK
	 */
	reqsk_queue_destroy(&icsk->icsk_accept_queue);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		BUG_TRAP(!sock_owned_by_user(child));
		sock_hold(child);

		sk->sk_prot->disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		atomic_inc(sk->sk_prot->orphan_count);

		inet_csk_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		sk_acceptq_removed(sk);
		__reqsk_free(req);
	}
	BUG_TRAP(!sk->sk_ack_backlog);
}

EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family	     = AF_INET;
	sin->sin_addr.s_addr = inet->daddr;
	sin->sin_port	     = inet->dport;
}

EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt != NULL)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}

EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt != NULL)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}

EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif