1/*
2 * TCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
9 *
10 * Based on:
11 * linux/net/ipv4/tcp.c
12 * linux/net/ipv4/tcp_input.c
13 * linux/net/ipv4/tcp_output.c
14 *
15 * Fixes:
16 * Hideaki YOSHIFUJI : sin6_scope_id support
17 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
18 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
19 * a single port at the same time.
20 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
26 */
27
28#include <linux/module.h>
29#include <linux/config.h>
30#include <linux/errno.h>
31#include <linux/types.h>
32#include <linux/socket.h>
33#include <linux/sockios.h>
34#include <linux/net.h>
35#include <linux/jiffies.h>
36#include <linux/in.h>
37#include <linux/in6.h>
38#include <linux/netdevice.h>
39#include <linux/init.h>
40#include <linux/jhash.h>
41#include <linux/ipsec.h>
42#include <linux/times.h>
43
44#include <linux/ipv6.h>
45#include <linux/icmpv6.h>
46#include <linux/random.h>
47
48#include <net/tcp.h>
49#include <net/ndisc.h>
50#include <net/inet6_hashtables.h>
51#include <net/inet6_connection_sock.h>
52#include <net/ipv6.h>
53#include <net/transp_v6.h>
54#include <net/addrconf.h>
55#include <net/ip6_route.h>
56#include <net/ip6_checksum.h>
57#include <net/inet_ecn.h>
58#include <net/protocol.h>
59#include <net/xfrm.h>
60#include <net/addrconf.h>
61#include <net/snmp.h>
62#include <net/dsfield.h>
63#include <net/timewait_sock.h>
64
65#include <asm/uaccess.h>
66
67#include <linux/proc_fs.h>
68#include <linux/seq_file.h>
69
70/* Socket used for sending RSTs and ACKs */
71static struct socket *tcp6_socket;
72
73static void tcp_v6_send_reset(struct sk_buff *skb);
74static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
75static void tcp_v6_send_check(struct sock *sk, int len,
76 struct sk_buff *skb);
77
78static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
79
80static struct inet_connection_sock_af_ops ipv6_mapped;
81static struct inet_connection_sock_af_ops ipv6_specific;
82
83static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
84{
85 return inet_csk_get_port(&tcp_hashinfo, sk, snum,
86 inet6_csk_bind_conflict);
87}
88
89static void tcp_v6_hash(struct sock *sk)
90{
91 if (sk->sk_state != TCP_CLOSE) {
92 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
93 tcp_prot.hash(sk);
94 return;
95 }
96 local_bh_disable();
97 __inet6_hash(&tcp_hashinfo, sk);
98 local_bh_enable();
99 }
100}
101
102static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
103 struct in6_addr *saddr,
104 struct in6_addr *daddr,
105 unsigned long base)
106{
107 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
108}
109
110static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
111{
112 if (skb->protocol == htons(ETH_P_IPV6)) {
113 return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
114 skb->nh.ipv6h->saddr.s6_addr32,
115 skb->h.th->dest,
116 skb->h.th->source);
117 } else {
118 return secure_tcp_sequence_number(skb->nh.iph->daddr,
119 skb->nh.iph->saddr,
120 skb->h.th->dest,
121 skb->h.th->source);
122 }
123}
124
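/*
 * Active open (connect()) for an AF_INET6 TCP socket: resolve the flow
 * label, substitute loopback for the unspecified address, enforce
 * scope-id/interface consistency for link-local destinations, fall back
 * to tcp_v4_connect() with the ipv6_mapped ops for v4-mapped addresses,
 * and otherwise do the route/xfrm lookup, pick a source address, hash
 * the socket with inet6_hash_connect() and send the SYN.
 */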
125static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
126 int addr_len)
127{
128 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
129 struct inet_sock *inet = inet_sk(sk);
130 struct inet_connection_sock *icsk = inet_csk(sk);
131 struct ipv6_pinfo *np = inet6_sk(sk);
132 struct tcp_sock *tp = tcp_sk(sk);
133 struct in6_addr *saddr = NULL, *final_p = NULL, final;
134 struct flowi fl;
135 struct dst_entry *dst;
136 int addr_type;
137 int err;
138
139 if (addr_len < SIN6_LEN_RFC2133)
140 return -EINVAL;
141
142 if (usin->sin6_family != AF_INET6)
143 return(-EAFNOSUPPORT);
144
145 memset(&fl, 0, sizeof(fl));
146
147 if (np->sndflow) {
148 fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
149 IP6_ECN_flow_init(fl.fl6_flowlabel);
150 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
151 struct ip6_flowlabel *flowlabel;
152 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
153 if (flowlabel == NULL)
154 return -EINVAL;
155 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
156 fl6_sock_release(flowlabel);
157 }
158 }
159
160 /*
161 * connect() to INADDR_ANY means loopback (BSD'ism).
162 */
163
164 if(ipv6_addr_any(&usin->sin6_addr))
165 usin->sin6_addr.s6_addr[15] = 0x1;
166
167 addr_type = ipv6_addr_type(&usin->sin6_addr);
168
169 if(addr_type & IPV6_ADDR_MULTICAST)
170 return -ENETUNREACH;
171
172 if (addr_type&IPV6_ADDR_LINKLOCAL) {
173 if (addr_len >= sizeof(struct sockaddr_in6) &&
174 usin->sin6_scope_id) {
175 /* If interface is set while binding, indices
176 * must coincide.
177 */
178 if (sk->sk_bound_dev_if &&
179 sk->sk_bound_dev_if != usin->sin6_scope_id)
180 return -EINVAL;
181
182 sk->sk_bound_dev_if = usin->sin6_scope_id;
183 }
184
185 /* Connect to link-local address requires an interface */
186 if (!sk->sk_bound_dev_if)
187 return -EINVAL;
188 }
189
190 if (tp->rx_opt.ts_recent_stamp &&
191 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
192 tp->rx_opt.ts_recent = 0;
193 tp->rx_opt.ts_recent_stamp = 0;
194 tp->write_seq = 0;
195 }
196
197 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
198 np->flow_label = fl.fl6_flowlabel;
199
200 /*
201 * TCP over IPv4
202 */
203
204 if (addr_type == IPV6_ADDR_MAPPED) {
205 u32 exthdrlen = icsk->icsk_ext_hdr_len;
206 struct sockaddr_in sin;
207
208 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
209
210 if (__ipv6_only_sock(sk))
211 return -ENETUNREACH;
212
213 sin.sin_family = AF_INET;
214 sin.sin_port = usin->sin6_port;
215 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
216
217 icsk->icsk_af_ops = &ipv6_mapped;
218 sk->sk_backlog_rcv = tcp_v4_do_rcv;
219
220 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
221
222 if (err) {
223 icsk->icsk_ext_hdr_len = exthdrlen;
224 icsk->icsk_af_ops = &ipv6_specific;
225 sk->sk_backlog_rcv = tcp_v6_do_rcv;
226 goto failure;
227 } else {
228 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
229 inet->saddr);
230 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
231 inet->rcv_saddr);
232 }
233
234 return err;
235 }
236
237 if (!ipv6_addr_any(&np->rcv_saddr))
238 saddr = &np->rcv_saddr;
239
240 fl.proto = IPPROTO_TCP;
241 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
242 ipv6_addr_copy(&fl.fl6_src,
243 (saddr ? saddr : &np->saddr));
244 fl.oif = sk->sk_bound_dev_if;
245 fl.fl_ip_dport = usin->sin6_port;
246 fl.fl_ip_sport = inet->sport;
247
248 if (np->opt && np->opt->srcrt) {
249 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
250 ipv6_addr_copy(&final, &fl.fl6_dst);
251 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
252 final_p = &final;
253 }
254
255 err = ip6_dst_lookup(sk, &dst, &fl);
256 if (err)
257 goto failure;
258 if (final_p)
259 ipv6_addr_copy(&fl.fl6_dst, final_p);
260
261 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
262 goto failure;
263
264 if (saddr == NULL) {
265 saddr = &fl.fl6_src;
266 ipv6_addr_copy(&np->rcv_saddr, saddr);
267 }
268
269 /* set the source address */
270 ipv6_addr_copy(&np->saddr, saddr);
271 inet->rcv_saddr = LOOPBACK4_IPV6;
272
273 ip6_dst_store(sk, dst, NULL);
274 sk->sk_route_caps = dst->dev->features &
275 ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
276
277 icsk->icsk_ext_hdr_len = 0;
278 if (np->opt)
279 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
280 np->opt->opt_nflen);
281
282 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
283
284 inet->dport = usin->sin6_port;
285
286 tcp_set_state(sk, TCP_SYN_SENT);
287 err = inet6_hash_connect(&tcp_death_row, sk);
288 if (err)
289 goto late_failure;
290
291 if (!tp->write_seq)
292 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
293 np->daddr.s6_addr32,
294 inet->sport,
295 inet->dport);
296
297 err = tcp_connect(sk);
298 if (err)
299 goto late_failure;
300
301 return 0;
302
303late_failure:
304 tcp_set_state(sk, TCP_CLOSE);
305 __sk_dst_reset(sk);
306failure:
307 inet->dport = 0;
308 sk->sk_route_caps = 0;
309 return err;
310}
311
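/*
 * ICMPv6 error handler: look up the socket the quoted TCP header belongs
 * to, handle ICMPV6_PKT_TOOBIG by refreshing the route and calling
 * tcp_sync_mss()/tcp_simple_retransmit(), drop a matching request_sock on
 * listeners, abort a pending connect in SYN_SENT/SYN_RECV, and otherwise
 * report the error according to np->recverr and socket ownership.
 */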
312static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
313 int type, int code, int offset, __u32 info)
314{
315 struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
316 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
317 struct ipv6_pinfo *np;
318 struct sock *sk;
319 int err;
320 struct tcp_sock *tp;
321 __u32 seq;
322
323 sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
324 th->source, skb->dev->ifindex);
325
326 if (sk == NULL) {
327 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
328 return;
329 }
330
331 if (sk->sk_state == TCP_TIME_WAIT) {
332 inet_twsk_put((struct inet_timewait_sock *)sk);
333 return;
334 }
335
336 bh_lock_sock(sk);
337 if (sock_owned_by_user(sk))
338 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
339
340 if (sk->sk_state == TCP_CLOSE)
341 goto out;
342
343 tp = tcp_sk(sk);
344 seq = ntohl(th->seq);
345 if (sk->sk_state != TCP_LISTEN &&
346 !between(seq, tp->snd_una, tp->snd_nxt)) {
347 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
348 goto out;
349 }
350
351 np = inet6_sk(sk);
352
353 if (type == ICMPV6_PKT_TOOBIG) {
354 struct dst_entry *dst = NULL;
355
356 if (sock_owned_by_user(sk))
357 goto out;
358 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
359 goto out;
360
361 /* icmp should have updated the destination cache entry */
362 dst = __sk_dst_check(sk, np->dst_cookie);
363
364 if (dst == NULL) {
365 struct inet_sock *inet = inet_sk(sk);
366 struct flowi fl;
367
368 /* BUGGG_FUTURE: Again, it is not clear how
369 to handle rthdr case. Ignore this complexity
370 for now.
371 */
372 memset(&fl, 0, sizeof(fl));
373 fl.proto = IPPROTO_TCP;
374 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
375 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
376 fl.oif = sk->sk_bound_dev_if;
377 fl.fl_ip_dport = inet->dport;
378 fl.fl_ip_sport = inet->sport;
379
380 if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
381 sk->sk_err_soft = -err;
382 goto out;
383 }
384
385 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
386 sk->sk_err_soft = -err;
387 goto out;
388 }
389
390 } else
391 dst_hold(dst);
392
393 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
394 tcp_sync_mss(sk, dst_mtu(dst));
395 tcp_simple_retransmit(sk);
396 } /* else let the usual retransmit timer handle it */
397 dst_release(dst);
398 goto out;
399 }
400
401 icmpv6_err_convert(type, code, &err);
402
403 /* Might be for a request_sock */
404 switch (sk->sk_state) {
405 struct request_sock *req, **prev;
406 case TCP_LISTEN:
407 if (sock_owned_by_user(sk))
408 goto out;
409
410 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
411 &hdr->saddr, inet6_iif(skb));
412 if (!req)
413 goto out;
414
415 /* ICMPs are not backlogged, hence we cannot get
416 * an established socket here.
417 */
418 BUG_TRAP(req->sk == NULL);
419
420 if (seq != tcp_rsk(req)->snt_isn) {
421 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
422 goto out;
423 }
424
425 inet_csk_reqsk_queue_drop(sk, req, prev);
426 goto out;
427
428 case TCP_SYN_SENT:
429 case TCP_SYN_RECV: /* Cannot happen.
430 It can, if SYNs are crossed. --ANK */
431 if (!sock_owned_by_user(sk)) {
432 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
433 sk->sk_err = err;
434 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
435
436 tcp_done(sk);
437 } else
438 sk->sk_err_soft = err;
439 goto out;
440 }
441
442 if (!sock_owned_by_user(sk) && np->recverr) {
443 sk->sk_err = err;
444 sk->sk_error_report(sk);
445 } else
446 sk->sk_err_soft = err;
447
448out:
449 bh_unlock_sock(sk);
450 sock_put(sk);
451}
452
453
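/*
 * Build and send the SYN+ACK for a request_sock.  If no dst is supplied
 * the flow is resolved here, taking a routing header from np->opt or from
 * the options saved with the request, then the segment produced by
 * tcp_make_synack() is checksummed and transmitted with ip6_xmit().
 */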
454static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
455 struct dst_entry *dst)
456{
457 struct inet6_request_sock *treq = inet6_rsk(req);
458 struct ipv6_pinfo *np = inet6_sk(sk);
459 struct sk_buff * skb;
460 struct ipv6_txoptions *opt = NULL;
461 struct in6_addr * final_p = NULL, final;
462 struct flowi fl;
463 int err = -1;
464
465 memset(&fl, 0, sizeof(fl));
466 fl.proto = IPPROTO_TCP;
467 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
468 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
469 fl.fl6_flowlabel = 0;
470 fl.oif = treq->iif;
471 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
472 fl.fl_ip_sport = inet_sk(sk)->sport;
473
474 if (dst == NULL) {
475 opt = np->opt;
476 if (opt == NULL &&
477 np->rxopt.bits.osrcrt == 2 &&
478 treq->pktopts) {
479 struct sk_buff *pktopts = treq->pktopts;
480 struct inet6_skb_parm *rxopt = IP6CB(pktopts);
481 if (rxopt->srcrt)
482 opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr*)(pktopts->nh.raw + rxopt->srcrt));
483 }
484
485 if (opt && opt->srcrt) {
486 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
487 ipv6_addr_copy(&final, &fl.fl6_dst);
488 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
489 final_p = &final;
490 }
491
492 err = ip6_dst_lookup(sk, &dst, &fl);
493 if (err)
494 goto done;
495 if (final_p)
496 ipv6_addr_copy(&fl.fl6_dst, final_p);
497 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
498 goto done;
499 }
500
501 skb = tcp_make_synack(sk, dst, req);
502 if (skb) {
503 struct tcphdr *th = skb->h.th;
504
505 th->check = tcp_v6_check(th, skb->len,
506 &treq->loc_addr, &treq->rmt_addr,
507 csum_partial((char *)th, skb->len, skb->csum));
508
509 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
510 err = ip6_xmit(sk, skb, &fl, opt, 0);
511 if (err == NET_XMIT_CN)
512 err = 0;
513 }
514
515done:
516 if (opt && opt != np->opt)
517 sock_kfree_s(sk, opt, opt->tot_len);
518 dst_release(dst);
519 return err;
520}
521
522static void tcp_v6_reqsk_destructor(struct request_sock *req)
523{
524 if (inet6_rsk(req)->pktopts)
525 kfree_skb(inet6_rsk(req)->pktopts);
526}
527
528static struct request_sock_ops tcp6_request_sock_ops = {
529 .family = AF_INET6,
530 .obj_size = sizeof(struct tcp6_request_sock),
531 .rtx_syn_ack = tcp_v6_send_synack,
532 .send_ack = tcp_v6_reqsk_send_ack,
533 .destructor = tcp_v6_reqsk_destructor,
534 .send_reset = tcp_v6_send_reset
535};
536
537static struct timewait_sock_ops tcp6_timewait_sock_ops = {
538 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
539 .twsk_unique = tcp_twsk_unique,
540};
541
542static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
543{
544 struct ipv6_pinfo *np = inet6_sk(sk);
545 struct tcphdr *th = skb->h.th;
546
547 if (skb->ip_summed == CHECKSUM_HW) {
548 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
549 skb->csum = offsetof(struct tcphdr, check);
550 } else {
551 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
552 csum_partial((char *)th, th->doff<<2,
553 skb->csum));
554 }
555}
556
557
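/*
 * Reply with a RST to a segment that has no owning socket.  The reply is
 * built from the offending skb's addresses and sequence numbers, is never
 * sent in answer to another RST, only to unicast destinations, and goes
 * out through the tcp6_socket control socket.
 */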
558static void tcp_v6_send_reset(struct sk_buff *skb)
559{
560 struct tcphdr *th = skb->h.th, *t1;
561 struct sk_buff *buff;
562 struct flowi fl;
563
564 if (th->rst)
565 return;
566
567 if (!ipv6_unicast_destination(skb))
568 return;
569
570 /*
571 * We need to grab some memory, and put together an RST,
572 * and then put it into the queue to be sent.
573 */
574
575 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr),
576 GFP_ATOMIC);
577 if (buff == NULL)
578 return;
579
580 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr));
581
582 t1 = (struct tcphdr *) skb_push(buff,sizeof(struct tcphdr));
583
584 /* Swap the send and the receive. */
585 memset(t1, 0, sizeof(*t1));
586 t1->dest = th->source;
587 t1->source = th->dest;
588 t1->doff = sizeof(*t1)/4;
589 t1->rst = 1;
590
591 if(th->ack) {
592 t1->seq = th->ack_seq;
593 } else {
594 t1->ack = 1;
595 t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
596 + skb->len - (th->doff<<2));
597 }
598
599 buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
600
601 memset(&fl, 0, sizeof(fl));
602 ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
603 ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);
604
605 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
606 sizeof(*t1), IPPROTO_TCP,
607 buff->csum);
608
609 fl.proto = IPPROTO_TCP;
610 fl.oif = inet6_iif(skb);
611 fl.fl_ip_dport = t1->dest;
612 fl.fl_ip_sport = t1->source;
613
614 /* sk = NULL, but it is safe for now. RST socket required. */
615 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
616
617 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
618 ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
619 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
620 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
621 return;
622 }
623 }
624
625 kfree_skb(buff);
626}
627
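/*
 * Send a bare ACK (optionally with a TCP timestamp option) built from the
 * incoming skb's addresses; used for TIME_WAIT and request_sock ACKs below.
 */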
628static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
629{
630 struct tcphdr *th = skb->h.th, *t1;
631 struct sk_buff *buff;
632 struct flowi fl;
633 int tot_len = sizeof(struct tcphdr);
634
635 if (ts)
636 tot_len += 3*4;
637
638 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
639 GFP_ATOMIC);
640 if (buff == NULL)
641 return;
642
643 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
644
645 t1 = (struct tcphdr *) skb_push(buff,tot_len);
646
647 /* Swap the send and the receive. */
648 memset(t1, 0, sizeof(*t1));
649 t1->dest = th->source;
650 t1->source = th->dest;
651 t1->doff = tot_len/4;
652 t1->seq = htonl(seq);
653 t1->ack_seq = htonl(ack);
654 t1->ack = 1;
655 t1->window = htons(win);
656
657 if (ts) {
658 u32 *ptr = (u32*)(t1 + 1);
659 *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
660 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
661 *ptr++ = htonl(tcp_time_stamp);
662 *ptr = htonl(ts);
663 }
664
665 buff->csum = csum_partial((char *)t1, tot_len, 0);
666
667 memset(&fl, 0, sizeof(fl));
668 ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
669 ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);
670
671 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
672 tot_len, IPPROTO_TCP,
673 buff->csum);
674
675 fl.proto = IPPROTO_TCP;
676 fl.oif = inet6_iif(skb);
677 fl.fl_ip_dport = t1->dest;
678 fl.fl_ip_sport = t1->source;
679
680 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
681 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
682 ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
683 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
684 return;
685 }
686 }
687
688 kfree_skb(buff);
689}
690
691static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
692{
693 struct inet_timewait_sock *tw = inet_twsk(sk);
694 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
695
696 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
697 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
698 tcptw->tw_ts_recent);
699
700 inet_twsk_put(tw);
701}
702
703static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
704{
705 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
706}
707
708
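/*
 * Match a segment received on a listening socket against pending
 * request_socks and the established hash: returns the listening socket
 * itself, a child/established socket that should handle the segment, or
 * NULL to drop it.
 */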
709static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
710{
711 struct request_sock *req, **prev;
712 const struct tcphdr *th = skb->h.th;
713 struct sock *nsk;
714
715 /* Find possible connection requests. */
716 req = inet6_csk_search_req(sk, &prev, th->source,
717 &skb->nh.ipv6h->saddr,
718 &skb->nh.ipv6h->daddr, inet6_iif(skb));
719 if (req)
720 return tcp_check_req(sk, skb, req, prev);
721
722 nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr,
723 th->source, &skb->nh.ipv6h->daddr,
724 ntohs(th->dest), inet6_iif(skb));
725
726 if (nsk) {
727 if (nsk->sk_state != TCP_TIME_WAIT) {
728 bh_lock_sock(nsk);
729 return nsk;
730 }
731 inet_twsk_put((struct inet_timewait_sock *)nsk);
732 return NULL;
733 }
734
735#if 0 /*def CONFIG_SYN_COOKIES*/
736 if (!th->rst && !th->syn && th->ack)
737 sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
738#endif
739 return sk;
740}
741
742/* FIXME: this is substantially similar to the ipv4 code.
743 * Can some kind of merge be done? -- erics
744 */
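/*
 * Passive open: allocate a request_sock for an incoming SYN, record the
 * peer addresses, the interface, and the SYN's IPv6 options when the
 * listener wants them, choose the initial sequence number and answer with
 * tcp_v6_send_synack().  v4-mapped SYNs are handed to tcp_v4_conn_request().
 */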
745static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
746{
747 struct inet6_request_sock *treq;
748 struct ipv6_pinfo *np = inet6_sk(sk);
749 struct tcp_options_received tmp_opt;
750 struct tcp_sock *tp = tcp_sk(sk);
751 struct request_sock *req = NULL;
752 __u32 isn = TCP_SKB_CB(skb)->when;
753
754 if (skb->protocol == htons(ETH_P_IP))
755 return tcp_v4_conn_request(sk, skb);
756
757 if (!ipv6_unicast_destination(skb))
758 goto drop;
759
760 /*
761 * There are no SYN attacks on IPv6, yet...
762 */
763 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
764 if (net_ratelimit())
765 printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
766 goto drop;
767 }
768
769 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
770 goto drop;
771
772 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
773 if (req == NULL)
774 goto drop;
775
776 tcp_clear_options(&tmp_opt);
777 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
778 tmp_opt.user_mss = tp->rx_opt.user_mss;
779
780 tcp_parse_options(skb, &tmp_opt, 0);
781
782 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
783 tcp_openreq_init(req, &tmp_opt, skb);
784
785 treq = inet6_rsk(req);
786 ipv6_addr_copy(&treq->rmt_addr, &skb->nh.ipv6h->saddr);
787 ipv6_addr_copy(&treq->loc_addr, &skb->nh.ipv6h->daddr);
788 TCP_ECN_create_request(req, skb->h.th);
789 treq->pktopts = NULL;
790 if (ipv6_opt_accepted(sk, skb) ||
791 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
792 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
793 atomic_inc(&skb->users);
794 treq->pktopts = skb;
795 }
796 treq->iif = sk->sk_bound_dev_if;
797
798 /* So that link locals have meaning */
799 if (!sk->sk_bound_dev_if &&
800 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
801 treq->iif = inet6_iif(skb);
802
803 if (isn == 0)
804 isn = tcp_v6_init_sequence(sk,skb);
805
806 tcp_rsk(req)->snt_isn = isn;
807
808 if (tcp_v6_send_synack(sk, req, NULL))
809 goto drop;
810
811 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
812 return 0;
813
814drop:
815 if (req)
816 reqsk_free(req);
817
818 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
819 return 0; /* don't send reset */
820}
821
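/*
 * Create the child socket once the handshake completes.  The v4-mapped
 * branch clones an IPv4 child and switches it to the ipv6_mapped ops; the
 * native branch resolves the route back to the peer, copies addresses and
 * pktoptions from the request, duplicates the listener's IPv6 options and
 * hashes the new socket.
 */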
822static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
823 struct request_sock *req,
824 struct dst_entry *dst)
825{
826 struct inet6_request_sock *treq = inet6_rsk(req);
827 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
828 struct tcp6_sock *newtcp6sk;
829 struct inet_sock *newinet;
830 struct tcp_sock *newtp;
831 struct sock *newsk;
832 struct ipv6_txoptions *opt;
833
834 if (skb->protocol == htons(ETH_P_IP)) {
835 /*
836 * v6 mapped
837 */
838
839 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
840
841 if (newsk == NULL)
842 return NULL;
843
844 newtcp6sk = (struct tcp6_sock *)newsk;
845 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
846
847 newinet = inet_sk(newsk);
848 newnp = inet6_sk(newsk);
849 newtp = tcp_sk(newsk);
850
851 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
852
853 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
854 newinet->daddr);
855
856 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
857 newinet->saddr);
858
859 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
860
861 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
862 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
863 newnp->pktoptions = NULL;
864 newnp->opt = NULL;
865 newnp->mcast_oif = inet6_iif(skb);
866 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
867
868 /*
869 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
870 * here, tcp_create_openreq_child now does this for us, see the comment in
871 * that function for the gory details. -acme
872 */
873
874 /* It is a tricky place. Until this moment IPv4 tcp
875 worked with IPv6 icsk.icsk_af_ops.
876 Sync it now.
877 */
878 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
879
880 return newsk;
881 }
882
883 opt = np->opt;
884
885 if (sk_acceptq_is_full(sk))
886 goto out_overflow;
887
888 if (np->rxopt.bits.osrcrt == 2 &&
889 opt == NULL && treq->pktopts) {
890 struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts);
891 if (rxopt->srcrt)
892 opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(treq->pktopts->nh.raw + rxopt->srcrt));
893 }
894
895 if (dst == NULL) {
896 struct in6_addr *final_p = NULL, final;
897 struct flowi fl;
898
899 memset(&fl, 0, sizeof(fl));
900 fl.proto = IPPROTO_TCP;
901 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
902 if (opt && opt->srcrt) {
903 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
904 ipv6_addr_copy(&final, &fl.fl6_dst);
905 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
906 final_p = &final;
907 }
908 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
909 fl.oif = sk->sk_bound_dev_if;
910 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
911 fl.fl_ip_sport = inet_sk(sk)->sport;
912
913 if (ip6_dst_lookup(sk, &dst, &fl))
914 goto out;
915
916 if (final_p)
917 ipv6_addr_copy(&fl.fl6_dst, final_p);
918
919 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
920 goto out;
921 }
922
923 newsk = tcp_create_openreq_child(sk, req, skb);
924 if (newsk == NULL)
925 goto out;
926
927 /*
928 * No need to charge this sock to the relevant IPv6 refcnt debug socks
929 * count here, tcp_create_openreq_child now does this for us, see the
930 * comment in that function for the gory details. -acme
931 */
932
933 ip6_dst_store(newsk, dst, NULL);
934 newsk->sk_route_caps = dst->dev->features &
935 ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
936
937 newtcp6sk = (struct tcp6_sock *)newsk;
938 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
939
940 newtp = tcp_sk(newsk);
941 newinet = inet_sk(newsk);
942 newnp = inet6_sk(newsk);
943
944 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
945
946 ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
947 ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
948 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
949 newsk->sk_bound_dev_if = treq->iif;
950
951 /* Now IPv6 options...
952
953 First: no IPv4 options.
954 */
955 newinet->opt = NULL;
956
957 /* Clone RX bits */
958 newnp->rxopt.all = np->rxopt.all;
959
960 /* Clone pktoptions received with SYN */
961 newnp->pktoptions = NULL;
962 if (treq->pktopts != NULL) {
963 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
964 kfree_skb(treq->pktopts);
965 treq->pktopts = NULL;
966 if (newnp->pktoptions)
967 skb_set_owner_r(newnp->pktoptions, newsk);
968 }
969 newnp->opt = NULL;
970 newnp->mcast_oif = inet6_iif(skb);
971 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
972
973 /* Clone native IPv6 options from listening socket (if any)
974
975 Yes, keeping reference count would be much more clever,
976 but we make one more one thing there: reattach optmem
977 to newsk.
978 */
979 if (opt) {
980 newnp->opt = ipv6_dup_options(newsk, opt);
981 if (opt != np->opt)
982 sock_kfree_s(sk, opt, opt->tot_len);
983 }
984
985 inet_csk(newsk)->icsk_ext_hdr_len = 0;
986 if (newnp->opt)
987 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
988 newnp->opt->opt_flen);
989
990 tcp_mtup_init(newsk);
991 tcp_sync_mss(newsk, dst_mtu(dst));
992 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
993 tcp_initialize_rcv_mss(newsk);
994
995 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
996
997 __inet6_hash(&tcp_hashinfo, newsk);
998 inet_inherit_port(&tcp_hashinfo, sk, newsk);
999
1000 return newsk;
1001
1002out_overflow:
1003 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1004out:
1005 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1006 if (opt && opt != np->opt)
1007 sock_kfree_s(sk, opt, opt->tot_len);
1008 dst_release(dst);
1009 return NULL;
1010}
1011
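/*
 * Checksum setup on receive: accept hardware-verified segments directly,
 * verify short segments (<= 76 bytes) immediately, and otherwise leave
 * skb->csum primed with the pseudo-header for deferred checksumming.
 */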
1012static int tcp_v6_checksum_init(struct sk_buff *skb)
1013{
1014 if (skb->ip_summed == CHECKSUM_HW) {
1015 if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
1016 &skb->nh.ipv6h->daddr,skb->csum)) {
1017 skb->ip_summed = CHECKSUM_UNNECESSARY;
1018 return 0;
1019 }
1020 }
1021
1022 skb->csum = ~tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
1023 &skb->nh.ipv6h->daddr, 0);
1024
1025 if (skb->len <= 76) {
1026 return __skb_checksum_complete(skb);
1027 }
1028 return 0;
1029}
1030
1031/* The socket must have its spinlock held when we get
1032 * here.
1033 *
1034 * We have a potential double-lock case here, so even when
1035 * doing backlog processing we use the BH locking scheme.
1036 * This is because we cannot sleep with the original spinlock
1037 * held.
1038 */
1039static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1040{
1041 struct ipv6_pinfo *np = inet6_sk(sk);
1042 struct tcp_sock *tp;
1043 struct sk_buff *opt_skb = NULL;
1044
1045 /* Imagine: socket is IPv6. IPv4 packet arrives,
1046 goes to IPv4 receive handler and backlogged.
1047 From backlog it always goes here. Kerboom...
1048 Fortunately, tcp_rcv_established and rcv_established
1049 handle them correctly, but it is not case with
1050 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1051 */
1052
1053 if (skb->protocol == htons(ETH_P_IP))
1054 return tcp_v4_do_rcv(sk, skb);
1055
1056 if (sk_filter(sk, skb, 0))
1057 goto discard;
1058
1059 /*
1060 * socket locking is here for SMP purposes as backlog rcv
1061 * is currently called with bh processing disabled.
1062 */
1063
1064 /* Do Stevens' IPV6_PKTOPTIONS.
1065
1066 Yes, guys, it is the only place in our code, where we
1067 may make it not affecting IPv4.
1068 The rest of code is protocol independent,
1069 and I do not like idea to uglify IPv4.
1070
1071 Actually, all the idea behind IPV6_PKTOPTIONS
1072 looks not very well thought. For now we latch
1073 options, received in the last packet, enqueued
1074 by tcp. Feel free to propose better solution.
1075 --ANK (980728)
1076 */
1077 if (np->rxopt.all)
1078 opt_skb = skb_clone(skb, GFP_ATOMIC);
1079
1080 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1081 TCP_CHECK_TIMER(sk);
1082 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1083 goto reset;
1084 TCP_CHECK_TIMER(sk);
1085 if (opt_skb)
1086 goto ipv6_pktoptions;
1087 return 0;
1088 }
1089
1090 if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
1091 goto csum_err;
1092
1093 if (sk->sk_state == TCP_LISTEN) {
1094 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1095 if (!nsk)
1096 goto discard;
1097
1098 /*
1099 * Queue it on the new socket if the new socket is active,
1100 * otherwise we just shortcircuit this and continue with
1101 * the new socket..
1102 */
1103 if(nsk != sk) {
1104 if (tcp_child_process(sk, nsk, skb))
1105 goto reset;
1106 if (opt_skb)
1107 __kfree_skb(opt_skb);
1108 return 0;
1109 }
1110 }
1111
1112 TCP_CHECK_TIMER(sk);
1113 if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1114 goto reset;
1115 TCP_CHECK_TIMER(sk);
1116 if (opt_skb)
1117 goto ipv6_pktoptions;
1118 return 0;
1119
1120reset:
1121 tcp_v6_send_reset(skb);
1122discard:
1123 if (opt_skb)
1124 __kfree_skb(opt_skb);
1125 kfree_skb(skb);
1126 return 0;
1127csum_err:
1128 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1129 goto discard;
1130
1131
1132ipv6_pktoptions:
1133 /* Do you ask, what is it?
1134
1135 1. skb was enqueued by tcp.
1136 2. skb is added to tail of read queue, rather than out of order.
1137 3. socket is not in passive state.
1138 4. Finally, it really contains options, which user wants to receive.
1139 */
1140 tp = tcp_sk(sk);
1141 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1142 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1143 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1144 np->mcast_oif = inet6_iif(opt_skb);
1145 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1146 np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
1147 if (ipv6_opt_accepted(sk, opt_skb)) {
1148 skb_set_owner_r(opt_skb, sk);
1149 opt_skb = xchg(&np->pktoptions, opt_skb);
1150 } else {
1151 __kfree_skb(opt_skb);
1152 opt_skb = xchg(&np->pktoptions, NULL);
1153 }
1154 }
1155
1156 if (opt_skb)
1157 kfree_skb(opt_skb);
1158 return 0;
1159}
1160
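/*
 * Main receive entry point (registered in tcpv6_protocol): sanity-check
 * the header and checksum, look the socket up in tcp_hashinfo, apply xfrm
 * policy and the socket filter, then process, prequeue or backlog the
 * segment depending on who owns the socket; TIME_WAIT sockets go through
 * tcp_timewait_state_process().
 */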
1161static int tcp_v6_rcv(struct sk_buff **pskb)
1162{
1163 struct sk_buff *skb = *pskb;
1164 struct tcphdr *th;
1165 struct sock *sk;
1166 int ret;
1167
1168 if (skb->pkt_type != PACKET_HOST)
1169 goto discard_it;
1170
1171 /*
1172 * Count it even if it's bad.
1173 */
1174 TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1175
1176 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1177 goto discard_it;
1178
1179 th = skb->h.th;
1180
1181 if (th->doff < sizeof(struct tcphdr)/4)
1182 goto bad_packet;
1183 if (!pskb_may_pull(skb, th->doff*4))
1184 goto discard_it;
1185
1186 if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1187 tcp_v6_checksum_init(skb)))
1188 goto bad_packet;
1189
1190 th = skb->h.th;
1191 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1192 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1193 skb->len - th->doff*4);
1194 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1195 TCP_SKB_CB(skb)->when = 0;
1196 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h);
1197 TCP_SKB_CB(skb)->sacked = 0;
1198
1199 sk = __inet6_lookup(&tcp_hashinfo, &skb->nh.ipv6h->saddr, th->source,
1200 &skb->nh.ipv6h->daddr, ntohs(th->dest),
1201 inet6_iif(skb));
1202
1203 if (!sk)
1204 goto no_tcp_socket;
1205
1206process:
1207 if (sk->sk_state == TCP_TIME_WAIT)
1208 goto do_time_wait;
1209
1210 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1211 goto discard_and_relse;
1212
1213 if (sk_filter(sk, skb, 0))
1214 goto discard_and_relse;
1215
1216 skb->dev = NULL;
1217
1218 bh_lock_sock(sk);
1219 ret = 0;
1220 if (!sock_owned_by_user(sk)) {
1221#ifdef CONFIG_NET_DMA
1222 struct tcp_sock *tp = tcp_sk(sk);
1223 if (tp->ucopy.dma_chan)
1224 ret = tcp_v6_do_rcv(sk, skb);
1225 else
1226#endif
1227 {
1228 if (!tcp_prequeue(sk, skb))
1229 ret = tcp_v6_do_rcv(sk, skb);
1230 }
1231 } else
1232 sk_add_backlog(sk, skb);
1233 bh_unlock_sock(sk);
1234
1235 sock_put(sk);
1236 return ret ? -1 : 0;
1237
1238no_tcp_socket:
1239 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1240 goto discard_it;
1241
1242 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1243bad_packet:
1244 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1245 } else {
1246 tcp_v6_send_reset(skb);
1247 }
1248
1249discard_it:
1250
1251 /*
1252 * Discard frame
1253 */
1254
1255 kfree_skb(skb);
1256 return 0;
1257
1258discard_and_relse:
1259 sock_put(sk);
1260 goto discard_it;
1261
1262do_time_wait:
1263 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1264 inet_twsk_put((struct inet_timewait_sock *)sk);
1265 goto discard_it;
1266 }
1267
1268 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1269 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1270 inet_twsk_put((struct inet_timewait_sock *)sk);
1271 goto discard_it;
1272 }
1273
1274 switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
1275 skb, th)) {
1276 case TCP_TW_SYN:
1277 {
1278 struct sock *sk2;
1279
1280 sk2 = inet6_lookup_listener(&tcp_hashinfo,
1281 &skb->nh.ipv6h->daddr,
1282 ntohs(th->dest), inet6_iif(skb));
1283 if (sk2 != NULL) {
1284 struct inet_timewait_sock *tw = inet_twsk(sk);
1285 inet_twsk_deschedule(tw, &tcp_death_row);
1286 inet_twsk_put(tw);
1287 sk = sk2;
1288 goto process;
1289 }
1290 /* Fall through to ACK */
1291 }
1292 case TCP_TW_ACK:
1293 tcp_v6_timewait_ack(sk, skb);
1294 break;
1295 case TCP_TW_RST:
1296 goto no_tcp_socket;
1297 case TCP_TW_SUCCESS:;
1298 }
1299 goto discard_it;
1300}
1301
1302static int tcp_v6_remember_stamp(struct sock *sk)
1303{
1304 /* Alas, not yet... */
1305 return 0;
1306}
1307
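/*
 * Address-family specific operations: ipv6_specific drives native IPv6
 * connections, while ipv6_mapped (below) is installed on a socket that
 * ends up speaking IPv4 through a v4-mapped address.
 */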
1308static struct inet_connection_sock_af_ops ipv6_specific = {
1309 .queue_xmit = inet6_csk_xmit,
1310 .send_check = tcp_v6_send_check,
1311 .rebuild_header = inet6_sk_rebuild_header,
1312 .conn_request = tcp_v6_conn_request,
1313 .syn_recv_sock = tcp_v6_syn_recv_sock,
1314 .remember_stamp = tcp_v6_remember_stamp,
1315 .net_header_len = sizeof(struct ipv6hdr),
1316 .setsockopt = ipv6_setsockopt,
1317 .getsockopt = ipv6_getsockopt,
1318 .addr2sockaddr = inet6_csk_addr2sockaddr,
1319 .sockaddr_len = sizeof(struct sockaddr_in6),
1320#ifdef CONFIG_COMPAT
1321 .compat_setsockopt = compat_ipv6_setsockopt,
1322 .compat_getsockopt = compat_ipv6_getsockopt,
1323#endif
1324};
1325
1326/*
1327 * TCP over IPv4 via INET6 API
1328 */
1329
1330static struct inet_connection_sock_af_ops ipv6_mapped = {
1331 .queue_xmit = ip_queue_xmit,
1332 .send_check = tcp_v4_send_check,
1333 .rebuild_header = inet_sk_rebuild_header,
1334 .conn_request = tcp_v6_conn_request,
1335 .syn_recv_sock = tcp_v6_syn_recv_sock,
1336 .remember_stamp = tcp_v4_remember_stamp,
1337 .net_header_len = sizeof(struct iphdr),
1338 .setsockopt = ipv6_setsockopt,
1339 .getsockopt = ipv6_getsockopt,
1340 .addr2sockaddr = inet6_csk_addr2sockaddr,
1341 .sockaddr_len = sizeof(struct sockaddr_in6),
1342#ifdef CONFIG_COMPAT
1343 .compat_setsockopt = compat_ipv6_setsockopt,
1344 .compat_getsockopt = compat_ipv6_getsockopt,
1345#endif
1346};
1347
1348/* NOTE: A lot of things set to zero explicitly by call to
1349 * sk_alloc() so need not be done here.
1350 */
1351static int tcp_v6_init_sock(struct sock *sk)
1352{
1353 struct inet_connection_sock *icsk = inet_csk(sk);
1354 struct tcp_sock *tp = tcp_sk(sk);
1355
1356 skb_queue_head_init(&tp->out_of_order_queue);
1357 tcp_init_xmit_timers(sk);
1358 tcp_prequeue_init(tp);
1359
1360 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1361 tp->mdev = TCP_TIMEOUT_INIT;
1362
1363 /* So many TCP implementations out there (incorrectly) count the
1364 * initial SYN frame in their delayed-ACK and congestion control
1365 * algorithms that we must have the following bandaid to talk
1366 * efficiently to them. -DaveM
1367 */
1368 tp->snd_cwnd = 2;
1369
1370 /* See draft-stevens-tcpca-spec-01 for discussion of the
1371 * initialization of these values.
1372 */
1373 tp->snd_ssthresh = 0x7fffffff;
1374 tp->snd_cwnd_clamp = ~0;
1375 tp->mss_cache = 536;
1376
1377 tp->reordering = sysctl_tcp_reordering;
1378
1379 sk->sk_state = TCP_CLOSE;
1380
1381 icsk->icsk_af_ops = &ipv6_specific;
1382 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1383 icsk->icsk_sync_mss = tcp_sync_mss;
1384 sk->sk_write_space = sk_stream_write_space;
1385 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1386
1387 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1388 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1389
1390 atomic_inc(&tcp_sockets_allocated);
1391
1392 return 0;
1393}
1394
1395static int tcp_v6_destroy_sock(struct sock *sk)
1396{
1397 tcp_v4_destroy_sock(sk);
1398 return inet6_destroy_sock(sk);
1399}
1400
1401/* Proc filesystem TCPv6 sock list dumping. */
1402static void get_openreq6(struct seq_file *seq,
1403 struct sock *sk, struct request_sock *req, int i, int uid)
1404{
1405 int ttd = req->expires - jiffies;
1406 struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1407 struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1408
1409 if (ttd < 0)
1410 ttd = 0;
1411
1412 seq_printf(seq,
1413 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1414 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1415 i,
1416 src->s6_addr32[0], src->s6_addr32[1],
1417 src->s6_addr32[2], src->s6_addr32[3],
1418 ntohs(inet_sk(sk)->sport),
1419 dest->s6_addr32[0], dest->s6_addr32[1],
1420 dest->s6_addr32[2], dest->s6_addr32[3],
1421 ntohs(inet_rsk(req)->rmt_port),
1422 TCP_SYN_RECV,
1423 0,0, /* could print option size, but that is af dependent. */
1424 1, /* timers active (only the expire timer) */
1425 jiffies_to_clock_t(ttd),
1426 req->retrans,
1427 uid,
1428 0, /* non standard timer */
1429 0, /* open_requests have no inode */
1430 0, req);
1431}
1432
1433static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1434{
1435 struct in6_addr *dest, *src;
1436 __u16 destp, srcp;
1437 int timer_active;
1438 unsigned long timer_expires;
1439 struct inet_sock *inet = inet_sk(sp);
1440 struct tcp_sock *tp = tcp_sk(sp);
1441 const struct inet_connection_sock *icsk = inet_csk(sp);
1442 struct ipv6_pinfo *np = inet6_sk(sp);
1443
1444 dest = &np->daddr;
1445 src = &np->rcv_saddr;
1446 destp = ntohs(inet->dport);
1447 srcp = ntohs(inet->sport);
1448
1449 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1450 timer_active = 1;
1451 timer_expires = icsk->icsk_timeout;
1452 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1453 timer_active = 4;
1454 timer_expires = icsk->icsk_timeout;
1455 } else if (timer_pending(&sp->sk_timer)) {
1456 timer_active = 2;
1457 timer_expires = sp->sk_timer.expires;
1458 } else {
1459 timer_active = 0;
1460 timer_expires = jiffies;
1461 }
1462
1463 seq_printf(seq,
1464 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1465 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
1466 i,
1467 src->s6_addr32[0], src->s6_addr32[1],
1468 src->s6_addr32[2], src->s6_addr32[3], srcp,
1469 dest->s6_addr32[0], dest->s6_addr32[1],
1470 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1471 sp->sk_state,
1472 tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq,
1473 timer_active,
1474 jiffies_to_clock_t(timer_expires - jiffies),
1475 icsk->icsk_retransmits,
1476 sock_i_uid(sp),
1477 icsk->icsk_probes_out,
1478 sock_i_ino(sp),
1479 atomic_read(&sp->sk_refcnt), sp,
1480 icsk->icsk_rto,
1481 icsk->icsk_ack.ato,
1482 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
1483 tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
1484 );
1485}
1486
1487static void get_timewait6_sock(struct seq_file *seq,
1488 struct inet_timewait_sock *tw, int i)
1489{
1490 struct in6_addr *dest, *src;
1491 __u16 destp, srcp;
1492 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1493 int ttd = tw->tw_ttd - jiffies;
1494
1495 if (ttd < 0)
1496 ttd = 0;
1497
1498 dest = &tw6->tw_v6_daddr;
1499 src = &tw6->tw_v6_rcv_saddr;
1500 destp = ntohs(tw->tw_dport);
1501 srcp = ntohs(tw->tw_sport);
1502
1503 seq_printf(seq,
1504 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1505 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1506 i,
1507 src->s6_addr32[0], src->s6_addr32[1],
1508 src->s6_addr32[2], src->s6_addr32[3], srcp,
1509 dest->s6_addr32[0], dest->s6_addr32[1],
1510 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1511 tw->tw_substate, 0, 0,
1512 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
1513 atomic_read(&tw->tw_refcnt), tw);
1514}
1515
1516#ifdef CONFIG_PROC_FS
1517static int tcp6_seq_show(struct seq_file *seq, void *v)
1518{
1519 struct tcp_iter_state *st;
1520
1521 if (v == SEQ_START_TOKEN) {
1522 seq_puts(seq,
1523 " sl "
1524 "local_address "
1525 "remote_address "
1526 "st tx_queue rx_queue tr tm->when retrnsmt"
1527 " uid timeout inode\n");
1528 goto out;
1529 }
1530 st = seq->private;
1531
1532 switch (st->state) {
1533 case TCP_SEQ_STATE_LISTENING:
1534 case TCP_SEQ_STATE_ESTABLISHED:
1535 get_tcp6_sock(seq, v, st->num);
1536 break;
1537 case TCP_SEQ_STATE_OPENREQ:
1538 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1539 break;
1540 case TCP_SEQ_STATE_TIME_WAIT:
1541 get_timewait6_sock(seq, v, st->num);
1542 break;
1543 }
1544out:
1545 return 0;
1546}
1547
1548static struct file_operations tcp6_seq_fops;
1549static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1550 .owner = THIS_MODULE,
1551 .name = "tcp6",
1552 .family = AF_INET6,
1553 .seq_show = tcp6_seq_show,
1554 .seq_fops = &tcp6_seq_fops,
1555};
1556
1557int __init tcp6_proc_init(void)
1558{
1559 return tcp_proc_register(&tcp6_seq_afinfo);
1560}
1561
1562void tcp6_proc_exit(void)
1563{
1564 tcp_proc_unregister(&tcp6_seq_afinfo);
1565}
1566#endif
1567
1568struct proto tcpv6_prot = {
1569 .name = "TCPv6",
1570 .owner = THIS_MODULE,
1571 .close = tcp_close,
1572 .connect = tcp_v6_connect,
1573 .disconnect = tcp_disconnect,
1574 .accept = inet_csk_accept,
1575 .ioctl = tcp_ioctl,
1576 .init = tcp_v6_init_sock,
1577 .destroy = tcp_v6_destroy_sock,
1578 .shutdown = tcp_shutdown,
1579 .setsockopt = tcp_setsockopt,
1580 .getsockopt = tcp_getsockopt,
1581 .sendmsg = tcp_sendmsg,
1582 .recvmsg = tcp_recvmsg,
1583 .backlog_rcv = tcp_v6_do_rcv,
1584 .hash = tcp_v6_hash,
1585 .unhash = tcp_unhash,
1586 .get_port = tcp_v6_get_port,
1587 .enter_memory_pressure = tcp_enter_memory_pressure,
1588 .sockets_allocated = &tcp_sockets_allocated,
1589 .memory_allocated = &tcp_memory_allocated,
1590 .memory_pressure = &tcp_memory_pressure,
1591 .orphan_count = &tcp_orphan_count,
1592 .sysctl_mem = sysctl_tcp_mem,
1593 .sysctl_wmem = sysctl_tcp_wmem,
1594 .sysctl_rmem = sysctl_tcp_rmem,
1595 .max_header = MAX_TCP_HEADER,
1596 .obj_size = sizeof(struct tcp6_sock),
1597 .twsk_prot = &tcp6_timewait_sock_ops,
1598 .rsk_prot = &tcp6_request_sock_ops,
1599#ifdef CONFIG_COMPAT
1600 .compat_setsockopt = compat_tcp_setsockopt,
1601 .compat_getsockopt = compat_tcp_getsockopt,
1602#endif
1603};
1604
1605static struct inet6_protocol tcpv6_protocol = {
1606 .handler = tcp_v6_rcv,
1607 .err_handler = tcp_v6_err,
1608 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1609};
1610
1611static struct inet_protosw tcpv6_protosw = {
1612 .type = SOCK_STREAM,
1613 .protocol = IPPROTO_TCP,
1614 .prot = &tcpv6_prot,
1615 .ops = &inet6_stream_ops,
1616 .capability = -1,
1617 .no_check = 0,
1618 .flags = INET_PROTOSW_PERMANENT |
1619 INET_PROTOSW_ICSK,
1620};
1621
1622void __init tcpv6_init(void)
1623{
1624 /* register inet6 protocol */
1625 if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
1626 printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
1627 inet6_register_protosw(&tcpv6_protosw);
1628
1629 if (inet_csk_ctl_sock_create(&tcp6_socket, PF_INET6, SOCK_RAW,
1630 IPPROTO_TCP) < 0)
1631 panic("Failed to create the TCPv6 control socket.\n");
1632}