/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/addrconf.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Socket used for sending RSTs and ACKs */
static struct socket *tcp6_socket;

static void tcp_v6_send_reset(struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
static void tcp_v6_send_check(struct sock *sk, int len,
			      struct sk_buff *skb);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static struct inet_connection_sock_af_ops ipv6_mapped;
static struct inet_connection_sock_af_ops ipv6_specific;

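/* Bind to a local port: defer to the generic inet_csk_get_port(), passing
 * inet6_csk_bind_conflict() so that clashes with both native IPv6 and
 * IPv4-mapped sockets sharing the port are detected.
 */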
static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
{
	return inet_csk_get_port(&tcp_hashinfo, sk, snum,
				 inet6_csk_bind_conflict);
}

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(&tcp_hashinfo, sk);
		local_bh_enable();
	}
}

static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
				   struct in6_addr *saddr,
				   struct in6_addr *daddr,
				   unsigned long base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
					    skb->nh.ipv6h->saddr.s6_addr32,
					    skb->h.th->dest,
					    skb->h.th->source);
}

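/* Active open.  Validates the destination address (handling flow labels and
 * the v4-mapped case, which is handed off to tcp_v4_connect()), performs the
 * route/xfrm lookup, picks the source address and initial sequence number,
 * and finally hashes the socket and sends the SYN via tcp_connect().
 */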
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return(-EAFNOSUPPORT);

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	security_sk_classify_flow(sk, &fl);

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto failure;

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->sport,
							     inet->dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

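/* ICMPv6 error handler.  For ICMPV6_PKT_TOOBIG the cached route is refreshed
 * and the MSS shrunk via tcp_sync_mss(); other errors are converted with
 * icmpv6_err_convert() and reported to the socket, dropping any matching
 * request_sock on a listener.
 */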
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       int type, int code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;

	sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
			  th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle the rthdr case.  Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;
			security_skb_classify_flow(skb, &fl);

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		BUG_TRAP(req->sk == NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

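/* Build and transmit a SYN|ACK for a queued connection request.  When no
 * destination cache entry is supplied, the flow is routed here, honouring a
 * routing header taken from the listener's options or from the saved SYN.
 */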
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct dst_entry *dst)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p = NULL, final;
	struct flowi fl;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_sk(sk)->sport;
	security_req_classify_flow(req, &fl);

	if (dst == NULL) {
		opt = np->opt;
		if (opt == NULL &&
		    np->rxopt.bits.osrcrt == 2 &&
		    treq->pktopts) {
			struct sk_buff *pktopts = treq->pktopts;
			struct inet6_skb_parm *rxopt = IP6CB(pktopts);
			if (rxopt->srcrt)
				opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(pktopts->nh.raw + rxopt->srcrt));
		}

		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}

		err = ip6_dst_lookup(sk, &dst, &fl);
		if (err)
			goto done;
		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);
		if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto done;
	}

	skb = tcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = skb->h.th;

		th->check = tcp_v6_check(th, skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial((char *)th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		if (err == NET_XMIT_CN)
			err = 0;
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	if (inet6_rsk(req)->pktopts)
		kfree_skb(inet6_rsk(req)->pktopts);
}

static struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		= AF_INET6,
	.obj_size	= sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	= tcp_v6_send_synack,
	.send_ack	= tcp_v6_reqsk_send_ack,
	.destructor	= tcp_v6_reqsk_destructor,
	.send_reset	= tcp_v6_send_reset
};

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
};

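/* Fill in the TCP checksum on transmit.  With CHECKSUM_PARTIAL only the
 * pseudo-header sum is stored and the device completes it; otherwise the
 * full checksum is computed in software.
 */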
static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcphdr *th = skb->h.th;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
		skb->csum = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
					    csum_partial((char *)th, th->doff<<2,
							 skb->csum));
	}
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = skb->nh.ipv6h;
	th = skb->h.th;

	th->check = 0;
	th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
				     IPPROTO_TCP, 0);
	skb->csum = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}

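/* Send a RST in response to a segment that does not belong to any socket.
 * The reply is built from the offending segment with addresses and ports
 * swapped and is transmitted through the private tcp6_socket control socket.
 */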
static void tcp_v6_send_reset(struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

	/*
	 * We need to grab some memory, and put together an RST,
	 * and then put it into the queue to be sent.
	 */

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr),
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr));

	t1 = (struct tcphdr *) skb_push(buff, sizeof(struct tcphdr));

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = sizeof(*t1)/4;
	t1->rst = 1;

	if (th->ack) {
		t1->seq = th->ack_seq;
	} else {
		t1->ack = 1;
		t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
				    + skb->len - (th->doff<<2));
	}

	buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    sizeof(*t1), IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	/* sk = NULL, but it is safe for now. RST socket required. */
	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {

		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(buff);
}

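/* Send a bare ACK (used for TIME_WAIT segments and request_sock ACKs),
 * optionally carrying a TCP timestamp option when ts is non-zero.
 */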
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;
	int tot_len = sizeof(struct tcphdr);

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len/4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = 1;
	t1->window = htons(win);

	if (ts) {
		u32 *ptr = (u32 *)(t1 + 1);
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tcp_time_stamp);
		*ptr = htonl(ts);
	}

	buff->csum = csum_partial((char *)t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			return;
		}
	}

	kfree_skb(buff);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
}


static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = skb->h.th;
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &skb->nh.ipv6h->saddr,
				   &skb->nh.ipv6h->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr,
					 th->source, &skb->nh.ipv6h->daddr,
					 ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#if 0 /*def CONFIG_SYN_COOKIES*/
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock *req = NULL;
	__u32 isn = TCP_SKB_CB(skb)->when;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	/*
	 *	There are no SYN attacks on IPv6, yet...
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&treq->loc_addr, &skb->nh.ipv6h->daddr);
	TCP_ECN_create_request(req, skb->h.th);
	treq->pktopts = NULL;
	if (ipv6_opt_accepted(sk, skb) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		treq->pktopts = skb;
	}
	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (isn == 0)
		isn = tcp_v6_init_sequence(skb);

	tcp_rsk(req)->snt_isn = isn;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req, NULL))
		goto drop;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop:
	if (req)
		reqsk_free(req);

	return 0; /* don't send reset */
}

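/* Create the child socket once the three-way handshake completes.  The
 * v4-mapped case is cloned via tcp_v4_syn_recv_sock() and then switched to
 * the ipv6_mapped operations; the native case routes the flow, copies the
 * addresses and IPv6 options into the child and hashes it.
 */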
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet6_iif(skb);
		newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (np->rxopt.bits.osrcrt == 2 &&
	    opt == NULL && treq->pktopts) {
		struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts);
		if (rxopt->srcrt)
			opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(treq->pktopts->nh.raw + rxopt->srcrt));
	}

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_sk(sk)->sport;
		security_req_classify_flow(req, &fl);

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach the optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

	__inet6_hash(&tcp_hashinfo, newsk);
	inet_inherit_port(&tcp_hashinfo, sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}

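/* Verify the checksum of an incoming segment.  A CHECKSUM_COMPLETE value is
 * checked against the pseudo-header immediately; otherwise the pseudo-header
 * sum is stored in skb->csum and short packets (<= 76 bytes) are verified
 * right away, leaving longer ones for later completion.
 */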
static int tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->h.th, skb->len, &skb->nh.ipv6h->saddr,
				  &skb->nh.ipv6h->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~tcp_v6_check(skb->h.th, skb->len, &skb->nh.ipv6h->saddr,
				  &skb->nh.ipv6h->daddr, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
	   --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	if (opt_skb)
		kfree_skb(opt_skb);
	return 0;
}

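/* Main receive entry point, registered as the handler in tcpv6_protocol.
 * It validates the header and checksum, looks the socket up in the
 * established/listening hashes and then delivers the segment directly,
 * through the prequeue, or onto the backlog depending on who owns the sock.
 */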
static int tcp_v6_rcv(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = skb->h.th;

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
	     tcp_v6_checksum_init(skb)))
		goto bad_packet;

	th = skb->h.th;
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup(&tcp_hashinfo, &skb->nh.ipv6h->saddr, th->source,
			    &skb->nh.ipv6h->daddr, ntohs(th->dest),
			    inet6_iif(skb));

	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(skb);
	}

discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(&tcp_hashinfo,
					    &skb->nh.ipv6h->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}

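/* AF-specific connection-socket operations for native IPv6 sockets;
 * ipv6_mapped below mirrors this table for IPv4-mapped destinations,
 * substituting the IPv4 transmit, checksum and header-length helpers.
 */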
static struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v6_remember_stamp,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/*
 *	TCP over IPv4 via INET6 API
 */

static struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}

static int tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	return inet6_destroy_sock(sk);
}

/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_sk(sk)->sport),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,    /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,    /* non standard timer */
		   0,    /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest = &np->daddr;
	src = &np->rcv_saddr;
	destp = ntohs(inet->dport);
	srcp = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active = 1;
		timer_expires = icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active = 4;
		timer_expires = icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active = 2;
		timer_expires = sp->sk_timer.expires;
	} else {
		timer_active = 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   icsk->icsk_rto,
		   icsk->icsk_ack.ato,
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd, tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

#ifdef CONFIG_PROC_FS
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static struct file_operations tcp6_seq_fops;
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_show	= tcp6_seq_show,
	.seq_fops	= &tcp6_seq_fops,
};

int __init tcp6_proc_init(void)
{
	return tcp_proc_register(&tcp6_seq_afinfo);
}

void tcp6_proc_exit(void)
{
	tcp_proc_unregister(&tcp6_seq_afinfo);
}
#endif

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.sendmsg		= tcp_sendmsg,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= tcp_unhash,
	.get_port		= tcp_v6_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};

static struct inet6_protocol tcpv6_protocol = {
	.handler	= tcp_v6_rcv,
	.err_handler	= tcp_v6_err,
	.gso_send_check	= tcp_v6_gso_send_check,
	.gso_segment	= tcp_tso_segment,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_TCP,
	.prot		= &tcpv6_prot,
	.ops		= &inet6_stream_ops,
	.capability	= -1,
	.no_check	= 0,
	.flags		= INET_PROTOSW_PERMANENT |
			  INET_PROTOSW_ICSK,
};

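/* Module initialisation: register the IPv6 protocol handler and protosw
 * entry, then create the control socket used for sending RSTs and ACKs.
 */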
void __init tcpv6_init(void)
{
	/* register inet6 protocol */
	if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
		printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
	inet6_register_protosw(&tcpv6_protosw);

	if (inet_csk_ctl_sock_create(&tcp6_socket, PF_INET6, SOCK_RAW,
				     IPPROTO_TCP) < 0)
		panic("Failed to create the TCPv6 control socket.\n");
}