/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	return NULL;
}
#endif

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

static __inline__ __sum16 tcp_v6_check(int len,
				       struct in6_addr *saddr,
				       struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

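/*
 * Active open: validate the destination address, hand v4-mapped
 * destinations off to tcp_v4_connect(), route the flow, pick a source
 * address, choose an initial sequence number and send the SYN.
 */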
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return(-EAFNOSUPPORT);

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.mark = sk->sk_mark;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->inet_sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	security_sk_classify_flow(sk, &fl);

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
	if (err < 0) {
		if (err == -EREMOTE)
			err = ip6_dst_blackhole(sk, &dst, &fl);
		if (err < 0)
			goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

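/*
 * ICMPv6 error handler: look up the socket the error refers to, adjust
 * the path MTU on ICMPV6_PKT_TOOBIG, and report other errors to the
 * socket or to the matching request_sock.
 */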
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.mark = sk->sk_mark;
			fl.fl_ip_dport = inet->inet_dport;
			fl.fl_ip_sport = inet->inet_sport;
			security_skb_classify_flow(skb, &fl);

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}


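/*
 * Build and transmit a SYN-ACK for a pending connection request: route
 * towards the peer, let tcp_make_synack() build the segment, fill in
 * the TCP checksum over the IPv6 pseudo-header and send with ip6_xmit().
 */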
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.mark = sk->sk_mark;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, &fl);

	opt = np->opt;
	if (opt && opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto done;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);
	if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
		goto done;

	skb = tcp_make_synack(sk, dst, req, rvp);
	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v6_check(skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial(th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, req, rvp);
}

static inline void syn_flood_warning(struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
	else
#endif
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}

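/*
 * TCP MD5 signature (RFC 2385) support: per-destination key lookup,
 * add/delete and setsockopt parsing for IPv6 peers; v4-mapped
 * addresses are forwarded to the IPv4 helpers.
 */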
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	BUG_ON(tp == NULL);

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}

static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}

static void tcp_v6_clear_md5_list (struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}

static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
				  int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}

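/*
 * Hashing helpers: the IPv6 pseudo-header (saddr, daddr, length,
 * IPPROTO_TCP) is fed into the MD5 transform first, followed by the
 * TCP header, the payload (for tcp_v6_md5_hash_skb) and the key.
 */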
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					struct in6_addr *daddr,
					struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       struct in6_addr *daddr, struct in6_addr *saddr,
			       struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb)
{
	struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

1da177e4
LT
922{
923 struct ipv6_pinfo *np = inet6_sk(sk);
aa8223c7 924 struct tcphdr *th = tcp_hdr(skb);
1da177e4 925
84fa7933 926 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1da177e4 927 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
663ead3b 928 skb->csum_start = skb_transport_header(skb) - skb->head;
ff1dcadb 929 skb->csum_offset = offsetof(struct tcphdr, check);
1da177e4 930 } else {
1ab1457c 931 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
07f0757a 932 csum_partial(th, th->doff<<2,
1da177e4
LT
933 skb->csum));
934 }
935}
936
a430a43d
HX
937static int tcp_v6_gso_send_check(struct sk_buff *skb)
938{
939 struct ipv6hdr *ipv6h;
940 struct tcphdr *th;
941
942 if (!pskb_may_pull(skb, sizeof(*th)))
943 return -EINVAL;
944
0660e03f 945 ipv6h = ipv6_hdr(skb);
aa8223c7 946 th = tcp_hdr(skb);
a430a43d
HX
947
948 th->check = 0;
949 th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
950 IPPROTO_TCP, 0);
663ead3b 951 skb->csum_start = skb_transport_header(skb) - skb->head;
ff1dcadb 952 skb->csum_offset = offsetof(struct tcphdr, check);
84fa7933 953 skb->ip_summed = CHECKSUM_PARTIAL;
a430a43d
HX
954 return 0;
955}
1da177e4 956
36990673
HX
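/*
 * GRO receive/complete hooks: verify the checksum against the IPv6
 * pseudo-header when the NIC supplied CHECKSUM_COMPLETE, then let the
 * generic TCP GRO code aggregate the segments.
 */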
static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}

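/*
 * Common helper for stateless replies (RST and time-wait/request ACKs):
 * builds a bare TCP header, optionally appending timestamp and MD5
 * options, and sends it on the per-namespace control socket.
 */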
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi fl;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(skb);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	buff->csum = csum_partial(t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	/* Pass a socket to ip6_dst_lookup either it is for RST
	 * Underlying function will use this to retrieve the network
	 * namespace
	 */
	if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
		if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
			skb_dst_set(buff, dst);
			ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
			if (rst)
				TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
}


static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
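/*
 * Handle an incoming SYN on a listening socket: allocate a request_sock,
 * parse options (including the cookie extensions), fall back to SYN
 * cookies when the request queue is full, and answer with
 * tcp_v6_send_synack().
 */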
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0
#endif

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies)
			want_cookie = 1;
		else
#endif
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = &ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = &ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

#ifdef CONFIG_SYN_COOKIES
		want_cookie = 0;	/* not our kind of cookie */
#endif
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
		isn = cookie_v6_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		isn = tcp_v6_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}

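/*
 * Create the child socket once the handshake completes. The v4-mapped
 * path delegates to tcp_v4_syn_recv_sock(); the native path routes the
 * flow, copies addresses, options and (if any) the MD5 key to the new
 * socket and hashes it into the established table.
 */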
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.mark = sk->sk_mark;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_rsk(req)->loc_port;
		security_req_classify_flow(req, &fl);

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &newnp->daddr,
					  newkey, key->keylen);
	}
#endif

	__inet6_hash(newsk, NULL);
	__inet_inherit_port(sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}

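/*
 * Validate the checksum of an incoming segment: accept a good
 * CHECKSUM_COMPLETE immediately, otherwise seed skb->csum with the
 * pseudo-header sum; short packets (<= 76 bytes) are verified here,
 * larger ones are left for tcp_checksum_complete() later.
 */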
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash (sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

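/*
 * Main receive entry point registered with the IPv6 stack: sanity
 * checks, socket lookup, time-wait handling, then hand the segment to
 * tcp_v6_do_rcv() directly, via the prequeue or via the backlog.
 */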
e5bbef20 1676static int tcp_v6_rcv(struct sk_buff *skb)
1da177e4 1677{
1ab1457c 1678 struct tcphdr *th;
1da177e4
LT
1679 struct sock *sk;
1680 int ret;
a86b1e30 1681 struct net *net = dev_net(skb->dev);
1da177e4
LT
1682
1683 if (skb->pkt_type != PACKET_HOST)
1684 goto discard_it;
1685
1686 /*
1687 * Count it even if it's bad.
1688 */
63231bdd 1689 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1da177e4
LT
1690
1691 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1692 goto discard_it;
1693
aa8223c7 1694 th = tcp_hdr(skb);
1da177e4
LT
1695
1696 if (th->doff < sizeof(struct tcphdr)/4)
1697 goto bad_packet;
1698 if (!pskb_may_pull(skb, th->doff*4))
1699 goto discard_it;
1700
60476372 1701 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1da177e4
LT
1702 goto bad_packet;
1703
aa8223c7 1704 th = tcp_hdr(skb);
1da177e4
LT
1705 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1706 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1707 skb->len - th->doff*4);
1708 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1709 TCP_SKB_CB(skb)->when = 0;
0660e03f 1710 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
1da177e4
LT
1711 TCP_SKB_CB(skb)->sacked = 0;
1712
9a1f27c4 1713 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1da177e4
LT
1714 if (!sk)
1715 goto no_tcp_socket;
1716
1717process:
1718 if (sk->sk_state == TCP_TIME_WAIT)
1719 goto do_time_wait;
1720
1721 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1722 goto discard_and_relse;
1723
fda9ef5d 1724 if (sk_filter(sk, skb))
1da177e4
LT
1725 goto discard_and_relse;
1726
1727 skb->dev = NULL;
1728
293b9c42 1729 bh_lock_sock_nested(sk);
1da177e4
LT
1730 ret = 0;
1731 if (!sock_owned_by_user(sk)) {
1a2449a8 1732#ifdef CONFIG_NET_DMA
1ab1457c 1733 struct tcp_sock *tp = tcp_sk(sk);
b4caea8a 1734 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
f67b4599 1735 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1ab1457c
YH
1736 if (tp->ucopy.dma_chan)
1737 ret = tcp_v6_do_rcv(sk, skb);
1738 else
1a2449a8
CL
1739#endif
1740 {
1741 if (!tcp_prequeue(sk, skb))
1742 ret = tcp_v6_do_rcv(sk, skb);
1743 }
6cce09f8 1744 } else if (unlikely(sk_add_backlog(sk, skb))) {
6b03a53a 1745 bh_unlock_sock(sk);
6cce09f8 1746 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
6b03a53a
ZY
1747 goto discard_and_relse;
1748 }
1da177e4
LT
1749 bh_unlock_sock(sk);
1750
1751 sock_put(sk);
1752 return ret ? -1 : 0;
1753
1754no_tcp_socket:
1755 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1756 goto discard_it;
1757
1758 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1759bad_packet:
63231bdd 1760 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1da177e4 1761 } else {
cfb6eeb4 1762 tcp_v6_send_reset(NULL, skb);
1da177e4
LT
1763 }
1764
1765discard_it:
1766
1767 /*
1768 * Discard frame
1769 */
1770
1771 kfree_skb(skb);
1772 return 0;
1773
1774discard_and_relse:
1775 sock_put(sk);
1776 goto discard_it;
1777
do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit = inet6_csk_xmit,
	.send_check = tcp_v6_send_check,
	.rebuild_header = inet6_sk_rebuild_header,
	.conn_request = tcp_v6_conn_request,
	.syn_recv_sock = tcp_v6_syn_recv_sock,
	.remember_stamp = tcp_v6_remember_stamp,
	.net_header_len = sizeof(struct ipv6hdr),
	.setsockopt = ipv6_setsockopt,
	.getsockopt = ipv6_getsockopt,
	.addr2sockaddr = inet6_csk_addr2sockaddr,
	.sockaddr_len = sizeof(struct sockaddr_in6),
	.bind_conflict = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

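/*
 * A TCP-over-IPv6 socket starts out with icsk_af_ops == &ipv6_specific.
 * A socket that connects to, or accepts from, an IPv4-mapped address is
 * switched over to the "mapped" operations below (and, with MD5 enabled,
 * to the mapped tcp_sock_af_ops), so its packets are built and
 * checksummed as IPv4.
 */
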
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup = tcp_v6_md5_lookup,
	.calc_md5_hash = tcp_v6_md5_hash_skb,
	.md5_add = tcp_v6_md5_add_func,
	.md5_parse = tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */

static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit = ip_queue_xmit,
	.send_check = tcp_v4_send_check,
	.rebuild_header = inet_sk_rebuild_header,
	.conn_request = tcp_v6_conn_request,
	.syn_recv_sock = tcp_v6_syn_recv_sock,
	.remember_stamp = tcp_v4_remember_stamp,
	.net_header_len = sizeof(struct iphdr),
	.setsockopt = ipv6_setsockopt,
	.getsockopt = ipv6_getsockopt,
	.addr2sockaddr = inet6_csk_addr2sockaddr,
	.sockaddr_len = sizeof(struct sockaddr_in6),
	.bind_conflict = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup = tcp_v4_md5_lookup,
	.calc_md5_hash = tcp_v4_md5_hash_skb,
	.md5_add = tcp_v6_md5_add_func,
	.md5_parse = tcp_v6_parse_md5_keys,
};
#endif

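/*
 * Note that even a v4-mapped socket keeps the IPv6 md5_add/md5_parse
 * helpers: keys are still configured through the IPv6 setsockopt path,
 * but the signature itself is computed over the IPv4 pseudo-header via
 * tcp_v4_md5_hash_skb.
 */
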
/* NOTE: A lot of things set to zero explicitly by call to
 * sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
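/*
 * Each /proc/net/tcp6 row carries, in order: slot number, local and
 * remote address:port in hex, socket state, tx_queue:rx_queue, the
 * timer status and expiry, retransmit count, uid, probes out, inode,
 * plus kernel-internal fields (refcount, socket pointer, rto, ato,
 * quickack/pingpong state, cwnd and ssthresh).  The open-request and
 * timewait dumpers below fill the columns they have no data for with
 * zeroes.
 */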
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest = &np->daddr;
	src = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp = ntohs(inet->inet_sport);

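	/* timer_active encodes which timer is pending: 1 = retransmit,
	 * 4 = zero window probe, 2 = sk_timer (keepalive), 0 = none.
	 */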
	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active = 1;
		timer_expires = icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active = 4;
		timer_expires = icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active = 2;
		timer_expires = sp->sk_timer.expires;
	} else {
		timer_active = 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq - tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

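/*
 * tcp6_seq_show() is the seq_file ->show() hook: it prints the header
 * line and then dispatches on the iterator state to one of the helpers
 * above (established/listening sockets, pending open requests in
 * SYN_RECV, or TIME_WAIT sockets).
 */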
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 " sl "
			 "local_address "
			 "remote_address "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 " uid timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name = "tcp6",
	.family = AF_INET6,
	.seq_fops = {
		.owner = THIS_MODULE,
	},
	.seq_ops = {
		.show = tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

struct proto tcpv6_prot = {
	.name = "TCPv6",
	.owner = THIS_MODULE,
	.close = tcp_close,
	.connect = tcp_v6_connect,
	.disconnect = tcp_disconnect,
	.accept = inet_csk_accept,
	.ioctl = tcp_ioctl,
	.init = tcp_v6_init_sock,
	.destroy = tcp_v6_destroy_sock,
	.shutdown = tcp_shutdown,
	.setsockopt = tcp_setsockopt,
	.getsockopt = tcp_getsockopt,
	.recvmsg = tcp_recvmsg,
	.backlog_rcv = tcp_v6_do_rcv,
	.hash = tcp_v6_hash,
	.unhash = inet_unhash,
	.get_port = inet_csk_get_port,
	.enter_memory_pressure = tcp_enter_memory_pressure,
	.sockets_allocated = &tcp_sockets_allocated,
	.memory_allocated = &tcp_memory_allocated,
	.memory_pressure = &tcp_memory_pressure,
	.orphan_count = &tcp_orphan_count,
	.sysctl_mem = sysctl_tcp_mem,
	.sysctl_wmem = sysctl_tcp_wmem,
	.sysctl_rmem = sysctl_tcp_rmem,
	.max_header = MAX_TCP_HEADER,
	.obj_size = sizeof(struct tcp6_sock),
	.slab_flags = SLAB_DESTROY_BY_RCU,
	.twsk_prot = &tcp6_timewait_sock_ops,
	.rsk_prot = &tcp6_request_sock_ops,
	.h.hashinfo = &tcp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_tcp_setsockopt,
	.compat_getsockopt = compat_tcp_getsockopt,
#endif
};

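/*
 * SLAB_DESTROY_BY_RCU on the socket slab (set in tcpv6_prot above) is
 * what allows the lockless socket lookup in __inet6_lookup_skb() to
 * tolerate a socket being freed and its memory reused concurrently;
 * the lookup revalidates the match after taking a reference.
 */
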
static const struct inet6_protocol tcpv6_protocol = {
	.handler = tcp_v6_rcv,
	.err_handler = tcp_v6_err,
	.gso_send_check = tcp_v6_gso_send_check,
	.gso_segment = tcp_tso_segment,
	.gro_receive = tcp6_gro_receive,
	.gro_complete = tcp6_gro_complete,
	.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

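/*
 * Besides the basic receive and ICMPv6 error hooks, the inet6_protocol
 * entry above also wires up segmentation offload (gso_send_check and
 * gso_segment, used when a device cannot take a large TSO frame as-is)
 * and generic receive offload (gro_receive/gro_complete, which coalesce
 * in-order segments before they reach tcp_v6_rcv()).
 */
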
static struct inet_protosw tcpv6_protosw = {
	.type = SOCK_STREAM,
	.protocol = IPPROTO_TCP,
	.prot = &tcpv6_prot,
	.ops = &inet6_stream_ops,
	.no_check = 0,
	.flags = INET_PROTOSW_PERMANENT |
		 INET_PROTOSW_ICSK,
};

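/*
 * INET_PROTOSW_ICSK tells socket creation that this protocol is built
 * on inet_connection_sock, and INET_PROTOSW_PERMANENT marks the entry
 * above as a core protocol that must not be overridden by modules.
 */
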
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init = tcpv6_net_init,
	.exit = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

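/*
 * Per-namespace hooks above: .init creates the control socket used when
 * TCP must transmit packets not owned by any full socket (e.g. resets),
 * .exit tears it down again, and .exit_batch purges any IPv6 TIME_WAIT
 * sockets left behind by the whole batch of namespaces being destroyed.
 */
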
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

	/* Unwind in the reverse order of registration: a failed
	 * register_pernet_subsys() must undo both the protosw entry and the
	 * protocol handler, while a failed inet6_register_protosw() only
	 * has the protocol handler to undo.
	 */
out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}