bbs.cooldavid.org Git - net-next-2.6.git / blame - net/ipv6/tcp_ipv6.c
gro: Avoid copying headers of unmerged packets
1da177e4
LT
1/*
2 * TCP over IPv6
1ab1457c 3 * Linux INET6 implementation
1da177e4
LT
4 *
5 * Authors:
1ab1457c 6 * Pedro Roque <roque@di.fc.ul.pt>
1da177e4 7 *
1ab1457c 8 * Based on:
1da177e4
LT
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
eb4dea58 26#include <linux/bottom_half.h>
1da177e4 27#include <linux/module.h>
1da177e4
LT
28#include <linux/errno.h>
29#include <linux/types.h>
30#include <linux/socket.h>
31#include <linux/sockios.h>
32#include <linux/net.h>
33#include <linux/jiffies.h>
34#include <linux/in.h>
35#include <linux/in6.h>
36#include <linux/netdevice.h>
37#include <linux/init.h>
38#include <linux/jhash.h>
39#include <linux/ipsec.h>
40#include <linux/times.h>
41
42#include <linux/ipv6.h>
43#include <linux/icmpv6.h>
44#include <linux/random.h>
45
46#include <net/tcp.h>
47#include <net/ndisc.h>
5324a040 48#include <net/inet6_hashtables.h>
8129765a 49#include <net/inet6_connection_sock.h>
1da177e4
LT
50#include <net/ipv6.h>
51#include <net/transp_v6.h>
52#include <net/addrconf.h>
53#include <net/ip6_route.h>
54#include <net/ip6_checksum.h>
55#include <net/inet_ecn.h>
56#include <net/protocol.h>
57#include <net/xfrm.h>
1da177e4
LT
58#include <net/snmp.h>
59#include <net/dsfield.h>
6d6ee43e 60#include <net/timewait_sock.h>
18134bed 61#include <net/netdma.h>
3d58b5fa 62#include <net/inet_common.h>
1da177e4
LT
63
64#include <asm/uaccess.h>
65
66#include <linux/proc_fs.h>
67#include <linux/seq_file.h>
68
cfb6eeb4
YH
69#include <linux/crypto.h>
70#include <linux/scatterlist.h>
71
cfb6eeb4 72static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
6edafaaf
GJ
73static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
74 struct request_sock *req);
1da177e4
LT
75
76static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
1da177e4 77
8292a17a
ACM
78static struct inet_connection_sock_af_ops ipv6_mapped;
79static struct inet_connection_sock_af_ops ipv6_specific;
a928630a 80#ifdef CONFIG_TCP_MD5SIG
cfb6eeb4
YH
81static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
82static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
9501f972
YH
83#else
84static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
85 struct in6_addr *addr)
86{
87 return NULL;
88}
a928630a 89#endif
1da177e4 90
1da177e4
LT
91static void tcp_v6_hash(struct sock *sk)
92{
93 if (sk->sk_state != TCP_CLOSE) {
8292a17a 94 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
1da177e4
LT
95 tcp_prot.hash(sk);
96 return;
97 }
98 local_bh_disable();
ab1e0a13 99 __inet6_hash(sk);
1da177e4
LT
100 local_bh_enable();
101 }
102}
103
684f2176 104static __inline__ __sum16 tcp_v6_check(int len,
1ab1457c
YH
105 struct in6_addr *saddr,
106 struct in6_addr *daddr,
868c86bc 107 __wsum base)
1da177e4
LT
108{
109 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
110}
111
a94f723d 112static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
1da177e4 113{
0660e03f
ACM
114 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
115 ipv6_hdr(skb)->saddr.s6_addr32,
aa8223c7
ACM
116 tcp_hdr(skb)->dest,
117 tcp_hdr(skb)->source);
1da177e4
LT
118}
119
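/*
 * Active open: validate the destination, handle v4-mapped addresses by
 * switching to the mapped af_ops and calling tcp_v4_connect(), otherwise
 * route the flow, pick a source address, hash the socket and send the SYN.
 */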
1ab1457c 120static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1da177e4
LT
121 int addr_len)
122{
123 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
1ab1457c 124 struct inet_sock *inet = inet_sk(sk);
d83d8461 125 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
126 struct ipv6_pinfo *np = inet6_sk(sk);
127 struct tcp_sock *tp = tcp_sk(sk);
128 struct in6_addr *saddr = NULL, *final_p = NULL, final;
129 struct flowi fl;
130 struct dst_entry *dst;
131 int addr_type;
132 int err;
133
1ab1457c 134 if (addr_len < SIN6_LEN_RFC2133)
1da177e4
LT
135 return -EINVAL;
136
1ab1457c 137 if (usin->sin6_family != AF_INET6)
1da177e4
LT
138 return(-EAFNOSUPPORT);
139
140 memset(&fl, 0, sizeof(fl));
141
142 if (np->sndflow) {
143 fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
144 IP6_ECN_flow_init(fl.fl6_flowlabel);
145 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
146 struct ip6_flowlabel *flowlabel;
147 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
148 if (flowlabel == NULL)
149 return -EINVAL;
150 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
151 fl6_sock_release(flowlabel);
152 }
153 }
154
155 /*
1ab1457c
YH
156 * connect() to INADDR_ANY means loopback (BSD'ism).
157 */
158
159 if(ipv6_addr_any(&usin->sin6_addr))
160 usin->sin6_addr.s6_addr[15] = 0x1;
1da177e4
LT
161
162 addr_type = ipv6_addr_type(&usin->sin6_addr);
163
164 if(addr_type & IPV6_ADDR_MULTICAST)
165 return -ENETUNREACH;
166
167 if (addr_type&IPV6_ADDR_LINKLOCAL) {
168 if (addr_len >= sizeof(struct sockaddr_in6) &&
169 usin->sin6_scope_id) {
170 /* If interface is set while binding, indices
171 * must coincide.
172 */
173 if (sk->sk_bound_dev_if &&
174 sk->sk_bound_dev_if != usin->sin6_scope_id)
175 return -EINVAL;
176
177 sk->sk_bound_dev_if = usin->sin6_scope_id;
178 }
179
180 /* Connect to link-local address requires an interface */
181 if (!sk->sk_bound_dev_if)
182 return -EINVAL;
183 }
184
185 if (tp->rx_opt.ts_recent_stamp &&
186 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
187 tp->rx_opt.ts_recent = 0;
188 tp->rx_opt.ts_recent_stamp = 0;
189 tp->write_seq = 0;
190 }
191
192 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
193 np->flow_label = fl.fl6_flowlabel;
194
195 /*
196 * TCP over IPv4
197 */
198
199 if (addr_type == IPV6_ADDR_MAPPED) {
d83d8461 200 u32 exthdrlen = icsk->icsk_ext_hdr_len;
1da177e4
LT
201 struct sockaddr_in sin;
202
203 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
204
205 if (__ipv6_only_sock(sk))
206 return -ENETUNREACH;
207
208 sin.sin_family = AF_INET;
209 sin.sin_port = usin->sin6_port;
210 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
211
d83d8461 212 icsk->icsk_af_ops = &ipv6_mapped;
1da177e4 213 sk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
214#ifdef CONFIG_TCP_MD5SIG
215 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
216#endif
1da177e4
LT
217
218 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
219
220 if (err) {
d83d8461
ACM
221 icsk->icsk_ext_hdr_len = exthdrlen;
222 icsk->icsk_af_ops = &ipv6_specific;
1da177e4 223 sk->sk_backlog_rcv = tcp_v6_do_rcv;
cfb6eeb4
YH
224#ifdef CONFIG_TCP_MD5SIG
225 tp->af_specific = &tcp_sock_ipv6_specific;
226#endif
1da177e4
LT
227 goto failure;
228 } else {
229 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
230 inet->saddr);
231 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
232 inet->rcv_saddr);
233 }
234
235 return err;
236 }
237
238 if (!ipv6_addr_any(&np->rcv_saddr))
239 saddr = &np->rcv_saddr;
240
241 fl.proto = IPPROTO_TCP;
242 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
243 ipv6_addr_copy(&fl.fl6_src,
244 (saddr ? saddr : &np->saddr));
245 fl.oif = sk->sk_bound_dev_if;
246 fl.fl_ip_dport = usin->sin6_port;
247 fl.fl_ip_sport = inet->sport;
248
249 if (np->opt && np->opt->srcrt) {
250 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
251 ipv6_addr_copy(&final, &fl.fl6_dst);
252 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
253 final_p = &final;
254 }
255
beb8d13b
VY
256 security_sk_classify_flow(sk, &fl);
257
1da177e4
LT
258 err = ip6_dst_lookup(sk, &dst, &fl);
259 if (err)
260 goto failure;
261 if (final_p)
262 ipv6_addr_copy(&fl.fl6_dst, final_p);
263
52479b62
AD
264 err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
265 if (err < 0) {
14e50e57
DM
266 if (err == -EREMOTE)
267 err = ip6_dst_blackhole(sk, &dst, &fl);
268 if (err < 0)
269 goto failure;
270 }
1da177e4
LT
271
272 if (saddr == NULL) {
273 saddr = &fl.fl6_src;
274 ipv6_addr_copy(&np->rcv_saddr, saddr);
275 }
276
277 /* set the source address */
278 ipv6_addr_copy(&np->saddr, saddr);
279 inet->rcv_saddr = LOOPBACK4_IPV6;
280
f83ef8c0 281 sk->sk_gso_type = SKB_GSO_TCPV6;
8e1ef0a9 282 __ip6_dst_store(sk, dst, NULL, NULL);
1da177e4 283
d83d8461 284 icsk->icsk_ext_hdr_len = 0;
1da177e4 285 if (np->opt)
d83d8461
ACM
286 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
287 np->opt->opt_nflen);
1da177e4
LT
288
289 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
290
291 inet->dport = usin->sin6_port;
292
293 tcp_set_state(sk, TCP_SYN_SENT);
d8313f5c 294 err = inet6_hash_connect(&tcp_death_row, sk);
1da177e4
LT
295 if (err)
296 goto late_failure;
297
298 if (!tp->write_seq)
299 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
300 np->daddr.s6_addr32,
301 inet->sport,
302 inet->dport);
303
304 err = tcp_connect(sk);
305 if (err)
306 goto late_failure;
307
308 return 0;
309
310late_failure:
311 tcp_set_state(sk, TCP_CLOSE);
312 __sk_dst_reset(sk);
313failure:
314 inet->dport = 0;
315 sk->sk_route_caps = 0;
316 return err;
317}
318
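/*
 * ICMPv6 error handler: look up the socket the offending segment belongs
 * to, handle PKT_TOOBIG by re-syncing the MSS to the new path MTU, and
 * otherwise convert the ICMP error and report it to the socket (or to the
 * matching request_sock on a listener).
 */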
319static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
04ce6909 320 int type, int code, int offset, __be32 info)
1da177e4
LT
321{
322 struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
505cbfc5 323 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
1da177e4
LT
324 struct ipv6_pinfo *np;
325 struct sock *sk;
326 int err;
1ab1457c 327 struct tcp_sock *tp;
1da177e4 328 __u32 seq;
ca12a1a4 329 struct net *net = dev_net(skb->dev);
1da177e4 330
ca12a1a4 331 sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
d86e0dac 332 th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
1da177e4
LT
333
334 if (sk == NULL) {
e41b5368
DL
335 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
336 ICMP6_MIB_INERRORS);
1da177e4
LT
337 return;
338 }
339
340 if (sk->sk_state == TCP_TIME_WAIT) {
9469c7b4 341 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
342 return;
343 }
344
345 bh_lock_sock(sk);
346 if (sock_owned_by_user(sk))
de0744af 347 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
1da177e4
LT
348
349 if (sk->sk_state == TCP_CLOSE)
350 goto out;
351
352 tp = tcp_sk(sk);
1ab1457c 353 seq = ntohl(th->seq);
1da177e4
LT
354 if (sk->sk_state != TCP_LISTEN &&
355 !between(seq, tp->snd_una, tp->snd_nxt)) {
de0744af 356 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
1da177e4
LT
357 goto out;
358 }
359
360 np = inet6_sk(sk);
361
362 if (type == ICMPV6_PKT_TOOBIG) {
363 struct dst_entry *dst = NULL;
364
365 if (sock_owned_by_user(sk))
366 goto out;
367 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
368 goto out;
369
370 /* icmp should have updated the destination cache entry */
371 dst = __sk_dst_check(sk, np->dst_cookie);
372
373 if (dst == NULL) {
374 struct inet_sock *inet = inet_sk(sk);
375 struct flowi fl;
376
377 /* BUGGG_FUTURE: Again, it is not clear how
 378 to handle the rthdr case. Ignore this complexity
379 for now.
380 */
381 memset(&fl, 0, sizeof(fl));
382 fl.proto = IPPROTO_TCP;
383 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
384 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
385 fl.oif = sk->sk_bound_dev_if;
386 fl.fl_ip_dport = inet->dport;
387 fl.fl_ip_sport = inet->sport;
beb8d13b 388 security_skb_classify_flow(skb, &fl);
1da177e4
LT
389
390 if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
391 sk->sk_err_soft = -err;
392 goto out;
393 }
394
52479b62 395 if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0) {
1da177e4
LT
396 sk->sk_err_soft = -err;
397 goto out;
398 }
399
400 } else
401 dst_hold(dst);
402
d83d8461 403 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
1da177e4
LT
404 tcp_sync_mss(sk, dst_mtu(dst));
405 tcp_simple_retransmit(sk);
406 } /* else let the usual retransmit timer handle it */
407 dst_release(dst);
408 goto out;
409 }
410
411 icmpv6_err_convert(type, code, &err);
412
60236fdd 413 /* Might be for a request_sock */
1da177e4 414 switch (sk->sk_state) {
60236fdd 415 struct request_sock *req, **prev;
1da177e4
LT
416 case TCP_LISTEN:
417 if (sock_owned_by_user(sk))
418 goto out;
419
8129765a
ACM
420 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
421 &hdr->saddr, inet6_iif(skb));
1da177e4
LT
422 if (!req)
423 goto out;
424
425 /* ICMPs are not backlogged, hence we cannot get
426 * an established socket here.
427 */
547b792c 428 WARN_ON(req->sk != NULL);
1da177e4 429
2e6599cb 430 if (seq != tcp_rsk(req)->snt_isn) {
de0744af 431 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
1da177e4
LT
432 goto out;
433 }
434
463c84b9 435 inet_csk_reqsk_queue_drop(sk, req, prev);
1da177e4
LT
436 goto out;
437
438 case TCP_SYN_SENT:
439 case TCP_SYN_RECV: /* Cannot happen.
1ab1457c 440 It can, if SYNs are crossed. --ANK */
1da177e4 441 if (!sock_owned_by_user(sk)) {
1da177e4
LT
442 sk->sk_err = err;
443 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
444
445 tcp_done(sk);
446 } else
447 sk->sk_err_soft = err;
448 goto out;
449 }
450
451 if (!sock_owned_by_user(sk) && np->recverr) {
452 sk->sk_err = err;
453 sk->sk_error_report(sk);
454 } else
455 sk->sk_err_soft = err;
456
457out:
458 bh_unlock_sock(sk);
459 sock_put(sk);
460}
461
462
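/*
 * Build a SYN+ACK for the pending request and transmit it with ip6_xmit(),
 * honouring any routing-header (srcrt) option configured on the listener.
 */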
fd80eb94 463static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
1da177e4 464{
ca304b61 465 struct inet6_request_sock *treq = inet6_rsk(req);
1da177e4
LT
466 struct ipv6_pinfo *np = inet6_sk(sk);
467 struct sk_buff * skb;
468 struct ipv6_txoptions *opt = NULL;
469 struct in6_addr * final_p = NULL, final;
470 struct flowi fl;
fd80eb94 471 struct dst_entry *dst;
1da177e4
LT
472 int err = -1;
473
474 memset(&fl, 0, sizeof(fl));
475 fl.proto = IPPROTO_TCP;
2e6599cb
ACM
476 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
477 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1da177e4 478 fl.fl6_flowlabel = 0;
2e6599cb
ACM
479 fl.oif = treq->iif;
480 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
fd507037 481 fl.fl_ip_sport = inet_rsk(req)->loc_port;
4237c75c 482 security_req_classify_flow(req, &fl);
1da177e4 483
fd80eb94
DL
484 opt = np->opt;
485 if (opt && opt->srcrt) {
486 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
487 ipv6_addr_copy(&final, &fl.fl6_dst);
488 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
489 final_p = &final;
1da177e4
LT
490 }
491
fd80eb94
DL
492 err = ip6_dst_lookup(sk, &dst, &fl);
493 if (err)
494 goto done;
495 if (final_p)
496 ipv6_addr_copy(&fl.fl6_dst, final_p);
52479b62 497 if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
fd80eb94
DL
498 goto done;
499
1da177e4
LT
500 skb = tcp_make_synack(sk, dst, req);
501 if (skb) {
aa8223c7 502 struct tcphdr *th = tcp_hdr(skb);
1da177e4 503
684f2176 504 th->check = tcp_v6_check(skb->len,
2e6599cb 505 &treq->loc_addr, &treq->rmt_addr,
07f0757a 506 csum_partial(th, skb->len, skb->csum));
1da177e4 507
2e6599cb 508 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1da177e4 509 err = ip6_xmit(sk, skb, &fl, opt, 0);
b9df3cb8 510 err = net_xmit_eval(err);
1da177e4
LT
511 }
512
513done:
1ab1457c 514 if (opt && opt != np->opt)
1da177e4 515 sock_kfree_s(sk, opt, opt->tot_len);
78b91042 516 dst_release(dst);
1da177e4
LT
517 return err;
518}
519
c6aefafb
GG
520static inline void syn_flood_warning(struct sk_buff *skb)
521{
522#ifdef CONFIG_SYN_COOKIES
523 if (sysctl_tcp_syncookies)
524 printk(KERN_INFO
525 "TCPv6: Possible SYN flooding on port %d. "
526 "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
527 else
528#endif
529 printk(KERN_INFO
530 "TCPv6: Possible SYN flooding on port %d. "
531 "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
532}
533
60236fdd 534static void tcp_v6_reqsk_destructor(struct request_sock *req)
1da177e4 535{
ca304b61
ACM
536 if (inet6_rsk(req)->pktopts)
537 kfree_skb(inet6_rsk(req)->pktopts);
1da177e4
LT
538}
539
cfb6eeb4
YH
540#ifdef CONFIG_TCP_MD5SIG
541static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
542 struct in6_addr *addr)
543{
544 struct tcp_sock *tp = tcp_sk(sk);
545 int i;
546
547 BUG_ON(tp == NULL);
548
549 if (!tp->md5sig_info || !tp->md5sig_info->entries6)
550 return NULL;
551
552 for (i = 0; i < tp->md5sig_info->entries6; i++) {
caad295f 553 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
f8ab18d2 554 return &tp->md5sig_info->keys6[i].base;
cfb6eeb4
YH
555 }
556 return NULL;
557}
558
559static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
560 struct sock *addr_sk)
561{
562 return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
563}
564
565static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
566 struct request_sock *req)
567{
568 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
569}
570
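/*
 * Insert or update an MD5 key for an IPv6 peer.  The keys6 array is grown
 * by one entry at a time with GFP_ATOMIC, so an allocation failure leaves
 * the existing list untouched.
 */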
571static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
572 char *newkey, u8 newkeylen)
573{
574 /* Add key to the list */
b0a713e9 575 struct tcp_md5sig_key *key;
cfb6eeb4
YH
576 struct tcp_sock *tp = tcp_sk(sk);
577 struct tcp6_md5sig_key *keys;
578
b0a713e9 579 key = tcp_v6_md5_do_lookup(sk, peer);
cfb6eeb4
YH
580 if (key) {
581 /* modify existing entry - just update that one */
b0a713e9
MD
582 kfree(key->key);
583 key->key = newkey;
584 key->keylen = newkeylen;
cfb6eeb4
YH
585 } else {
586 /* reallocate new list if current one is full. */
587 if (!tp->md5sig_info) {
588 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
589 if (!tp->md5sig_info) {
590 kfree(newkey);
591 return -ENOMEM;
592 }
3d7dbeac 593 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
cfb6eeb4 594 }
aacbe8c8
YH
595 if (tcp_alloc_md5sig_pool() == NULL) {
596 kfree(newkey);
597 return -ENOMEM;
598 }
cfb6eeb4
YH
599 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
600 keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
601 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
602
603 if (!keys) {
604 tcp_free_md5sig_pool();
605 kfree(newkey);
606 return -ENOMEM;
607 }
608
609 if (tp->md5sig_info->entries6)
610 memmove(keys, tp->md5sig_info->keys6,
611 (sizeof (tp->md5sig_info->keys6[0]) *
612 tp->md5sig_info->entries6));
613
614 kfree(tp->md5sig_info->keys6);
615 tp->md5sig_info->keys6 = keys;
616 tp->md5sig_info->alloced6++;
617 }
618
619 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
620 peer);
f8ab18d2
DM
621 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
622 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
cfb6eeb4
YH
623
624 tp->md5sig_info->entries6++;
625 }
626 return 0;
627}
628
629static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
630 u8 *newkey, __u8 newkeylen)
631{
632 return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
633 newkey, newkeylen);
634}
635
636static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
637{
638 struct tcp_sock *tp = tcp_sk(sk);
639 int i;
640
641 for (i = 0; i < tp->md5sig_info->entries6; i++) {
caad295f 642 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
cfb6eeb4 643 /* Free the key */
f8ab18d2 644 kfree(tp->md5sig_info->keys6[i].base.key);
cfb6eeb4
YH
645 tp->md5sig_info->entries6--;
646
647 if (tp->md5sig_info->entries6 == 0) {
648 kfree(tp->md5sig_info->keys6);
649 tp->md5sig_info->keys6 = NULL;
ca983cef 650 tp->md5sig_info->alloced6 = 0;
cfb6eeb4
YH
651 } else {
652 /* shrink the database */
653 if (tp->md5sig_info->entries6 != i)
654 memmove(&tp->md5sig_info->keys6[i],
655 &tp->md5sig_info->keys6[i+1],
656 (tp->md5sig_info->entries6 - i)
657 * sizeof (tp->md5sig_info->keys6[0]));
658 }
77adefdc
YH
659 tcp_free_md5sig_pool();
660 return 0;
cfb6eeb4
YH
661 }
662 }
663 return -ENOENT;
664}
665
666static void tcp_v6_clear_md5_list (struct sock *sk)
667{
668 struct tcp_sock *tp = tcp_sk(sk);
669 int i;
670
671 if (tp->md5sig_info->entries6) {
672 for (i = 0; i < tp->md5sig_info->entries6; i++)
f8ab18d2 673 kfree(tp->md5sig_info->keys6[i].base.key);
cfb6eeb4
YH
674 tp->md5sig_info->entries6 = 0;
675 tcp_free_md5sig_pool();
676 }
677
678 kfree(tp->md5sig_info->keys6);
679 tp->md5sig_info->keys6 = NULL;
680 tp->md5sig_info->alloced6 = 0;
681
682 if (tp->md5sig_info->entries4) {
683 for (i = 0; i < tp->md5sig_info->entries4; i++)
f8ab18d2 684 kfree(tp->md5sig_info->keys4[i].base.key);
cfb6eeb4
YH
685 tp->md5sig_info->entries4 = 0;
686 tcp_free_md5sig_pool();
687 }
688
689 kfree(tp->md5sig_info->keys4);
690 tp->md5sig_info->keys4 = NULL;
691 tp->md5sig_info->alloced4 = 0;
692}
693
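/*
 * setsockopt(TCP_MD5SIG) handler: copy the request from userspace and add
 * or delete a key on the IPv4 or IPv6 list, depending on whether the peer
 * address is v4-mapped.
 */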
694static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
695 int optlen)
696{
697 struct tcp_md5sig cmd;
698 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
699 u8 *newkey;
700
701 if (optlen < sizeof(cmd))
702 return -EINVAL;
703
704 if (copy_from_user(&cmd, optval, sizeof(cmd)))
705 return -EFAULT;
706
707 if (sin6->sin6_family != AF_INET6)
708 return -EINVAL;
709
710 if (!cmd.tcpm_keylen) {
711 if (!tcp_sk(sk)->md5sig_info)
712 return -ENOENT;
e773e4fa 713 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
cfb6eeb4
YH
714 return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
715 return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
716 }
717
718 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
719 return -EINVAL;
720
721 if (!tcp_sk(sk)->md5sig_info) {
722 struct tcp_sock *tp = tcp_sk(sk);
723 struct tcp_md5sig_info *p;
724
725 p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
726 if (!p)
727 return -ENOMEM;
728
729 tp->md5sig_info = p;
3d7dbeac 730 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
cfb6eeb4
YH
731 }
732
af879cc7 733 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4
YH
734 if (!newkey)
735 return -ENOMEM;
e773e4fa 736 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
cfb6eeb4
YH
737 return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
738 newkey, cmd.tcpm_keylen);
739 }
740 return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
741}
742
49a72dfb
AL
743static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
744 struct in6_addr *daddr,
745 struct in6_addr *saddr, int nbytes)
cfb6eeb4 746{
cfb6eeb4 747 struct tcp6_pseudohdr *bp;
49a72dfb 748 struct scatterlist sg;
8d26d76d 749
cfb6eeb4 750 bp = &hp->md5_blk.ip6;
cfb6eeb4
YH
751 /* 1. TCP pseudo-header (RFC2460) */
752 ipv6_addr_copy(&bp->saddr, saddr);
753 ipv6_addr_copy(&bp->daddr, daddr);
49a72dfb 754 bp->protocol = cpu_to_be32(IPPROTO_TCP);
00b1304c 755 bp->len = cpu_to_be32(nbytes);
cfb6eeb4 756
49a72dfb
AL
757 sg_init_one(&sg, bp, sizeof(*bp));
758 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
759}
c7da57a1 760
49a72dfb
AL
761static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
762 struct in6_addr *daddr, struct in6_addr *saddr,
763 struct tcphdr *th)
764{
765 struct tcp_md5sig_pool *hp;
766 struct hash_desc *desc;
767
768 hp = tcp_get_md5sig_pool();
769 if (!hp)
770 goto clear_hash_noput;
771 desc = &hp->md5_desc;
772
773 if (crypto_hash_init(desc))
774 goto clear_hash;
775 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
776 goto clear_hash;
777 if (tcp_md5_hash_header(hp, th))
778 goto clear_hash;
779 if (tcp_md5_hash_key(hp, key))
780 goto clear_hash;
781 if (crypto_hash_final(desc, md5_hash))
cfb6eeb4 782 goto clear_hash;
cfb6eeb4 783
cfb6eeb4 784 tcp_put_md5sig_pool();
cfb6eeb4 785 return 0;
49a72dfb 786
cfb6eeb4
YH
787clear_hash:
788 tcp_put_md5sig_pool();
789clear_hash_noput:
790 memset(md5_hash, 0, 16);
49a72dfb 791 return 1;
cfb6eeb4
YH
792}
793
49a72dfb
AL
794static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
795 struct sock *sk, struct request_sock *req,
796 struct sk_buff *skb)
cfb6eeb4
YH
797{
798 struct in6_addr *saddr, *daddr;
49a72dfb
AL
799 struct tcp_md5sig_pool *hp;
800 struct hash_desc *desc;
801 struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4
YH
802
803 if (sk) {
804 saddr = &inet6_sk(sk)->saddr;
805 daddr = &inet6_sk(sk)->daddr;
49a72dfb 806 } else if (req) {
cfb6eeb4
YH
807 saddr = &inet6_rsk(req)->loc_addr;
808 daddr = &inet6_rsk(req)->rmt_addr;
49a72dfb
AL
809 } else {
810 struct ipv6hdr *ip6h = ipv6_hdr(skb);
811 saddr = &ip6h->saddr;
812 daddr = &ip6h->daddr;
cfb6eeb4 813 }
49a72dfb
AL
814
815 hp = tcp_get_md5sig_pool();
816 if (!hp)
817 goto clear_hash_noput;
818 desc = &hp->md5_desc;
819
820 if (crypto_hash_init(desc))
821 goto clear_hash;
822
823 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
824 goto clear_hash;
825 if (tcp_md5_hash_header(hp, th))
826 goto clear_hash;
827 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
828 goto clear_hash;
829 if (tcp_md5_hash_key(hp, key))
830 goto clear_hash;
831 if (crypto_hash_final(desc, md5_hash))
832 goto clear_hash;
833
834 tcp_put_md5sig_pool();
835 return 0;
836
837clear_hash:
838 tcp_put_md5sig_pool();
839clear_hash_noput:
840 memset(md5_hash, 0, 16);
841 return 1;
cfb6eeb4
YH
842}
843
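/*
 * Verify the TCP MD5 signature option on an inbound segment against the
 * key configured for the source address; returns 1 (drop) on a missing,
 * unexpected or mismatching signature.
 */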
844static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
845{
846 __u8 *hash_location = NULL;
847 struct tcp_md5sig_key *hash_expected;
0660e03f 848 struct ipv6hdr *ip6h = ipv6_hdr(skb);
aa8223c7 849 struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 850 int genhash;
cfb6eeb4
YH
851 u8 newhash[16];
852
853 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
7d5d5525 854 hash_location = tcp_parse_md5sig_option(th);
cfb6eeb4 855
785957d3
DM
856 /* We've parsed the options - do we have a hash? */
857 if (!hash_expected && !hash_location)
858 return 0;
859
860 if (hash_expected && !hash_location) {
861 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
cfb6eeb4
YH
862 return 1;
863 }
864
785957d3
DM
865 if (!hash_expected && hash_location) {
866 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
cfb6eeb4
YH
867 return 1;
868 }
869
870 /* check the signature */
49a72dfb
AL
871 genhash = tcp_v6_md5_hash_skb(newhash,
872 hash_expected,
873 NULL, NULL, skb);
874
cfb6eeb4
YH
875 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
876 if (net_ratelimit()) {
5b095d98 877 printk(KERN_INFO "MD5 Hash %s for (%pI6, %u)->(%pI6, %u)\n",
cfb6eeb4 878 genhash ? "failed" : "mismatch",
0c6ce78a
HH
879 &ip6h->saddr, ntohs(th->source),
880 &ip6h->daddr, ntohs(th->dest));
cfb6eeb4
YH
881 }
882 return 1;
883 }
884 return 0;
885}
886#endif
887
c6aefafb 888struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
1da177e4 889 .family = AF_INET6,
2e6599cb 890 .obj_size = sizeof(struct tcp6_request_sock),
1da177e4 891 .rtx_syn_ack = tcp_v6_send_synack,
60236fdd
ACM
892 .send_ack = tcp_v6_reqsk_send_ack,
893 .destructor = tcp_v6_reqsk_destructor,
1da177e4
LT
894 .send_reset = tcp_v6_send_reset
895};
896
cfb6eeb4 897#ifdef CONFIG_TCP_MD5SIG
b6332e6c 898static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
cfb6eeb4 899 .md5_lookup = tcp_v6_reqsk_md5_lookup,
cfb6eeb4 900};
b6332e6c 901#endif
cfb6eeb4 902
6d6ee43e
ACM
903static struct timewait_sock_ops tcp6_timewait_sock_ops = {
904 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
905 .twsk_unique = tcp_twsk_unique,
cfb6eeb4 906 .twsk_destructor= tcp_twsk_destructor,
6d6ee43e
ACM
907};
908
8292a17a 909static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
1da177e4
LT
910{
911 struct ipv6_pinfo *np = inet6_sk(sk);
aa8223c7 912 struct tcphdr *th = tcp_hdr(skb);
1da177e4 913
84fa7933 914 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1da177e4 915 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
663ead3b 916 skb->csum_start = skb_transport_header(skb) - skb->head;
ff1dcadb 917 skb->csum_offset = offsetof(struct tcphdr, check);
1da177e4 918 } else {
1ab1457c 919 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
07f0757a 920 csum_partial(th, th->doff<<2,
1da177e4
LT
921 skb->csum));
922 }
923}
924
a430a43d
HX
925static int tcp_v6_gso_send_check(struct sk_buff *skb)
926{
927 struct ipv6hdr *ipv6h;
928 struct tcphdr *th;
929
930 if (!pskb_may_pull(skb, sizeof(*th)))
931 return -EINVAL;
932
0660e03f 933 ipv6h = ipv6_hdr(skb);
aa8223c7 934 th = tcp_hdr(skb);
a430a43d
HX
935
936 th->check = 0;
937 th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
938 IPPROTO_TCP, 0);
663ead3b 939 skb->csum_start = skb_transport_header(skb) - skb->head;
ff1dcadb 940 skb->csum_offset = offsetof(struct tcphdr, check);
84fa7933 941 skb->ip_summed = CHECKSUM_PARTIAL;
a430a43d
HX
942 return 0;
943}
1da177e4 944
684f2176
HX
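/*
 * GRO receive hook for TCPv6: validate the pseudo-header checksum when the
 * NIC supplied CHECKSUM_COMPLETE, flush the packet when no checksum help is
 * available, and otherwise hand off to the generic tcp_gro_receive().
 */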
945struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb)
946{
947 struct ipv6hdr *iph = ipv6_hdr(skb);
948
949 switch (skb->ip_summed) {
950 case CHECKSUM_COMPLETE:
86911732 951 if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
684f2176
HX
952 skb->csum)) {
953 skb->ip_summed = CHECKSUM_UNNECESSARY;
954 break;
955 }
956
957 /* fall through */
958 case CHECKSUM_NONE:
959 NAPI_GRO_CB(skb)->flush = 1;
960 return NULL;
961 }
962
963 return tcp_gro_receive(head, skb);
964}
965EXPORT_SYMBOL(tcp6_gro_receive);
966
967int tcp6_gro_complete(struct sk_buff *skb)
968{
969 struct ipv6hdr *iph = ipv6_hdr(skb);
970 struct tcphdr *th = tcp_hdr(skb);
971
972 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
973 &iph->saddr, &iph->daddr, 0);
974 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
975
976 return tcp_gro_complete(skb);
977}
978EXPORT_SYMBOL(tcp6_gro_complete);
979
626e264d
IJ
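/*
 * Common helper for stateless replies (RSTs and timewait/request ACKs):
 * build a bare TCP header with optional timestamp and MD5 options and send
 * it through the per-namespace control socket.
 */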
980static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
981 u32 ts, struct tcp_md5sig_key *key, int rst)
1da177e4 982{
aa8223c7 983 struct tcphdr *th = tcp_hdr(skb), *t1;
1da177e4
LT
984 struct sk_buff *buff;
985 struct flowi fl;
c346dca1 986 struct net *net = dev_net(skb->dst->dev);
e5047992 987 struct sock *ctl_sk = net->ipv6.tcp_sk;
77c676da 988 unsigned int tot_len = sizeof(struct tcphdr);
81ada62d 989 __be32 *topt;
1da177e4 990
626e264d
IJ
991 if (ts)
992 tot_len += TCPOLEN_TSTAMP_ALIGNED;
cfb6eeb4 993#ifdef CONFIG_TCP_MD5SIG
cfb6eeb4
YH
994 if (key)
995 tot_len += TCPOLEN_MD5SIG_ALIGNED;
996#endif
997
cfb6eeb4 998 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1da177e4 999 GFP_ATOMIC);
1ab1457c
YH
1000 if (buff == NULL)
1001 return;
1da177e4 1002
cfb6eeb4 1003 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1da177e4 1004
cfb6eeb4 1005 t1 = (struct tcphdr *) skb_push(buff, tot_len);
1da177e4
LT
1006
1007 /* Swap the send and the receive. */
1008 memset(t1, 0, sizeof(*t1));
1009 t1->dest = th->source;
1010 t1->source = th->dest;
cfb6eeb4 1011 t1->doff = tot_len / 4;
626e264d
IJ
1012 t1->seq = htonl(seq);
1013 t1->ack_seq = htonl(ack);
1014 t1->ack = !rst || !th->ack;
1015 t1->rst = rst;
1016 t1->window = htons(win);
1da177e4 1017
81ada62d
IJ
1018 topt = (__be32 *)(t1 + 1);
1019
626e264d
IJ
1020 if (ts) {
1021 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1022 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1023 *topt++ = htonl(tcp_time_stamp);
1024 *topt++ = htonl(ts);
1025 }
1026
cfb6eeb4
YH
1027#ifdef CONFIG_TCP_MD5SIG
1028 if (key) {
81ada62d
IJ
1029 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1030 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1031 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
78e645cb
IJ
1032 &ipv6_hdr(skb)->saddr,
1033 &ipv6_hdr(skb)->daddr, t1);
cfb6eeb4
YH
1034 }
1035#endif
1036
07f0757a 1037 buff->csum = csum_partial(t1, tot_len, 0);
1da177e4
LT
1038
1039 memset(&fl, 0, sizeof(fl));
0660e03f
ACM
1040 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1041 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1da177e4
LT
1042
1043 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
52cd5750 1044 tot_len, IPPROTO_TCP,
1da177e4
LT
1045 buff->csum);
1046
1047 fl.proto = IPPROTO_TCP;
505cbfc5 1048 fl.oif = inet6_iif(skb);
1da177e4
LT
1049 fl.fl_ip_dport = t1->dest;
1050 fl.fl_ip_sport = t1->source;
beb8d13b 1051 security_skb_classify_flow(skb, &fl);
1da177e4 1052
c20121ae
DL
1053 /* Pass a socket to ip6_dst_lookup either it is for RST
1054 * Underlying function will use this to retrieve the network
1055 * namespace
1056 */
e5047992 1057 if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
52479b62 1058 if (xfrm_lookup(net, &buff->dst, &fl, NULL, 0) >= 0) {
e5047992 1059 ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
63231bdd 1060 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
626e264d
IJ
1061 if (rst)
1062 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
1da177e4 1063 return;
ecc51b6d 1064 }
1da177e4
LT
1065 }
1066
1067 kfree_skb(buff);
1068}
1069
626e264d 1070static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1da177e4 1071{
626e264d
IJ
1072 struct tcphdr *th = tcp_hdr(skb);
1073 u32 seq = 0, ack_seq = 0;
fa3e5b4e 1074 struct tcp_md5sig_key *key = NULL;
1da177e4 1075
626e264d 1076 if (th->rst)
1da177e4
LT
1077 return;
1078
626e264d
IJ
1079 if (!ipv6_unicast_destination(skb))
1080 return;
1da177e4 1081
cfb6eeb4 1082#ifdef CONFIG_TCP_MD5SIG
626e264d
IJ
1083 if (sk)
1084 key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
cfb6eeb4
YH
1085#endif
1086
626e264d
IJ
1087 if (th->ack)
1088 seq = ntohl(th->ack_seq);
1089 else
1090 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
1091 (th->doff << 2);
1da177e4 1092
626e264d
IJ
1093 tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
1094}
1da177e4 1095
626e264d
IJ
1096static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
1097 struct tcp_md5sig_key *key)
1098{
1099 tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
1da177e4
LT
1100}
1101
1102static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1103{
8feaf0c0 1104 struct inet_timewait_sock *tw = inet_twsk(sk);
cfb6eeb4 1105 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1da177e4 1106
9501f972 1107 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
8feaf0c0 1108 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
9501f972 1109 tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));
1da177e4 1110
8feaf0c0 1111 inet_twsk_put(tw);
1da177e4
LT
1112}
1113
6edafaaf
GJ
1114static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
1115 struct request_sock *req)
1da177e4 1116{
9501f972 1117 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
6edafaaf 1118 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
1da177e4
LT
1119}
1120
1121
1122static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1123{
60236fdd 1124 struct request_sock *req, **prev;
aa8223c7 1125 const struct tcphdr *th = tcp_hdr(skb);
1da177e4
LT
1126 struct sock *nsk;
1127
1128 /* Find possible connection requests. */
8129765a 1129 req = inet6_csk_search_req(sk, &prev, th->source,
0660e03f
ACM
1130 &ipv6_hdr(skb)->saddr,
1131 &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1da177e4
LT
1132 if (req)
1133 return tcp_check_req(sk, skb, req, prev);
1134
3b1e0a65 1135 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
d86e0dac
PE
1136 &ipv6_hdr(skb)->saddr, th->source,
1137 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1da177e4
LT
1138
1139 if (nsk) {
1140 if (nsk->sk_state != TCP_TIME_WAIT) {
1141 bh_lock_sock(nsk);
1142 return nsk;
1143 }
9469c7b4 1144 inet_twsk_put(inet_twsk(nsk));
1da177e4
LT
1145 return NULL;
1146 }
1147
c6aefafb 1148#ifdef CONFIG_SYN_COOKIES
1da177e4 1149 if (!th->rst && !th->syn && th->ack)
c6aefafb 1150 sk = cookie_v6_check(sk, skb);
1da177e4
LT
1151#endif
1152 return sk;
1153}
1154
1da177e4
LT
1155/* FIXME: this is substantially similar to the ipv4 code.
1156 * Can some kind of merge be done? -- erics
1157 */
1158static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1159{
ca304b61 1160 struct inet6_request_sock *treq;
1da177e4
LT
1161 struct ipv6_pinfo *np = inet6_sk(sk);
1162 struct tcp_options_received tmp_opt;
1163 struct tcp_sock *tp = tcp_sk(sk);
60236fdd 1164 struct request_sock *req = NULL;
1da177e4 1165 __u32 isn = TCP_SKB_CB(skb)->when;
c6aefafb
GG
1166#ifdef CONFIG_SYN_COOKIES
1167 int want_cookie = 0;
1168#else
1169#define want_cookie 0
1170#endif
1da177e4
LT
1171
1172 if (skb->protocol == htons(ETH_P_IP))
1173 return tcp_v4_conn_request(sk, skb);
1174
1175 if (!ipv6_unicast_destination(skb))
1ab1457c 1176 goto drop;
1da177e4 1177
463c84b9 1178 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1da177e4 1179 if (net_ratelimit())
c6aefafb
GG
1180 syn_flood_warning(skb);
1181#ifdef CONFIG_SYN_COOKIES
1182 if (sysctl_tcp_syncookies)
1183 want_cookie = 1;
1184 else
1185#endif
1ab1457c 1186 goto drop;
1da177e4
LT
1187 }
1188
463c84b9 1189 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1da177e4
LT
1190 goto drop;
1191
ca304b61 1192 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1da177e4
LT
1193 if (req == NULL)
1194 goto drop;
1195
cfb6eeb4
YH
1196#ifdef CONFIG_TCP_MD5SIG
1197 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1198#endif
1199
1da177e4
LT
1200 tcp_clear_options(&tmp_opt);
1201 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1202 tmp_opt.user_mss = tp->rx_opt.user_mss;
1203
1204 tcp_parse_options(skb, &tmp_opt, 0);
1205
4dfc2817 1206 if (want_cookie && !tmp_opt.saw_tstamp)
c6aefafb 1207 tcp_clear_options(&tmp_opt);
c6aefafb 1208
1da177e4
LT
1209 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1210 tcp_openreq_init(req, &tmp_opt, skb);
1211
ca304b61 1212 treq = inet6_rsk(req);
0660e03f
ACM
1213 ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
1214 ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
c6aefafb
GG
1215 if (!want_cookie)
1216 TCP_ECN_create_request(req, tcp_hdr(skb));
1217
1218 if (want_cookie) {
1219 isn = cookie_v6_init_sequence(sk, skb, &req->mss);
4dfc2817 1220 req->cookie_ts = tmp_opt.tstamp_ok;
c6aefafb
GG
1221 } else if (!isn) {
1222 if (ipv6_opt_accepted(sk, skb) ||
1223 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1224 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1225 atomic_inc(&skb->users);
1226 treq->pktopts = skb;
1227 }
1228 treq->iif = sk->sk_bound_dev_if;
1da177e4 1229
c6aefafb
GG
1230 /* So that link locals have meaning */
1231 if (!sk->sk_bound_dev_if &&
1232 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1233 treq->iif = inet6_iif(skb);
1da177e4 1234
a94f723d 1235 isn = tcp_v6_init_sequence(skb);
c6aefafb 1236 }
1da177e4 1237
2e6599cb 1238 tcp_rsk(req)->snt_isn = isn;
1da177e4 1239
4237c75c
VY
1240 security_inet_conn_request(sk, skb, req);
1241
fd80eb94 1242 if (tcp_v6_send_synack(sk, req))
1da177e4
LT
1243 goto drop;
1244
c6aefafb
GG
1245 if (!want_cookie) {
1246 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1247 return 0;
1248 }
1da177e4
LT
1249
1250drop:
1251 if (req)
60236fdd 1252 reqsk_free(req);
1da177e4 1253
1da177e4
LT
1254 return 0; /* don't send reset */
1255}
1256
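/*
 * Create the child socket once the handshake completes.  The v4-mapped
 * case delegates to tcp_v4_syn_recv_sock() and then repoints the af_ops;
 * the native case clones IPv6 options and the pktoptions skb saved with
 * the SYN, and copies any MD5 key configured for the peer.
 */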
1257static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
60236fdd 1258 struct request_sock *req,
1da177e4
LT
1259 struct dst_entry *dst)
1260{
78d15e82 1261 struct inet6_request_sock *treq;
1da177e4
LT
1262 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1263 struct tcp6_sock *newtcp6sk;
1264 struct inet_sock *newinet;
1265 struct tcp_sock *newtp;
1266 struct sock *newsk;
1267 struct ipv6_txoptions *opt;
cfb6eeb4
YH
1268#ifdef CONFIG_TCP_MD5SIG
1269 struct tcp_md5sig_key *key;
1270#endif
1da177e4
LT
1271
1272 if (skb->protocol == htons(ETH_P_IP)) {
1273 /*
1274 * v6 mapped
1275 */
1276
1277 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1278
1ab1457c 1279 if (newsk == NULL)
1da177e4
LT
1280 return NULL;
1281
1282 newtcp6sk = (struct tcp6_sock *)newsk;
1283 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1284
1285 newinet = inet_sk(newsk);
1286 newnp = inet6_sk(newsk);
1287 newtp = tcp_sk(newsk);
1288
1289 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1290
1291 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
1292 newinet->daddr);
1293
1294 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
1295 newinet->saddr);
1296
1297 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1298
8292a17a 1299 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1da177e4 1300 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
1301#ifdef CONFIG_TCP_MD5SIG
1302 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1303#endif
1304
1da177e4
LT
1305 newnp->pktoptions = NULL;
1306 newnp->opt = NULL;
505cbfc5 1307 newnp->mcast_oif = inet6_iif(skb);
0660e03f 1308 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1da177e4 1309
e6848976
ACM
1310 /*
1311 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1312 * here, tcp_create_openreq_child now does this for us, see the comment in
1313 * that function for the gory details. -acme
1da177e4 1314 */
1da177e4
LT
1315
 1316 /* It is a tricky place. Until this moment IPv4 tcp
8292a17a 1317 worked with IPv6 icsk.icsk_af_ops.
1da177e4
LT
1318 Sync it now.
1319 */
d83d8461 1320 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1da177e4
LT
1321
1322 return newsk;
1323 }
1324
78d15e82 1325 treq = inet6_rsk(req);
1da177e4
LT
1326 opt = np->opt;
1327
1328 if (sk_acceptq_is_full(sk))
1329 goto out_overflow;
1330
1da177e4
LT
1331 if (dst == NULL) {
1332 struct in6_addr *final_p = NULL, final;
1333 struct flowi fl;
1334
1335 memset(&fl, 0, sizeof(fl));
1336 fl.proto = IPPROTO_TCP;
2e6599cb 1337 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1da177e4
LT
1338 if (opt && opt->srcrt) {
1339 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
1340 ipv6_addr_copy(&final, &fl.fl6_dst);
1341 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1342 final_p = &final;
1343 }
2e6599cb 1344 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1da177e4 1345 fl.oif = sk->sk_bound_dev_if;
2e6599cb 1346 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
fd507037 1347 fl.fl_ip_sport = inet_rsk(req)->loc_port;
4237c75c 1348 security_req_classify_flow(req, &fl);
1da177e4
LT
1349
1350 if (ip6_dst_lookup(sk, &dst, &fl))
1351 goto out;
1352
1353 if (final_p)
1354 ipv6_addr_copy(&fl.fl6_dst, final_p);
1355
52479b62 1356 if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
1da177e4 1357 goto out;
1ab1457c 1358 }
1da177e4
LT
1359
1360 newsk = tcp_create_openreq_child(sk, req, skb);
1361 if (newsk == NULL)
1362 goto out;
1363
e6848976
ACM
1364 /*
1365 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1366 * count here, tcp_create_openreq_child now does this for us, see the
1367 * comment in that function for the gory details. -acme
1368 */
1da177e4 1369
59eed279 1370 newsk->sk_gso_type = SKB_GSO_TCPV6;
8e1ef0a9 1371 __ip6_dst_store(newsk, dst, NULL, NULL);
1da177e4
LT
1372
1373 newtcp6sk = (struct tcp6_sock *)newsk;
1374 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1375
1376 newtp = tcp_sk(newsk);
1377 newinet = inet_sk(newsk);
1378 newnp = inet6_sk(newsk);
1379
1380 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1381
2e6599cb
ACM
1382 ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1383 ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1384 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1385 newsk->sk_bound_dev_if = treq->iif;
1da177e4 1386
1ab1457c 1387 /* Now IPv6 options...
1da177e4
LT
1388
1389 First: no IPv4 options.
1390 */
1391 newinet->opt = NULL;
d35690be 1392 newnp->ipv6_fl_list = NULL;
1da177e4
LT
1393
1394 /* Clone RX bits */
1395 newnp->rxopt.all = np->rxopt.all;
1396
1397 /* Clone pktoptions received with SYN */
1398 newnp->pktoptions = NULL;
2e6599cb
ACM
1399 if (treq->pktopts != NULL) {
1400 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1401 kfree_skb(treq->pktopts);
1402 treq->pktopts = NULL;
1da177e4
LT
1403 if (newnp->pktoptions)
1404 skb_set_owner_r(newnp->pktoptions, newsk);
1405 }
1406 newnp->opt = NULL;
505cbfc5 1407 newnp->mcast_oif = inet6_iif(skb);
0660e03f 1408 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1da177e4
LT
1409
1410 /* Clone native IPv6 options from listening socket (if any)
1411
 1412 Yes, keeping a reference count would be much more clever,
 1413 but we do one more thing here: reattach optmem
1414 to newsk.
1415 */
1416 if (opt) {
1417 newnp->opt = ipv6_dup_options(newsk, opt);
1418 if (opt != np->opt)
1419 sock_kfree_s(sk, opt, opt->tot_len);
1420 }
1421
d83d8461 1422 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1da177e4 1423 if (newnp->opt)
d83d8461
ACM
1424 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1425 newnp->opt->opt_flen);
1da177e4 1426
5d424d5a 1427 tcp_mtup_init(newsk);
1da177e4
LT
1428 tcp_sync_mss(newsk, dst_mtu(dst));
1429 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1430 tcp_initialize_rcv_mss(newsk);
1431
1432 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
1433
cfb6eeb4
YH
1434#ifdef CONFIG_TCP_MD5SIG
1435 /* Copy over the MD5 key from the original socket */
1436 if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1437 /* We're using one, so create a matching key
1438 * on the newsk structure. If we fail to get
1439 * memory, then we end up not copying the key
1440 * across. Shucks.
1441 */
af879cc7
ACM
1442 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1443 if (newkey != NULL)
cfb6eeb4
YH
1444 tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
1445 newkey, key->keylen);
cfb6eeb4
YH
1446 }
1447#endif
1448
ab1e0a13 1449 __inet6_hash(newsk);
e56d8b8a 1450 __inet_inherit_port(sk, newsk);
1da177e4
LT
1451
1452 return newsk;
1453
1454out_overflow:
de0744af 1455 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1da177e4 1456out:
de0744af 1457 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1da177e4
LT
1458 if (opt && opt != np->opt)
1459 sock_kfree_s(sk, opt, opt->tot_len);
1460 dst_release(dst);
1461 return NULL;
1462}
1463
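/*
 * Use hardware checksum help where available; otherwise set up the
 * pseudo-header sum and, for short segments (<= 76 bytes), complete the
 * checksum in software immediately.
 */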
b51655b9 1464static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1da177e4 1465{
84fa7933 1466 if (skb->ip_summed == CHECKSUM_COMPLETE) {
684f2176 1467 if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
0660e03f 1468 &ipv6_hdr(skb)->daddr, skb->csum)) {
fb286bb2 1469 skb->ip_summed = CHECKSUM_UNNECESSARY;
1da177e4 1470 return 0;
fb286bb2 1471 }
1da177e4 1472 }
fb286bb2 1473
684f2176 1474 skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
0660e03f
ACM
1475 &ipv6_hdr(skb)->saddr,
1476 &ipv6_hdr(skb)->daddr, 0));
fb286bb2 1477
1da177e4 1478 if (skb->len <= 76) {
fb286bb2 1479 return __skb_checksum_complete(skb);
1da177e4
LT
1480 }
1481 return 0;
1482}
1483
 1484/* The socket must have its spinlock held when we get
1485 * here.
1486 *
1487 * We have a potential double-lock case here, so even when
1488 * doing backlog processing we use the BH locking scheme.
1489 * This is because we cannot sleep with the original spinlock
1490 * held.
1491 */
1492static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1493{
1494 struct ipv6_pinfo *np = inet6_sk(sk);
1495 struct tcp_sock *tp;
1496 struct sk_buff *opt_skb = NULL;
1497
1498 /* Imagine: socket is IPv6. IPv4 packet arrives,
1499 goes to IPv4 receive handler and backlogged.
1500 From backlog it always goes here. Kerboom...
1501 Fortunately, tcp_rcv_established and rcv_established
 1502 handle them correctly, but it is not the case with
1503 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1504 */
1505
1506 if (skb->protocol == htons(ETH_P_IP))
1507 return tcp_v4_do_rcv(sk, skb);
1508
cfb6eeb4
YH
1509#ifdef CONFIG_TCP_MD5SIG
1510 if (tcp_v6_inbound_md5_hash (sk, skb))
1511 goto discard;
1512#endif
1513
fda9ef5d 1514 if (sk_filter(sk, skb))
1da177e4
LT
1515 goto discard;
1516
1517 /*
1518 * socket locking is here for SMP purposes as backlog rcv
1519 * is currently called with bh processing disabled.
1520 */
1521
1522 /* Do Stevens' IPV6_PKTOPTIONS.
1523
1524 Yes, guys, it is the only place in our code, where we
1525 may make it not affecting IPv4.
1526 The rest of code is protocol independent,
1527 and I do not like idea to uglify IPv4.
1528
1529 Actually, all the idea behind IPV6_PKTOPTIONS
 1530 looks not very well thought out. For now we latch
1531 options, received in the last packet, enqueued
1532 by tcp. Feel free to propose better solution.
1ab1457c 1533 --ANK (980728)
1da177e4
LT
1534 */
1535 if (np->rxopt.all)
1536 opt_skb = skb_clone(skb, GFP_ATOMIC);
1537
1538 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1539 TCP_CHECK_TIMER(sk);
aa8223c7 1540 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1da177e4
LT
1541 goto reset;
1542 TCP_CHECK_TIMER(sk);
1543 if (opt_skb)
1544 goto ipv6_pktoptions;
1545 return 0;
1546 }
1547
ab6a5bb6 1548 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1da177e4
LT
1549 goto csum_err;
1550
1ab1457c 1551 if (sk->sk_state == TCP_LISTEN) {
1da177e4
LT
1552 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1553 if (!nsk)
1554 goto discard;
1555
1556 /*
1557 * Queue it on the new socket if the new socket is active,
 1558 * otherwise we just short-circuit this and continue with
 1559 * the new socket.
1560 */
1ab1457c 1561 if(nsk != sk) {
1da177e4
LT
1562 if (tcp_child_process(sk, nsk, skb))
1563 goto reset;
1564 if (opt_skb)
1565 __kfree_skb(opt_skb);
1566 return 0;
1567 }
1568 }
1569
1570 TCP_CHECK_TIMER(sk);
aa8223c7 1571 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1da177e4
LT
1572 goto reset;
1573 TCP_CHECK_TIMER(sk);
1574 if (opt_skb)
1575 goto ipv6_pktoptions;
1576 return 0;
1577
1578reset:
cfb6eeb4 1579 tcp_v6_send_reset(sk, skb);
1da177e4
LT
1580discard:
1581 if (opt_skb)
1582 __kfree_skb(opt_skb);
1583 kfree_skb(skb);
1584 return 0;
1585csum_err:
63231bdd 1586 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1da177e4
LT
1587 goto discard;
1588
1589
1590ipv6_pktoptions:
1591 /* Do you ask, what is it?
1592
1593 1. skb was enqueued by tcp.
1594 2. skb is added to tail of read queue, rather than out of order.
1595 3. socket is not in passive state.
1596 4. Finally, it really contains options, which user wants to receive.
1597 */
1598 tp = tcp_sk(sk);
1599 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1600 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
333fad53 1601 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
505cbfc5 1602 np->mcast_oif = inet6_iif(opt_skb);
333fad53 1603 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
0660e03f 1604 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1da177e4
LT
1605 if (ipv6_opt_accepted(sk, opt_skb)) {
1606 skb_set_owner_r(opt_skb, sk);
1607 opt_skb = xchg(&np->pktoptions, opt_skb);
1608 } else {
1609 __kfree_skb(opt_skb);
1610 opt_skb = xchg(&np->pktoptions, NULL);
1611 }
1612 }
1613
1614 if (opt_skb)
1615 kfree_skb(opt_skb);
1616 return 0;
1617}
1618
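/*
 * Entry point for received TCPv6 segments: validate the header and
 * checksum, look up the owning socket and either process the segment
 * directly, prequeue it, or add it to the backlog when the socket is
 * owned by user context.
 */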
e5bbef20 1619static int tcp_v6_rcv(struct sk_buff *skb)
1da177e4 1620{
1ab1457c 1621 struct tcphdr *th;
1da177e4
LT
1622 struct sock *sk;
1623 int ret;
a86b1e30 1624 struct net *net = dev_net(skb->dev);
1da177e4
LT
1625
1626 if (skb->pkt_type != PACKET_HOST)
1627 goto discard_it;
1628
1629 /*
1630 * Count it even if it's bad.
1631 */
63231bdd 1632 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1da177e4
LT
1633
1634 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1635 goto discard_it;
1636
aa8223c7 1637 th = tcp_hdr(skb);
1da177e4
LT
1638
1639 if (th->doff < sizeof(struct tcphdr)/4)
1640 goto bad_packet;
1641 if (!pskb_may_pull(skb, th->doff*4))
1642 goto discard_it;
1643
60476372 1644 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1da177e4
LT
1645 goto bad_packet;
1646
aa8223c7 1647 th = tcp_hdr(skb);
1da177e4
LT
1648 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1649 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1650 skb->len - th->doff*4);
1651 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1652 TCP_SKB_CB(skb)->when = 0;
0660e03f 1653 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
1da177e4
LT
1654 TCP_SKB_CB(skb)->sacked = 0;
1655
9a1f27c4 1656 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1da177e4
LT
1657 if (!sk)
1658 goto no_tcp_socket;
1659
1660process:
1661 if (sk->sk_state == TCP_TIME_WAIT)
1662 goto do_time_wait;
1663
1664 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1665 goto discard_and_relse;
1666
fda9ef5d 1667 if (sk_filter(sk, skb))
1da177e4
LT
1668 goto discard_and_relse;
1669
1670 skb->dev = NULL;
1671
293b9c42 1672 bh_lock_sock_nested(sk);
1da177e4
LT
1673 ret = 0;
1674 if (!sock_owned_by_user(sk)) {
1a2449a8 1675#ifdef CONFIG_NET_DMA
1ab1457c 1676 struct tcp_sock *tp = tcp_sk(sk);
b4caea8a 1677 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
f67b4599 1678 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1ab1457c
YH
1679 if (tp->ucopy.dma_chan)
1680 ret = tcp_v6_do_rcv(sk, skb);
1681 else
1a2449a8
CL
1682#endif
1683 {
1684 if (!tcp_prequeue(sk, skb))
1685 ret = tcp_v6_do_rcv(sk, skb);
1686 }
1da177e4
LT
1687 } else
1688 sk_add_backlog(sk, skb);
1689 bh_unlock_sock(sk);
1690
1691 sock_put(sk);
1692 return ret ? -1 : 0;
1693
1694no_tcp_socket:
1695 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1696 goto discard_it;
1697
1698 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1699bad_packet:
63231bdd 1700 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1da177e4 1701 } else {
cfb6eeb4 1702 tcp_v6_send_reset(NULL, skb);
1da177e4
LT
1703 }
1704
1705discard_it:
1706
1707 /*
1708 * Discard frame
1709 */
1710
1711 kfree_skb(skb);
1712 return 0;
1713
1714discard_and_relse:
1715 sock_put(sk);
1716 goto discard_it;
1717
1718do_time_wait:
1719 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1720 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1721 goto discard_it;
1722 }
1723
1724 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
63231bdd 1725 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
9469c7b4 1726 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1727 goto discard_it;
1728 }
1729
9469c7b4 1730 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4
LT
1731 case TCP_TW_SYN:
1732 {
1733 struct sock *sk2;
1734
c346dca1 1735 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
0660e03f 1736 &ipv6_hdr(skb)->daddr,
505cbfc5 1737 ntohs(th->dest), inet6_iif(skb));
1da177e4 1738 if (sk2 != NULL) {
295ff7ed
ACM
1739 struct inet_timewait_sock *tw = inet_twsk(sk);
1740 inet_twsk_deschedule(tw, &tcp_death_row);
1741 inet_twsk_put(tw);
1da177e4
LT
1742 sk = sk2;
1743 goto process;
1744 }
1745 /* Fall through to ACK */
1746 }
1747 case TCP_TW_ACK:
1748 tcp_v6_timewait_ack(sk, skb);
1749 break;
1750 case TCP_TW_RST:
1751 goto no_tcp_socket;
1752 case TCP_TW_SUCCESS:;
1753 }
1754 goto discard_it;
1755}
1756
1da177e4
LT
1757static int tcp_v6_remember_stamp(struct sock *sk)
1758{
1759 /* Alas, not yet... */
1760 return 0;
1761}
1762
8292a17a 1763static struct inet_connection_sock_af_ops ipv6_specific = {
543d9cfe
ACM
1764 .queue_xmit = inet6_csk_xmit,
1765 .send_check = tcp_v6_send_check,
1766 .rebuild_header = inet6_sk_rebuild_header,
1767 .conn_request = tcp_v6_conn_request,
1768 .syn_recv_sock = tcp_v6_syn_recv_sock,
1769 .remember_stamp = tcp_v6_remember_stamp,
1770 .net_header_len = sizeof(struct ipv6hdr),
1771 .setsockopt = ipv6_setsockopt,
1772 .getsockopt = ipv6_getsockopt,
1773 .addr2sockaddr = inet6_csk_addr2sockaddr,
1774 .sockaddr_len = sizeof(struct sockaddr_in6),
ab1e0a13 1775 .bind_conflict = inet6_csk_bind_conflict,
3fdadf7d 1776#ifdef CONFIG_COMPAT
543d9cfe
ACM
1777 .compat_setsockopt = compat_ipv6_setsockopt,
1778 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1779#endif
1da177e4
LT
1780};
1781
cfb6eeb4 1782#ifdef CONFIG_TCP_MD5SIG
a928630a 1783static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
cfb6eeb4 1784 .md5_lookup = tcp_v6_md5_lookup,
49a72dfb 1785 .calc_md5_hash = tcp_v6_md5_hash_skb,
cfb6eeb4
YH
1786 .md5_add = tcp_v6_md5_add_func,
1787 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1788};
a928630a 1789#endif
cfb6eeb4 1790
1da177e4
LT
1791/*
1792 * TCP over IPv4 via INET6 API
1793 */
1794
8292a17a 1795static struct inet_connection_sock_af_ops ipv6_mapped = {
543d9cfe
ACM
1796 .queue_xmit = ip_queue_xmit,
1797 .send_check = tcp_v4_send_check,
1798 .rebuild_header = inet_sk_rebuild_header,
1799 .conn_request = tcp_v6_conn_request,
1800 .syn_recv_sock = tcp_v6_syn_recv_sock,
1801 .remember_stamp = tcp_v4_remember_stamp,
1802 .net_header_len = sizeof(struct iphdr),
1803 .setsockopt = ipv6_setsockopt,
1804 .getsockopt = ipv6_getsockopt,
1805 .addr2sockaddr = inet6_csk_addr2sockaddr,
1806 .sockaddr_len = sizeof(struct sockaddr_in6),
ab1e0a13 1807 .bind_conflict = inet6_csk_bind_conflict,
3fdadf7d 1808#ifdef CONFIG_COMPAT
543d9cfe
ACM
1809 .compat_setsockopt = compat_ipv6_setsockopt,
1810 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1811#endif
1da177e4
LT
1812};
1813
cfb6eeb4 1814#ifdef CONFIG_TCP_MD5SIG
a928630a 1815static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
cfb6eeb4 1816 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1817 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4
YH
1818 .md5_add = tcp_v6_md5_add_func,
1819 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1820};
a928630a 1821#endif
cfb6eeb4 1822
1da177e4
LT
1823/* NOTE: Many fields are already set to zero by the call to
1824 * sk_alloc(), so they need not be initialized here.
1825 */
1826static int tcp_v6_init_sock(struct sock *sk)
1827{
6687e988 1828 struct inet_connection_sock *icsk = inet_csk(sk);
1829 struct tcp_sock *tp = tcp_sk(sk);
1830
1831 skb_queue_head_init(&tp->out_of_order_queue);
1832 tcp_init_xmit_timers(sk);
1833 tcp_prequeue_init(tp);
1834
6687e988 1835 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1836 tp->mdev = TCP_TIMEOUT_INIT;
1837
1838 /* So many TCP implementations out there (incorrectly) count the
1839 * initial SYN frame in their delayed-ACK and congestion control
1840 * algorithms that we must have the following bandaid to talk
1841 * efficiently to them. -DaveM
1842 */
1843 tp->snd_cwnd = 2;
1844
1845 /* See draft-stevens-tcpca-spec-01 for discussion of the
1846 * initialization of these values.
1847 */
1848 tp->snd_ssthresh = 0x7fffffff;
1849 tp->snd_cwnd_clamp = ~0;
c1b4a7e6 1850 tp->mss_cache = 536;
1851
1852 tp->reordering = sysctl_tcp_reordering;
1853
1854 sk->sk_state = TCP_CLOSE;
1855
8292a17a 1856 icsk->icsk_af_ops = &ipv6_specific;
6687e988 1857 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
d83d8461 1858 icsk->icsk_sync_mss = tcp_sync_mss;
1859 sk->sk_write_space = sk_stream_write_space;
1860 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1861
1862#ifdef CONFIG_TCP_MD5SIG
1863 tp->af_specific = &tcp_sock_ipv6_specific;
1864#endif
1865
1866 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1867 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1868
eb4dea58 1869 local_bh_disable();
1748376b 1870 percpu_counter_inc(&tcp_sockets_allocated);
eb4dea58 1871 local_bh_enable();
1872
1873 return 0;
1874}
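/*
 * Sketch, assuming a Linux host with procfs mounted: the per-socket
 * defaults assigned above, sysctl_tcp_wmem[1] and sysctl_tcp_rmem[1],
 * correspond to the middle column of these sysctls as seen from userspace.
 */
#include <stdio.h>

static void show_default(const char *path)
{
	long min, def, max;
	FILE *f = fopen(path, "r");

	if (f && fscanf(f, "%ld %ld %ld", &min, &def, &max) == 3)
		printf("%s: min=%ld default=%ld max=%ld\n", path, min, def, max);
	if (f)
		fclose(f);
}

int main(void)
{
	show_default("/proc/sys/net/ipv4/tcp_wmem");
	show_default("/proc/sys/net/ipv4/tcp_rmem");
	return 0;
}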
1875
7d06b2e0 1876static void tcp_v6_destroy_sock(struct sock *sk)
1da177e4 1877{
1878#ifdef CONFIG_TCP_MD5SIG
1879 /* Clean up the MD5 key list */
1880 if (tcp_sk(sk)->md5sig_info)
1881 tcp_v6_clear_md5_list(sk);
1882#endif
1da177e4 1883 tcp_v4_destroy_sock(sk);
7d06b2e0 1884 inet6_destroy_sock(sk);
1885}
1886
952a10be 1887#ifdef CONFIG_PROC_FS
1da177e4 1888/* Proc filesystem TCPv6 sock list dumping. */
1ab1457c 1889static void get_openreq6(struct seq_file *seq,
60236fdd 1890 struct sock *sk, struct request_sock *req, int i, int uid)
1da177e4 1891{
1da177e4 1892 int ttd = req->expires - jiffies;
1893 struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1894 struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1895
1896 if (ttd < 0)
1897 ttd = 0;
1898
1899 seq_printf(seq,
1900 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1901 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1902 i,
1903 src->s6_addr32[0], src->s6_addr32[1],
1904 src->s6_addr32[2], src->s6_addr32[3],
fd507037 1905 ntohs(inet_rsk(req)->loc_port),
1906 dest->s6_addr32[0], dest->s6_addr32[1],
1907 dest->s6_addr32[2], dest->s6_addr32[3],
2e6599cb 1908 ntohs(inet_rsk(req)->rmt_port),
1909 TCP_SYN_RECV,
1910 0,0, /* could print option size, but that is af dependent. */
1911 1, /* timers active (only the expire timer) */
1912 jiffies_to_clock_t(ttd),
1913 req->retrans,
1914 uid,
1ab1457c 1915 0, /* non standard timer */
1916 0, /* open_requests have no inode */
1917 0, req);
1918}
1919
1920static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1921{
1922 struct in6_addr *dest, *src;
1923 __u16 destp, srcp;
1924 int timer_active;
1925 unsigned long timer_expires;
1926 struct inet_sock *inet = inet_sk(sp);
1927 struct tcp_sock *tp = tcp_sk(sp);
463c84b9 1928 const struct inet_connection_sock *icsk = inet_csk(sp);
1929 struct ipv6_pinfo *np = inet6_sk(sp);
1930
1931 dest = &np->daddr;
1932 src = &np->rcv_saddr;
1933 destp = ntohs(inet->dport);
1934 srcp = ntohs(inet->sport);
1935
1936 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1da177e4 1937 timer_active = 1;
1938 timer_expires = icsk->icsk_timeout;
1939 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 1940 timer_active = 4;
463c84b9 1941 timer_expires = icsk->icsk_timeout;
1942 } else if (timer_pending(&sp->sk_timer)) {
1943 timer_active = 2;
1944 timer_expires = sp->sk_timer.expires;
1945 } else {
1946 timer_active = 0;
1947 timer_expires = jiffies;
1948 }
1949
1950 seq_printf(seq,
1951 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
7be87351 1952 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
1953 i,
1954 src->s6_addr32[0], src->s6_addr32[1],
1955 src->s6_addr32[2], src->s6_addr32[3], srcp,
1956 dest->s6_addr32[0], dest->s6_addr32[1],
1957 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1ab1457c 1958 sp->sk_state,
1959 tp->write_seq-tp->snd_una,
1960 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1961 timer_active,
1962 jiffies_to_clock_t(timer_expires - jiffies),
463c84b9 1963 icsk->icsk_retransmits,
1da177e4 1964 sock_i_uid(sp),
6687e988 1965 icsk->icsk_probes_out,
1966 sock_i_ino(sp),
1967 atomic_read(&sp->sk_refcnt), sp,
1968 jiffies_to_clock_t(icsk->icsk_rto),
1969 jiffies_to_clock_t(icsk->icsk_ack.ato),
463c84b9 1970 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
1971 tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
1972 );
1973}
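/*
 * Userspace sketch (assumes Linux procfs): reading back some of the fields
 * that get_tcp6_sock() prints into /proc/net/tcp6.  Only the slot, ports
 * and state are decoded here; the 32-character address columns are kept as
 * raw hex strings because their word order follows the kernel's in6_addr
 * layout.
 */
#include <stdio.h>

int main(void)
{
	char line[512], local[64], remote[64];
	unsigned int slot, lport, rport, state;
	FILE *f = fopen("/proc/net/tcp6", "r");

	if (!f)
		return 1;
	fgets(line, sizeof(line), f);		/* skip the header row */
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "%u: %32[0-9A-Fa-f]:%x %32[0-9A-Fa-f]:%x %x",
			   &slot, local, &lport, remote, &rport, &state) == 6)
			printf("sl=%u local=%s:%u remote=%s:%u st=%02X\n",
			       slot, local, lport, remote, rport, state);
	}
	fclose(f);
	return 0;
}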
1974
1ab1457c 1975static void get_timewait6_sock(struct seq_file *seq,
8feaf0c0 1976 struct inet_timewait_sock *tw, int i)
1977{
1978 struct in6_addr *dest, *src;
1979 __u16 destp, srcp;
0fa1a53e 1980 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1981 int ttd = tw->tw_ttd - jiffies;
1982
1983 if (ttd < 0)
1984 ttd = 0;
1985
1986 dest = &tw6->tw_v6_daddr;
1987 src = &tw6->tw_v6_rcv_saddr;
1988 destp = ntohs(tw->tw_dport);
1989 srcp = ntohs(tw->tw_sport);
1990
1991 seq_printf(seq,
1992 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1993 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1994 i,
1995 src->s6_addr32[0], src->s6_addr32[1],
1996 src->s6_addr32[2], src->s6_addr32[3], srcp,
1997 dest->s6_addr32[0], dest->s6_addr32[1],
1998 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1999 tw->tw_substate, 0, 0,
2000 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2001 atomic_read(&tw->tw_refcnt), tw);
2002}
2003
2004static int tcp6_seq_show(struct seq_file *seq, void *v)
2005{
2006 struct tcp_iter_state *st;
2007
2008 if (v == SEQ_START_TOKEN) {
2009 seq_puts(seq,
2010 " sl "
2011 "local_address "
2012 "remote_address "
2013 "st tx_queue rx_queue tr tm->when retrnsmt"
2014 " uid timeout inode\n");
2015 goto out;
2016 }
2017 st = seq->private;
2018
2019 switch (st->state) {
2020 case TCP_SEQ_STATE_LISTENING:
2021 case TCP_SEQ_STATE_ESTABLISHED:
2022 get_tcp6_sock(seq, v, st->num);
2023 break;
2024 case TCP_SEQ_STATE_OPENREQ:
2025 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2026 break;
2027 case TCP_SEQ_STATE_TIME_WAIT:
2028 get_timewait6_sock(seq, v, st->num);
2029 break;
2030 }
2031out:
2032 return 0;
2033}
2034
1da177e4 2035static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2036 .name = "tcp6",
2037 .family = AF_INET6,
2038 .seq_fops = {
2039 .owner = THIS_MODULE,
2040 },
2041 .seq_ops = {
2042 .show = tcp6_seq_show,
2043 },
2044};
2045
6f8b13bc 2046int tcp6_proc_init(struct net *net)
1da177e4 2047{
6f8b13bc 2048 return tcp_proc_register(net, &tcp6_seq_afinfo);
2049}
2050
6f8b13bc 2051void tcp6_proc_exit(struct net *net)
1da177e4 2052{
6f8b13bc 2053 tcp_proc_unregister(net, &tcp6_seq_afinfo);
2054}
2055#endif
2056
2057struct proto tcpv6_prot = {
2058 .name = "TCPv6",
2059 .owner = THIS_MODULE,
2060 .close = tcp_close,
2061 .connect = tcp_v6_connect,
2062 .disconnect = tcp_disconnect,
463c84b9 2063 .accept = inet_csk_accept,
2064 .ioctl = tcp_ioctl,
2065 .init = tcp_v6_init_sock,
2066 .destroy = tcp_v6_destroy_sock,
2067 .shutdown = tcp_shutdown,
2068 .setsockopt = tcp_setsockopt,
2069 .getsockopt = tcp_getsockopt,
2070 .recvmsg = tcp_recvmsg,
2071 .backlog_rcv = tcp_v6_do_rcv,
2072 .hash = tcp_v6_hash,
2073 .unhash = inet_unhash,
2074 .get_port = inet_csk_get_port,
2075 .enter_memory_pressure = tcp_enter_memory_pressure,
2076 .sockets_allocated = &tcp_sockets_allocated,
2077 .memory_allocated = &tcp_memory_allocated,
2078 .memory_pressure = &tcp_memory_pressure,
0a5578cf 2079 .orphan_count = &tcp_orphan_count,
2080 .sysctl_mem = sysctl_tcp_mem,
2081 .sysctl_wmem = sysctl_tcp_wmem,
2082 .sysctl_rmem = sysctl_tcp_rmem,
2083 .max_header = MAX_TCP_HEADER,
2084 .obj_size = sizeof(struct tcp6_sock),
3ab5aee7 2085 .slab_flags = SLAB_DESTROY_BY_RCU,
6d6ee43e 2086 .twsk_prot = &tcp6_timewait_sock_ops,
60236fdd 2087 .rsk_prot = &tcp6_request_sock_ops,
39d8cda7 2088 .h.hashinfo = &tcp_hashinfo,
2089#ifdef CONFIG_COMPAT
2090 .compat_setsockopt = compat_tcp_setsockopt,
2091 .compat_getsockopt = compat_tcp_getsockopt,
2092#endif
2093};
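/*
 * Userspace sketch: tcp_getsockopt(), reachable through the proto entry
 * above, exports much of the per-connection state that get_tcp6_sock()
 * prints, via the TCP_INFO option.  On a freshly created socket this simply
 * reports the TCP_CLOSE state; header interplay between libc and
 * <linux/tcp.h> may need adjustment on some systems.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>		/* struct tcp_info, TCP_INFO */

int main(void)
{
	int fd = socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP);
	struct tcp_info info;
	socklen_t len = sizeof(info);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
		printf("state=%u rto=%uus snd_cwnd=%u snd_ssthresh=%u retrans=%u\n",
		       (unsigned int)info.tcpi_state, info.tcpi_rto,
		       info.tcpi_snd_cwnd, info.tcpi_snd_ssthresh,
		       info.tcpi_total_retrans);
	else
		perror("getsockopt(TCP_INFO)");
	return 0;
}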
2094
2095static struct inet6_protocol tcpv6_protocol = {
2096 .handler = tcp_v6_rcv,
2097 .err_handler = tcp_v6_err,
a430a43d 2098 .gso_send_check = tcp_v6_gso_send_check,
adcfc7d0 2099 .gso_segment = tcp_tso_segment,
2100 .gro_receive = tcp6_gro_receive,
2101 .gro_complete = tcp6_gro_complete,
2102 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2103};
2104
2105static struct inet_protosw tcpv6_protosw = {
2106 .type = SOCK_STREAM,
2107 .protocol = IPPROTO_TCP,
2108 .prot = &tcpv6_prot,
2109 .ops = &inet6_stream_ops,
2110 .capability = -1,
2111 .no_check = 0,
2112 .flags = INET_PROTOSW_PERMANENT |
2113 INET_PROTOSW_ICSK,
2114};
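/*
 * Userspace sketch (arbitrary port): socket(AF_INET6, SOCK_STREAM,
 * IPPROTO_TCP) is resolved through this inet_protosw entry to tcpv6_prot
 * and inet6_stream_ops.  With IPV6_V6ONLY left at 0 the listener also
 * accepts IPv4-mapped peers, which is the ipv6_mapped case above.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP);
	int v6only = 0;		/* set to 1 to refuse IPv4-mapped connections */
	struct sockaddr_in6 addr = {
		.sin6_family = AF_INET6,
		.sin6_port = htons(8080),	/* sin6_addr left as :: (any) */
	};

	setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &v6only, sizeof(v6only));

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 || listen(fd, 16) < 0)
		perror("bind/listen");
	return 0;
}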
2115
2116static int tcpv6_net_init(struct net *net)
2117{
2118 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2119 SOCK_RAW, IPPROTO_TCP, net);
2120}
2121
2122static void tcpv6_net_exit(struct net *net)
2123{
5677242f 2124 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
d315492b 2125 inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET6);
2126}
2127
2128static struct pernet_operations tcpv6_net_ops = {
2129 .init = tcpv6_net_init,
2130 .exit = tcpv6_net_exit,
2131};
2132
7f4e4868 2133int __init tcpv6_init(void)
1da177e4 2134{
2135 int ret;
2136
2137 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2138 if (ret)
2139 goto out;
2140
1da177e4 2141 /* register inet6 protocol */
2142 ret = inet6_register_protosw(&tcpv6_protosw);
2143 if (ret)
2144 goto out_tcpv6_protocol;
2145
93ec926b 2146 ret = register_pernet_subsys(&tcpv6_net_ops);
2147 if (ret)
2148 goto out_tcpv6_protosw;
2149out:
2150 return ret;
ae0f7d5f 2151
2152out_tcpv6_protosw:			/* unwind in reverse order of registration */
2153 inet6_unregister_protosw(&tcpv6_protosw);
2154out_tcpv6_protocol:
2155 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2156 goto out;
2157}
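/*
 * Self-contained sketch of the registration/unwind idiom used by
 * tcpv6_init() above: register in order and, on failure, release only what
 * was already registered, in reverse order.  register_a/b/c and the
 * unregister helpers are made-up stand-ins, not kernel interfaces.
 */
#include <stdio.h>

static int register_a(void) { return 0; }
static int register_b(void) { return 0; }
static int register_c(void) { return -1; }	/* simulate a late failure */
static void unregister_a(void) { puts("undo a"); }
static void unregister_b(void) { puts("undo b"); }

static int subsystem_init(void)
{
	int ret;

	ret = register_a();
	if (ret)
		goto out;
	ret = register_b();
	if (ret)
		goto out_a;
	ret = register_c();
	if (ret)
		goto out_b;
out:
	return ret;

out_b:
	unregister_b();
out_a:
	unregister_a();
	goto out;
}

int main(void)
{
	printf("subsystem_init() = %d\n", subsystem_init());
	return 0;
}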
2158
09f7709f 2159void tcpv6_exit(void)
7f4e4868 2160{
93ec926b 2161 unregister_pernet_subsys(&tcpv6_net_ops);
2162 inet6_unregister_protosw(&tcpv6_protosw);
2163 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1da177e4 2164}