/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	return NULL;
}
#endif

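/*
 * Hash the socket into the IPv6 established table.  Sockets that have been
 * switched to the v4-mapped operations (icsk_af_ops == &ipv6_mapped) are
 * hashed through the IPv4 tcp_prot path instead.
 */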
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk);
		local_bh_enable();
	}
}

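/* Fold the IPv6 pseudo-header (RFC 2460) into a TCP checksum. */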
static __inline__ __sum16 tcp_v6_check(int len,
				       struct in6_addr *saddr,
				       struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

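/*
 * Connect an AF_INET6 TCP socket.  Handles flow labels and link-local scope
 * ids, falls back to tcp_v4_connect() for v4-mapped destinations, and
 * otherwise performs the route/xfrm lookup before sending the SYN via
 * tcp_connect().
 */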
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	security_sk_classify_flow(sk, &fl);

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
	if (err < 0) {
		if (err == -EREMOTE)
			err = ip6_dst_blackhole(sk, &dst, &fl);
		if (err < 0)
			goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->sport,
							     inet->dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

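/*
 * ICMPv6 error handler.  Looks up the socket the error refers to, applies a
 * PMTU update for ICMPV6_PKT_TOOBIG, drops the matching request_sock on a
 * listener, and otherwise reports the converted error to the socket.
 */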
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			  th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;
			security_skb_classify_flow(skb, &fl);

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

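/*
 * Build and transmit a SYN+ACK for the given request_sock, routing it with
 * any IPv6 routing-header options configured on the listening socket.
 */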
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, &fl);

	opt = np->opt;
	if (opt && opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto done;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);
	if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
		goto done;

	skb = tcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v6_check(skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial(th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static inline void syn_flood_warning(struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
	else
#endif
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}

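/*
 * TCP MD5 signature (RFC 2385) support: per-peer key lookup, key list
 * management, and hash computation over the IPv6 pseudo-header.
 */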
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	BUG_ON(tp == NULL);

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

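/*
 * Install (or update) the MD5 key for an IPv6 peer.  The keys6 array grows
 * one entry at a time; newkey is consumed here (it is freed on error paths
 * or when replacing an existing entry's key).
 */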
static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}

static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}

static void tcp_v6_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}

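/*
 * TCP_MD5SIG setsockopt() handler: copies a struct tcp_md5sig from userspace
 * and adds or deletes the key, dispatching v4-mapped peers to the IPv4
 * helpers.
 */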
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}

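/*
 * MD5 hash helpers: fold the IPv6 pseudo-header, the TCP header (and, for
 * whole segments, the payload) plus the key into the signature carried in
 * the TCP MD5 option.
 */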
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					struct in6_addr *daddr,
					struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       struct in6_addr *daddr, struct in6_addr *saddr,
			       struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb)
{
	struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

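/*
 * Verify the MD5 option on an incoming segment against the key configured
 * for the sender, counting missing, unexpected, and mismatched signatures.
 */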
static int tcp_v6_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for (%pI6, %u)->(%pI6, %u)\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_send_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

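/*
 * Fill in the TCP checksum for an outgoing segment, either deferring it to
 * hardware (CHECKSUM_PARTIAL) or computing it in software.
 */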
static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
					    csum_partial(th, th->doff << 2,
							 skb->csum));
	}
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
				     IPPROTO_TCP, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}

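/*
 * GRO hooks: validate the checksum before segments are aggregated and
 * rewrite the pseudo-header checksum once a merged segment is handed back
 * up the stack.
 */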
static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}

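/*
 * Build and send a bare ACK or RST on the per-namespace control socket,
 * echoing the addressing of the packet being answered and optionally
 * carrying timestamp and MD5 options.
 */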
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi fl;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(skb);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	buff->csum = csum_partial(t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	/* Pass a socket to ip6_dst_lookup even when the reply is an RST;
	 * the underlying function uses it to retrieve the network
	 * namespace.
	 */
	if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
		if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
			skb_dst_set(buff, dst);
			ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
			if (rst)
				TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(buff);
}

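/*
 * Send a RST in reply to a bad segment, signed with the peer's MD5 key when
 * one is configured on the matching socket.
 */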
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
}


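/*
 * For a segment arriving on a listening socket, find the request_sock or
 * established child it belongs to (falling back to SYN cookies when
 * enabled), or return the listener itself.
 */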
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
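/*
 * Handle an incoming SYN on a listening socket: allocate a request_sock,
 * record the peer's addresses and options, pick an initial sequence number
 * (or a SYN cookie under synflood pressure) and answer with a SYN+ACK.
 */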
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock *req = NULL;
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0
#endif

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies)
			want_cookie = 1;
		else
#endif
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
		isn = cookie_v6_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		isn = tcp_v6_init_sequence(skb);
	}

	tcp_rsk(req)->snt_isn = isn;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req))
		goto drop;

	if (!want_cookie) {
		inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
		return 0;
	}

drop:
	if (req)
		reqsk_free(req);

	return 0; /* don't send reset */
}

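/*
 * Create the child socket once the three-way handshake completes.
 * v4-mapped connections are delegated to tcp_v4_syn_recv_sock() and then
 * fitted with the IPv6-mapped operation tables.
 */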
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_rsk(req)->loc_port;
		security_req_classify_flow(req, &fl);

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from the listening socket (if any)

	   Yes, keeping a reference count would be much more clever, but we
	   do one more thing here: reattach optmem to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &newnp->daddr,
					  newkey, key->keylen);
	}
#endif

	__inet6_hash(newsk);
	__inet_inherit_port(sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}

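/*
 * Verify (or arrange for deferred verification of) the checksum of an
 * incoming segment; short segments are checked immediately.
 */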
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv
	 * is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

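/*
 * Main IPv6 TCP receive routine: validate the header and checksum, look up
 * the owning socket and either process the segment directly, queue it to
 * the prequeue/backlog, or answer with a RST.
 */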
static int tcp_v6_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}

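/*
 * Address-family operation tables: ipv6_specific for native IPv6 sockets,
 * ipv6_mapped for TCP-over-IPv4 carried on an AF_INET6 socket, plus the
 * matching MD5 operation tables.
 */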
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v6_remember_stamp,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */

static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

1da177e4
LT
1824/* NOTE: A lot of things set to zero explicitly by call to
1825 * sk_alloc() so need not be done here.
1826 */
1827static int tcp_v6_init_sock(struct sock *sk)
1828{
6687e988 1829 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
1830 struct tcp_sock *tp = tcp_sk(sk);
1831
1832 skb_queue_head_init(&tp->out_of_order_queue);
1833 tcp_init_xmit_timers(sk);
1834 tcp_prequeue_init(tp);
1835
6687e988 1836 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1da177e4
LT
1837 tp->mdev = TCP_TIMEOUT_INIT;
1838
1839 /* So many TCP implementations out there (incorrectly) count the
1840 * initial SYN frame in their delayed-ACK and congestion control
1841 * algorithms that we must have the following bandaid to talk
1842 * efficiently to them. -DaveM
1843 */
1844 tp->snd_cwnd = 2;
1845
1846 /* See draft-stevens-tcpca-spec-01 for discussion of the
1847 * initialization of these values.
1848 */
1849 tp->snd_ssthresh = 0x7fffffff;
1850 tp->snd_cwnd_clamp = ~0;
c1b4a7e6 1851 tp->mss_cache = 536;
1da177e4
LT
1852
1853 tp->reordering = sysctl_tcp_reordering;
1854
1855 sk->sk_state = TCP_CLOSE;
1856
8292a17a 1857 icsk->icsk_af_ops = &ipv6_specific;
6687e988 1858 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
d83d8461 1859 icsk->icsk_sync_mss = tcp_sync_mss;
1da177e4
LT
1860 sk->sk_write_space = sk_stream_write_space;
1861 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1862
cfb6eeb4
YH
1863#ifdef CONFIG_TCP_MD5SIG
1864 tp->af_specific = &tcp_sock_ipv6_specific;
1865#endif
1866
1da177e4
LT
1867 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1868 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1869
eb4dea58 1870 local_bh_disable();
1748376b 1871 percpu_counter_inc(&tcp_sockets_allocated);
eb4dea58 1872 local_bh_enable();
1da177e4
LT
1873
1874 return 0;
1875}
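
/*
 * Per-socket teardown: drop any MD5 keys, run the shared IPv4/IPv6 TCP
 * destructor, then release the IPv6-specific socket state.
 */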
static void tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,    /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,    /* non standard timer */
		   0,    /* open_requests have no inode */
		   0, req);
}
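
/*
 * Sketch of the resulting /proc/net/tcp6 row (illustrative values only):
 *
 *   0: 00000000000000000000000000000000:0016 00000000000000000000000000000000:0000 0A ...
 *
 * Addresses are dumped as the four 32-bit words of the in6_addr in hex,
 * followed by the port in hex; get_tcp6_sock() below appends further
 * per-connection fields (rto, ato, cwnd, ssthresh) to the same layout.
 */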
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->dport);
	srcp  = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq - tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd, tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = &tw6->tw_v6_daddr;
	src   = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}
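
/*
 * Glue for the shared TCP seq_file iterator: tcp_proc_register() fills in
 * the remaining seq_fops/seq_ops callbacks and creates the per-namespace
 * /proc/net/tcp6 entry.
 */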
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
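
/*
 * The AF_INET6 instance of TCP: everything that is address-family
 * independent is shared with the IPv4 implementation, with the tcp_v6_*
 * handlers plugged in where the families differ.
 */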
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
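
/*
 * Inbound demultiplexing entry registered with the IPv6 stack: TCP
 * segments go to tcp_v6_rcv(), ICMPv6 errors to tcp_v6_err(); the
 * gso/gro hooks let the core offload code segment and coalesce TCP/IPv6
 * traffic.
 */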
static struct inet6_protocol tcpv6_protocol = {
	.handler	= tcp_v6_rcv,
	.err_handler	= tcp_v6_err,
	.gso_send_check	= tcp_v6_gso_send_check,
	.gso_segment	= tcp_tso_segment,
	.gro_receive	= tcp6_gro_receive,
	.gro_complete	= tcp6_gro_complete,
	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};
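
/*
 * socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP) resolves to tcpv6_prot and
 * inet6_stream_ops through this entry; INET_PROTOSW_PERMANENT keeps it
 * from being unregistered at runtime.
 */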
static struct inet_protosw tcpv6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_TCP,
	.prot		= &tcpv6_prot,
	.ops		= &inet6_stream_ops,
	.capability	= -1,
	.no_check	= 0,
	.flags		= INET_PROTOSW_PERMANENT |
			  INET_PROTOSW_ICSK,
};
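
/*
 * Each network namespace gets a kernel control socket (net->ipv6.tcp_sk),
 * used by the RST/ACK reply paths for segments that match no socket.
 * Namespace teardown also purges any IPv6 sockets still in TIME-WAIT.
 */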
static int tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
	inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init = tcpv6_net_init,
	.exit = tcpv6_net_exit,
};
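
/*
 * Boot-time wiring: register the IPPROTO_TCP handler, the SOCK_STREAM
 * protosw entry and the per-namespace operations, unwinding whatever has
 * already been registered if a later step fails.
 */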
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

	/* Undo the registrations in the reverse order they were made. */
out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}
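
/* Teardown mirrors tcpv6_init(), unregistering in reverse order. */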
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}