]> bbs.cooldavid.org Git - net-next-2.6.git/blame - net/ipv6/tcp_ipv6.c
net: replace ipfragok with skb->local_df
[net-next-2.6.git] / net / ipv6 / tcp_ipv6.c
CommitLineData
1da177e4
LT
1/*
2 * TCP over IPv6
1ab1457c 3 * Linux INET6 implementation
1da177e4
LT
4 *
5 * Authors:
1ab1457c 6 * Pedro Roque <roque@di.fc.ul.pt>
1da177e4 7 *
1ab1457c 8 * Based on:
1da177e4
LT
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
eb4dea58 26#include <linux/bottom_half.h>
1da177e4 27#include <linux/module.h>
1da177e4
LT
28#include <linux/errno.h>
29#include <linux/types.h>
30#include <linux/socket.h>
31#include <linux/sockios.h>
32#include <linux/net.h>
33#include <linux/jiffies.h>
34#include <linux/in.h>
35#include <linux/in6.h>
36#include <linux/netdevice.h>
37#include <linux/init.h>
38#include <linux/jhash.h>
39#include <linux/ipsec.h>
40#include <linux/times.h>
5a0e3ad6 41#include <linux/slab.h>
1da177e4
LT
42
43#include <linux/ipv6.h>
44#include <linux/icmpv6.h>
45#include <linux/random.h>
46
47#include <net/tcp.h>
48#include <net/ndisc.h>
5324a040 49#include <net/inet6_hashtables.h>
8129765a 50#include <net/inet6_connection_sock.h>
1da177e4
LT
51#include <net/ipv6.h>
52#include <net/transp_v6.h>
53#include <net/addrconf.h>
54#include <net/ip6_route.h>
55#include <net/ip6_checksum.h>
56#include <net/inet_ecn.h>
57#include <net/protocol.h>
58#include <net/xfrm.h>
1da177e4
LT
59#include <net/snmp.h>
60#include <net/dsfield.h>
6d6ee43e 61#include <net/timewait_sock.h>
18134bed 62#include <net/netdma.h>
3d58b5fa 63#include <net/inet_common.h>
1da177e4
LT
64
65#include <asm/uaccess.h>
66
67#include <linux/proc_fs.h>
68#include <linux/seq_file.h>
69
cfb6eeb4
YH
70#include <linux/crypto.h>
71#include <linux/scatterlist.h>
72
cfb6eeb4 73static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
6edafaaf
GJ
74static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
75 struct request_sock *req);
1da177e4
LT
76
77static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
8ad50d96
HX
78static void __tcp_v6_send_check(struct sk_buff *skb,
79 struct in6_addr *saddr,
80 struct in6_addr *daddr);
1da177e4 81
3b401a81
SH
82static const struct inet_connection_sock_af_ops ipv6_mapped;
83static const struct inet_connection_sock_af_ops ipv6_specific;
a928630a 84#ifdef CONFIG_TCP_MD5SIG
b2e4b3de
SH
85static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
86static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
9501f972
YH
87#else
88static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
89 struct in6_addr *addr)
90{
91 return NULL;
92}
a928630a 93#endif
1da177e4 94
1da177e4
LT
95static void tcp_v6_hash(struct sock *sk)
96{
97 if (sk->sk_state != TCP_CLOSE) {
8292a17a 98 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
1da177e4
LT
99 tcp_prot.hash(sk);
100 return;
101 }
102 local_bh_disable();
9327f705 103 __inet6_hash(sk, NULL);
1da177e4
LT
104 local_bh_enable();
105 }
106}
107
684f2176 108static __inline__ __sum16 tcp_v6_check(int len,
1ab1457c
YH
109 struct in6_addr *saddr,
110 struct in6_addr *daddr,
868c86bc 111 __wsum base)
1da177e4
LT
112{
113 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
114}
115
a94f723d 116static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
1da177e4 117{
0660e03f
ACM
118 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
119 ipv6_hdr(skb)->saddr.s6_addr32,
aa8223c7
ACM
120 tcp_hdr(skb)->dest,
121 tcp_hdr(skb)->source);
1da177e4
LT
122}
123
1ab1457c 124static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1da177e4
LT
125 int addr_len)
126{
127 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
1ab1457c 128 struct inet_sock *inet = inet_sk(sk);
d83d8461 129 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
130 struct ipv6_pinfo *np = inet6_sk(sk);
131 struct tcp_sock *tp = tcp_sk(sk);
132 struct in6_addr *saddr = NULL, *final_p = NULL, final;
133 struct flowi fl;
134 struct dst_entry *dst;
135 int addr_type;
136 int err;
137
1ab1457c 138 if (addr_len < SIN6_LEN_RFC2133)
1da177e4
LT
139 return -EINVAL;
140
1ab1457c 141 if (usin->sin6_family != AF_INET6)
1da177e4
LT
142 return(-EAFNOSUPPORT);
143
144 memset(&fl, 0, sizeof(fl));
145
146 if (np->sndflow) {
147 fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
148 IP6_ECN_flow_init(fl.fl6_flowlabel);
149 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
150 struct ip6_flowlabel *flowlabel;
151 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
152 if (flowlabel == NULL)
153 return -EINVAL;
154 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
155 fl6_sock_release(flowlabel);
156 }
157 }
158
159 /*
1ab1457c
YH
160 * connect() to INADDR_ANY means loopback (BSD'ism).
161 */
162
163 if(ipv6_addr_any(&usin->sin6_addr))
164 usin->sin6_addr.s6_addr[15] = 0x1;
1da177e4
LT
165
166 addr_type = ipv6_addr_type(&usin->sin6_addr);
167
168 if(addr_type & IPV6_ADDR_MULTICAST)
169 return -ENETUNREACH;
170
171 if (addr_type&IPV6_ADDR_LINKLOCAL) {
172 if (addr_len >= sizeof(struct sockaddr_in6) &&
173 usin->sin6_scope_id) {
174 /* If interface is set while binding, indices
175 * must coincide.
176 */
177 if (sk->sk_bound_dev_if &&
178 sk->sk_bound_dev_if != usin->sin6_scope_id)
179 return -EINVAL;
180
181 sk->sk_bound_dev_if = usin->sin6_scope_id;
182 }
183
184 /* Connect to link-local address requires an interface */
185 if (!sk->sk_bound_dev_if)
186 return -EINVAL;
187 }
188
189 if (tp->rx_opt.ts_recent_stamp &&
190 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
191 tp->rx_opt.ts_recent = 0;
192 tp->rx_opt.ts_recent_stamp = 0;
193 tp->write_seq = 0;
194 }
195
196 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
197 np->flow_label = fl.fl6_flowlabel;
198
199 /*
200 * TCP over IPv4
201 */
202
203 if (addr_type == IPV6_ADDR_MAPPED) {
d83d8461 204 u32 exthdrlen = icsk->icsk_ext_hdr_len;
1da177e4
LT
205 struct sockaddr_in sin;
206
207 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
208
209 if (__ipv6_only_sock(sk))
210 return -ENETUNREACH;
211
212 sin.sin_family = AF_INET;
213 sin.sin_port = usin->sin6_port;
214 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
215
d83d8461 216 icsk->icsk_af_ops = &ipv6_mapped;
1da177e4 217 sk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
218#ifdef CONFIG_TCP_MD5SIG
219 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
220#endif
1da177e4
LT
221
222 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
223
224 if (err) {
d83d8461
ACM
225 icsk->icsk_ext_hdr_len = exthdrlen;
226 icsk->icsk_af_ops = &ipv6_specific;
1da177e4 227 sk->sk_backlog_rcv = tcp_v6_do_rcv;
cfb6eeb4
YH
228#ifdef CONFIG_TCP_MD5SIG
229 tp->af_specific = &tcp_sock_ipv6_specific;
230#endif
1da177e4
LT
231 goto failure;
232 } else {
c720c7e8
ED
233 ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
234 ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
235 &np->rcv_saddr);
1da177e4
LT
236 }
237
238 return err;
239 }
240
241 if (!ipv6_addr_any(&np->rcv_saddr))
242 saddr = &np->rcv_saddr;
243
244 fl.proto = IPPROTO_TCP;
245 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
246 ipv6_addr_copy(&fl.fl6_src,
247 (saddr ? saddr : &np->saddr));
248 fl.oif = sk->sk_bound_dev_if;
51953d5b 249 fl.mark = sk->sk_mark;
1da177e4 250 fl.fl_ip_dport = usin->sin6_port;
c720c7e8 251 fl.fl_ip_sport = inet->inet_sport;
1da177e4
LT
252
253 if (np->opt && np->opt->srcrt) {
254 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
255 ipv6_addr_copy(&final, &fl.fl6_dst);
256 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
257 final_p = &final;
258 }
259
beb8d13b
VY
260 security_sk_classify_flow(sk, &fl);
261
1da177e4
LT
262 err = ip6_dst_lookup(sk, &dst, &fl);
263 if (err)
264 goto failure;
265 if (final_p)
266 ipv6_addr_copy(&fl.fl6_dst, final_p);
267
52479b62
AD
268 err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
269 if (err < 0) {
14e50e57
DM
270 if (err == -EREMOTE)
271 err = ip6_dst_blackhole(sk, &dst, &fl);
272 if (err < 0)
273 goto failure;
274 }
1da177e4
LT
275
276 if (saddr == NULL) {
277 saddr = &fl.fl6_src;
278 ipv6_addr_copy(&np->rcv_saddr, saddr);
279 }
280
281 /* set the source address */
282 ipv6_addr_copy(&np->saddr, saddr);
c720c7e8 283 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
1da177e4 284
f83ef8c0 285 sk->sk_gso_type = SKB_GSO_TCPV6;
8e1ef0a9 286 __ip6_dst_store(sk, dst, NULL, NULL);
1da177e4 287
d83d8461 288 icsk->icsk_ext_hdr_len = 0;
1da177e4 289 if (np->opt)
d83d8461
ACM
290 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
291 np->opt->opt_nflen);
1da177e4
LT
292
293 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
294
c720c7e8 295 inet->inet_dport = usin->sin6_port;
1da177e4
LT
296
297 tcp_set_state(sk, TCP_SYN_SENT);
d8313f5c 298 err = inet6_hash_connect(&tcp_death_row, sk);
1da177e4
LT
299 if (err)
300 goto late_failure;
301
302 if (!tp->write_seq)
303 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
304 np->daddr.s6_addr32,
c720c7e8
ED
305 inet->inet_sport,
306 inet->inet_dport);
1da177e4
LT
307
308 err = tcp_connect(sk);
309 if (err)
310 goto late_failure;
311
312 return 0;
313
314late_failure:
315 tcp_set_state(sk, TCP_CLOSE);
316 __sk_dst_reset(sk);
317failure:
c720c7e8 318 inet->inet_dport = 0;
1da177e4
LT
319 sk->sk_route_caps = 0;
320 return err;
321}
322
323static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
d5fdd6ba 324 u8 type, u8 code, int offset, __be32 info)
1da177e4
LT
325{
326 struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
505cbfc5 327 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
1da177e4
LT
328 struct ipv6_pinfo *np;
329 struct sock *sk;
330 int err;
1ab1457c 331 struct tcp_sock *tp;
1da177e4 332 __u32 seq;
ca12a1a4 333 struct net *net = dev_net(skb->dev);
1da177e4 334
ca12a1a4 335 sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
d86e0dac 336 th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
1da177e4
LT
337
338 if (sk == NULL) {
e41b5368
DL
339 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
340 ICMP6_MIB_INERRORS);
1da177e4
LT
341 return;
342 }
343
344 if (sk->sk_state == TCP_TIME_WAIT) {
9469c7b4 345 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
346 return;
347 }
348
349 bh_lock_sock(sk);
350 if (sock_owned_by_user(sk))
de0744af 351 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
1da177e4
LT
352
353 if (sk->sk_state == TCP_CLOSE)
354 goto out;
355
356 tp = tcp_sk(sk);
1ab1457c 357 seq = ntohl(th->seq);
1da177e4
LT
358 if (sk->sk_state != TCP_LISTEN &&
359 !between(seq, tp->snd_una, tp->snd_nxt)) {
de0744af 360 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
1da177e4
LT
361 goto out;
362 }
363
364 np = inet6_sk(sk);
365
366 if (type == ICMPV6_PKT_TOOBIG) {
367 struct dst_entry *dst = NULL;
368
369 if (sock_owned_by_user(sk))
370 goto out;
371 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
372 goto out;
373
374 /* icmp should have updated the destination cache entry */
375 dst = __sk_dst_check(sk, np->dst_cookie);
376
377 if (dst == NULL) {
378 struct inet_sock *inet = inet_sk(sk);
379 struct flowi fl;
380
381 /* BUGGG_FUTURE: Again, it is not clear how
382 to handle rthdr case. Ignore this complexity
383 for now.
384 */
385 memset(&fl, 0, sizeof(fl));
386 fl.proto = IPPROTO_TCP;
387 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
388 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
389 fl.oif = sk->sk_bound_dev_if;
51953d5b 390 fl.mark = sk->sk_mark;
c720c7e8
ED
391 fl.fl_ip_dport = inet->inet_dport;
392 fl.fl_ip_sport = inet->inet_sport;
beb8d13b 393 security_skb_classify_flow(skb, &fl);
1da177e4
LT
394
395 if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
396 sk->sk_err_soft = -err;
397 goto out;
398 }
399
52479b62 400 if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0) {
1da177e4
LT
401 sk->sk_err_soft = -err;
402 goto out;
403 }
404
405 } else
406 dst_hold(dst);
407
d83d8461 408 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
1da177e4
LT
409 tcp_sync_mss(sk, dst_mtu(dst));
410 tcp_simple_retransmit(sk);
411 } /* else let the usual retransmit timer handle it */
412 dst_release(dst);
413 goto out;
414 }
415
416 icmpv6_err_convert(type, code, &err);
417
60236fdd 418 /* Might be for an request_sock */
1da177e4 419 switch (sk->sk_state) {
60236fdd 420 struct request_sock *req, **prev;
1da177e4
LT
421 case TCP_LISTEN:
422 if (sock_owned_by_user(sk))
423 goto out;
424
8129765a
ACM
425 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
426 &hdr->saddr, inet6_iif(skb));
1da177e4
LT
427 if (!req)
428 goto out;
429
430 /* ICMPs are not backlogged, hence we cannot get
431 * an established socket here.
432 */
547b792c 433 WARN_ON(req->sk != NULL);
1da177e4 434
2e6599cb 435 if (seq != tcp_rsk(req)->snt_isn) {
de0744af 436 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
1da177e4
LT
437 goto out;
438 }
439
463c84b9 440 inet_csk_reqsk_queue_drop(sk, req, prev);
1da177e4
LT
441 goto out;
442
443 case TCP_SYN_SENT:
444 case TCP_SYN_RECV: /* Cannot happen.
1ab1457c 445 It can, it SYNs are crossed. --ANK */
1da177e4 446 if (!sock_owned_by_user(sk)) {
1da177e4
LT
447 sk->sk_err = err;
448 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
449
450 tcp_done(sk);
451 } else
452 sk->sk_err_soft = err;
453 goto out;
454 }
455
456 if (!sock_owned_by_user(sk) && np->recverr) {
457 sk->sk_err = err;
458 sk->sk_error_report(sk);
459 } else
460 sk->sk_err_soft = err;
461
462out:
463 bh_unlock_sock(sk);
464 sock_put(sk);
465}
466
467
e6b4d113
WAS
468static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
469 struct request_values *rvp)
1da177e4 470{
ca304b61 471 struct inet6_request_sock *treq = inet6_rsk(req);
1da177e4
LT
472 struct ipv6_pinfo *np = inet6_sk(sk);
473 struct sk_buff * skb;
474 struct ipv6_txoptions *opt = NULL;
475 struct in6_addr * final_p = NULL, final;
476 struct flowi fl;
fd80eb94 477 struct dst_entry *dst;
1da177e4
LT
478 int err = -1;
479
480 memset(&fl, 0, sizeof(fl));
481 fl.proto = IPPROTO_TCP;
2e6599cb
ACM
482 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
483 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1da177e4 484 fl.fl6_flowlabel = 0;
2e6599cb 485 fl.oif = treq->iif;
51953d5b 486 fl.mark = sk->sk_mark;
2e6599cb 487 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
fd507037 488 fl.fl_ip_sport = inet_rsk(req)->loc_port;
4237c75c 489 security_req_classify_flow(req, &fl);
1da177e4 490
fd80eb94
DL
491 opt = np->opt;
492 if (opt && opt->srcrt) {
493 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
494 ipv6_addr_copy(&final, &fl.fl6_dst);
495 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
496 final_p = &final;
1da177e4
LT
497 }
498
fd80eb94
DL
499 err = ip6_dst_lookup(sk, &dst, &fl);
500 if (err)
501 goto done;
502 if (final_p)
503 ipv6_addr_copy(&fl.fl6_dst, final_p);
52479b62 504 if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
fd80eb94
DL
505 goto done;
506
e6b4d113 507 skb = tcp_make_synack(sk, dst, req, rvp);
1da177e4 508 if (skb) {
8ad50d96 509 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
1da177e4 510
2e6599cb 511 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
4e15ed4d 512 err = ip6_xmit(sk, skb, &fl, opt);
b9df3cb8 513 err = net_xmit_eval(err);
1da177e4
LT
514 }
515
516done:
1ab1457c 517 if (opt && opt != np->opt)
1da177e4 518 sock_kfree_s(sk, opt, opt->tot_len);
78b91042 519 dst_release(dst);
1da177e4
LT
520 return err;
521}
522
72659ecc
OP
523static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
524 struct request_values *rvp)
525{
526 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
527 return tcp_v6_send_synack(sk, req, rvp);
528}
529
c6aefafb
GG
530static inline void syn_flood_warning(struct sk_buff *skb)
531{
532#ifdef CONFIG_SYN_COOKIES
533 if (sysctl_tcp_syncookies)
534 printk(KERN_INFO
535 "TCPv6: Possible SYN flooding on port %d. "
536 "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
537 else
538#endif
539 printk(KERN_INFO
540 "TCPv6: Possible SYN flooding on port %d. "
541 "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
542}
543
60236fdd 544static void tcp_v6_reqsk_destructor(struct request_sock *req)
1da177e4 545{
800d55f1 546 kfree_skb(inet6_rsk(req)->pktopts);
1da177e4
LT
547}
548
cfb6eeb4
YH
549#ifdef CONFIG_TCP_MD5SIG
550static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
551 struct in6_addr *addr)
552{
553 struct tcp_sock *tp = tcp_sk(sk);
554 int i;
555
556 BUG_ON(tp == NULL);
557
558 if (!tp->md5sig_info || !tp->md5sig_info->entries6)
559 return NULL;
560
561 for (i = 0; i < tp->md5sig_info->entries6; i++) {
caad295f 562 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
f8ab18d2 563 return &tp->md5sig_info->keys6[i].base;
cfb6eeb4
YH
564 }
565 return NULL;
566}
567
568static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
569 struct sock *addr_sk)
570{
571 return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
572}
573
574static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
575 struct request_sock *req)
576{
577 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
578}
579
580static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
581 char *newkey, u8 newkeylen)
582{
583 /* Add key to the list */
b0a713e9 584 struct tcp_md5sig_key *key;
cfb6eeb4
YH
585 struct tcp_sock *tp = tcp_sk(sk);
586 struct tcp6_md5sig_key *keys;
587
b0a713e9 588 key = tcp_v6_md5_do_lookup(sk, peer);
cfb6eeb4
YH
589 if (key) {
590 /* modify existing entry - just update that one */
b0a713e9
MD
591 kfree(key->key);
592 key->key = newkey;
593 key->keylen = newkeylen;
cfb6eeb4
YH
594 } else {
595 /* reallocate new list if current one is full. */
596 if (!tp->md5sig_info) {
597 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
598 if (!tp->md5sig_info) {
599 kfree(newkey);
600 return -ENOMEM;
601 }
3d7dbeac 602 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
cfb6eeb4 603 }
aa133076 604 if (tcp_alloc_md5sig_pool(sk) == NULL) {
aacbe8c8
YH
605 kfree(newkey);
606 return -ENOMEM;
607 }
cfb6eeb4
YH
608 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
609 keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
610 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
611
612 if (!keys) {
613 tcp_free_md5sig_pool();
614 kfree(newkey);
615 return -ENOMEM;
616 }
617
618 if (tp->md5sig_info->entries6)
619 memmove(keys, tp->md5sig_info->keys6,
620 (sizeof (tp->md5sig_info->keys6[0]) *
621 tp->md5sig_info->entries6));
622
623 kfree(tp->md5sig_info->keys6);
624 tp->md5sig_info->keys6 = keys;
625 tp->md5sig_info->alloced6++;
626 }
627
628 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
629 peer);
f8ab18d2
DM
630 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
631 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
cfb6eeb4
YH
632
633 tp->md5sig_info->entries6++;
634 }
635 return 0;
636}
637
638static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
639 u8 *newkey, __u8 newkeylen)
640{
641 return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
642 newkey, newkeylen);
643}
644
645static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
646{
647 struct tcp_sock *tp = tcp_sk(sk);
648 int i;
649
650 for (i = 0; i < tp->md5sig_info->entries6; i++) {
caad295f 651 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
cfb6eeb4 652 /* Free the key */
f8ab18d2 653 kfree(tp->md5sig_info->keys6[i].base.key);
cfb6eeb4
YH
654 tp->md5sig_info->entries6--;
655
656 if (tp->md5sig_info->entries6 == 0) {
657 kfree(tp->md5sig_info->keys6);
658 tp->md5sig_info->keys6 = NULL;
ca983cef 659 tp->md5sig_info->alloced6 = 0;
cfb6eeb4
YH
660 } else {
661 /* shrink the database */
662 if (tp->md5sig_info->entries6 != i)
663 memmove(&tp->md5sig_info->keys6[i],
664 &tp->md5sig_info->keys6[i+1],
665 (tp->md5sig_info->entries6 - i)
666 * sizeof (tp->md5sig_info->keys6[0]));
667 }
77adefdc
YH
668 tcp_free_md5sig_pool();
669 return 0;
cfb6eeb4
YH
670 }
671 }
672 return -ENOENT;
673}
674
675static void tcp_v6_clear_md5_list (struct sock *sk)
676{
677 struct tcp_sock *tp = tcp_sk(sk);
678 int i;
679
680 if (tp->md5sig_info->entries6) {
681 for (i = 0; i < tp->md5sig_info->entries6; i++)
f8ab18d2 682 kfree(tp->md5sig_info->keys6[i].base.key);
cfb6eeb4
YH
683 tp->md5sig_info->entries6 = 0;
684 tcp_free_md5sig_pool();
685 }
686
687 kfree(tp->md5sig_info->keys6);
688 tp->md5sig_info->keys6 = NULL;
689 tp->md5sig_info->alloced6 = 0;
690
691 if (tp->md5sig_info->entries4) {
692 for (i = 0; i < tp->md5sig_info->entries4; i++)
f8ab18d2 693 kfree(tp->md5sig_info->keys4[i].base.key);
cfb6eeb4
YH
694 tp->md5sig_info->entries4 = 0;
695 tcp_free_md5sig_pool();
696 }
697
698 kfree(tp->md5sig_info->keys4);
699 tp->md5sig_info->keys4 = NULL;
700 tp->md5sig_info->alloced4 = 0;
701}
702
703static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
704 int optlen)
705{
706 struct tcp_md5sig cmd;
707 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
708 u8 *newkey;
709
710 if (optlen < sizeof(cmd))
711 return -EINVAL;
712
713 if (copy_from_user(&cmd, optval, sizeof(cmd)))
714 return -EFAULT;
715
716 if (sin6->sin6_family != AF_INET6)
717 return -EINVAL;
718
719 if (!cmd.tcpm_keylen) {
720 if (!tcp_sk(sk)->md5sig_info)
721 return -ENOENT;
e773e4fa 722 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
cfb6eeb4
YH
723 return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
724 return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
725 }
726
727 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
728 return -EINVAL;
729
730 if (!tcp_sk(sk)->md5sig_info) {
731 struct tcp_sock *tp = tcp_sk(sk);
732 struct tcp_md5sig_info *p;
733
734 p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
735 if (!p)
736 return -ENOMEM;
737
738 tp->md5sig_info = p;
3d7dbeac 739 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
cfb6eeb4
YH
740 }
741
af879cc7 742 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4
YH
743 if (!newkey)
744 return -ENOMEM;
e773e4fa 745 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
cfb6eeb4
YH
746 return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
747 newkey, cmd.tcpm_keylen);
748 }
749 return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
750}
751
49a72dfb
AL
752static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
753 struct in6_addr *daddr,
754 struct in6_addr *saddr, int nbytes)
cfb6eeb4 755{
cfb6eeb4 756 struct tcp6_pseudohdr *bp;
49a72dfb 757 struct scatterlist sg;
8d26d76d 758
cfb6eeb4 759 bp = &hp->md5_blk.ip6;
cfb6eeb4
YH
760 /* 1. TCP pseudo-header (RFC2460) */
761 ipv6_addr_copy(&bp->saddr, saddr);
762 ipv6_addr_copy(&bp->daddr, daddr);
49a72dfb 763 bp->protocol = cpu_to_be32(IPPROTO_TCP);
00b1304c 764 bp->len = cpu_to_be32(nbytes);
cfb6eeb4 765
49a72dfb
AL
766 sg_init_one(&sg, bp, sizeof(*bp));
767 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
768}
c7da57a1 769
49a72dfb
AL
770static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
771 struct in6_addr *daddr, struct in6_addr *saddr,
772 struct tcphdr *th)
773{
774 struct tcp_md5sig_pool *hp;
775 struct hash_desc *desc;
776
777 hp = tcp_get_md5sig_pool();
778 if (!hp)
779 goto clear_hash_noput;
780 desc = &hp->md5_desc;
781
782 if (crypto_hash_init(desc))
783 goto clear_hash;
784 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
785 goto clear_hash;
786 if (tcp_md5_hash_header(hp, th))
787 goto clear_hash;
788 if (tcp_md5_hash_key(hp, key))
789 goto clear_hash;
790 if (crypto_hash_final(desc, md5_hash))
cfb6eeb4 791 goto clear_hash;
cfb6eeb4 792
cfb6eeb4 793 tcp_put_md5sig_pool();
cfb6eeb4 794 return 0;
49a72dfb 795
cfb6eeb4
YH
796clear_hash:
797 tcp_put_md5sig_pool();
798clear_hash_noput:
799 memset(md5_hash, 0, 16);
49a72dfb 800 return 1;
cfb6eeb4
YH
801}
802
49a72dfb
AL
803static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
804 struct sock *sk, struct request_sock *req,
805 struct sk_buff *skb)
cfb6eeb4
YH
806{
807 struct in6_addr *saddr, *daddr;
49a72dfb
AL
808 struct tcp_md5sig_pool *hp;
809 struct hash_desc *desc;
810 struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4
YH
811
812 if (sk) {
813 saddr = &inet6_sk(sk)->saddr;
814 daddr = &inet6_sk(sk)->daddr;
49a72dfb 815 } else if (req) {
cfb6eeb4
YH
816 saddr = &inet6_rsk(req)->loc_addr;
817 daddr = &inet6_rsk(req)->rmt_addr;
49a72dfb
AL
818 } else {
819 struct ipv6hdr *ip6h = ipv6_hdr(skb);
820 saddr = &ip6h->saddr;
821 daddr = &ip6h->daddr;
cfb6eeb4 822 }
49a72dfb
AL
823
824 hp = tcp_get_md5sig_pool();
825 if (!hp)
826 goto clear_hash_noput;
827 desc = &hp->md5_desc;
828
829 if (crypto_hash_init(desc))
830 goto clear_hash;
831
832 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
833 goto clear_hash;
834 if (tcp_md5_hash_header(hp, th))
835 goto clear_hash;
836 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
837 goto clear_hash;
838 if (tcp_md5_hash_key(hp, key))
839 goto clear_hash;
840 if (crypto_hash_final(desc, md5_hash))
841 goto clear_hash;
842
843 tcp_put_md5sig_pool();
844 return 0;
845
846clear_hash:
847 tcp_put_md5sig_pool();
848clear_hash_noput:
849 memset(md5_hash, 0, 16);
850 return 1;
cfb6eeb4
YH
851}
852
853static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
854{
855 __u8 *hash_location = NULL;
856 struct tcp_md5sig_key *hash_expected;
0660e03f 857 struct ipv6hdr *ip6h = ipv6_hdr(skb);
aa8223c7 858 struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 859 int genhash;
cfb6eeb4
YH
860 u8 newhash[16];
861
862 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
7d5d5525 863 hash_location = tcp_parse_md5sig_option(th);
cfb6eeb4 864
785957d3
DM
865 /* We've parsed the options - do we have a hash? */
866 if (!hash_expected && !hash_location)
867 return 0;
868
869 if (hash_expected && !hash_location) {
870 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
cfb6eeb4
YH
871 return 1;
872 }
873
785957d3
DM
874 if (!hash_expected && hash_location) {
875 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
cfb6eeb4
YH
876 return 1;
877 }
878
879 /* check the signature */
49a72dfb
AL
880 genhash = tcp_v6_md5_hash_skb(newhash,
881 hash_expected,
882 NULL, NULL, skb);
883
cfb6eeb4
YH
884 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
885 if (net_ratelimit()) {
5856b606 886 printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
cfb6eeb4 887 genhash ? "failed" : "mismatch",
0c6ce78a
HH
888 &ip6h->saddr, ntohs(th->source),
889 &ip6h->daddr, ntohs(th->dest));
cfb6eeb4
YH
890 }
891 return 1;
892 }
893 return 0;
894}
895#endif
896
c6aefafb 897struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
1da177e4 898 .family = AF_INET6,
2e6599cb 899 .obj_size = sizeof(struct tcp6_request_sock),
72659ecc 900 .rtx_syn_ack = tcp_v6_rtx_synack,
60236fdd
ACM
901 .send_ack = tcp_v6_reqsk_send_ack,
902 .destructor = tcp_v6_reqsk_destructor,
72659ecc
OP
903 .send_reset = tcp_v6_send_reset,
904 .syn_ack_timeout = tcp_syn_ack_timeout,
1da177e4
LT
905};
906
cfb6eeb4 907#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 908static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
cfb6eeb4 909 .md5_lookup = tcp_v6_reqsk_md5_lookup,
e3afe7b7 910 .calc_md5_hash = tcp_v6_md5_hash_skb,
cfb6eeb4 911};
b6332e6c 912#endif
cfb6eeb4 913
6d6ee43e
ACM
914static struct timewait_sock_ops tcp6_timewait_sock_ops = {
915 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
916 .twsk_unique = tcp_twsk_unique,
cfb6eeb4 917 .twsk_destructor= tcp_twsk_destructor,
6d6ee43e
ACM
918};
919
8ad50d96
HX
920static void __tcp_v6_send_check(struct sk_buff *skb,
921 struct in6_addr *saddr, struct in6_addr *daddr)
1da177e4 922{
aa8223c7 923 struct tcphdr *th = tcp_hdr(skb);
1da177e4 924
84fa7933 925 if (skb->ip_summed == CHECKSUM_PARTIAL) {
8ad50d96 926 th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
663ead3b 927 skb->csum_start = skb_transport_header(skb) - skb->head;
ff1dcadb 928 skb->csum_offset = offsetof(struct tcphdr, check);
1da177e4 929 } else {
8ad50d96
HX
930 th->check = tcp_v6_check(skb->len, saddr, daddr,
931 csum_partial(th, th->doff << 2,
932 skb->csum));
1da177e4
LT
933 }
934}
935
bb296246 936static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
8ad50d96
HX
937{
938 struct ipv6_pinfo *np = inet6_sk(sk);
939
940 __tcp_v6_send_check(skb, &np->saddr, &np->daddr);
941}
942
a430a43d
HX
943static int tcp_v6_gso_send_check(struct sk_buff *skb)
944{
945 struct ipv6hdr *ipv6h;
946 struct tcphdr *th;
947
948 if (!pskb_may_pull(skb, sizeof(*th)))
949 return -EINVAL;
950
0660e03f 951 ipv6h = ipv6_hdr(skb);
aa8223c7 952 th = tcp_hdr(skb);
a430a43d
HX
953
954 th->check = 0;
84fa7933 955 skb->ip_summed = CHECKSUM_PARTIAL;
8ad50d96 956 __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
a430a43d
HX
957 return 0;
958}
1da177e4 959
36990673
HX
960static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
961 struct sk_buff *skb)
684f2176 962{
36e7b1b8 963 struct ipv6hdr *iph = skb_gro_network_header(skb);
684f2176
HX
964
965 switch (skb->ip_summed) {
966 case CHECKSUM_COMPLETE:
86911732 967 if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
684f2176
HX
968 skb->csum)) {
969 skb->ip_summed = CHECKSUM_UNNECESSARY;
970 break;
971 }
972
973 /* fall through */
974 case CHECKSUM_NONE:
975 NAPI_GRO_CB(skb)->flush = 1;
976 return NULL;
977 }
978
979 return tcp_gro_receive(head, skb);
980}
684f2176 981
36990673 982static int tcp6_gro_complete(struct sk_buff *skb)
684f2176
HX
983{
984 struct ipv6hdr *iph = ipv6_hdr(skb);
985 struct tcphdr *th = tcp_hdr(skb);
986
987 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
988 &iph->saddr, &iph->daddr, 0);
989 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
990
991 return tcp_gro_complete(skb);
992}
684f2176 993
626e264d
IJ
994static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
995 u32 ts, struct tcp_md5sig_key *key, int rst)
1da177e4 996{
aa8223c7 997 struct tcphdr *th = tcp_hdr(skb), *t1;
1da177e4
LT
998 struct sk_buff *buff;
999 struct flowi fl;
adf30907 1000 struct net *net = dev_net(skb_dst(skb)->dev);
e5047992 1001 struct sock *ctl_sk = net->ipv6.tcp_sk;
77c676da 1002 unsigned int tot_len = sizeof(struct tcphdr);
adf30907 1003 struct dst_entry *dst;
81ada62d 1004 __be32 *topt;
1da177e4 1005
626e264d
IJ
1006 if (ts)
1007 tot_len += TCPOLEN_TSTAMP_ALIGNED;
cfb6eeb4 1008#ifdef CONFIG_TCP_MD5SIG
cfb6eeb4
YH
1009 if (key)
1010 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1011#endif
1012
cfb6eeb4 1013 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1da177e4 1014 GFP_ATOMIC);
1ab1457c
YH
1015 if (buff == NULL)
1016 return;
1da177e4 1017
cfb6eeb4 1018 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1da177e4 1019
cfb6eeb4 1020 t1 = (struct tcphdr *) skb_push(buff, tot_len);
a8fdf2b3 1021 skb_reset_transport_header(skb);
1da177e4
LT
1022
1023 /* Swap the send and the receive. */
1024 memset(t1, 0, sizeof(*t1));
1025 t1->dest = th->source;
1026 t1->source = th->dest;
cfb6eeb4 1027 t1->doff = tot_len / 4;
626e264d
IJ
1028 t1->seq = htonl(seq);
1029 t1->ack_seq = htonl(ack);
1030 t1->ack = !rst || !th->ack;
1031 t1->rst = rst;
1032 t1->window = htons(win);
1da177e4 1033
81ada62d
IJ
1034 topt = (__be32 *)(t1 + 1);
1035
626e264d
IJ
1036 if (ts) {
1037 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1038 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1039 *topt++ = htonl(tcp_time_stamp);
1040 *topt++ = htonl(ts);
1041 }
1042
cfb6eeb4
YH
1043#ifdef CONFIG_TCP_MD5SIG
1044 if (key) {
81ada62d
IJ
1045 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1046 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1047 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
78e645cb
IJ
1048 &ipv6_hdr(skb)->saddr,
1049 &ipv6_hdr(skb)->daddr, t1);
cfb6eeb4
YH
1050 }
1051#endif
1052
07f0757a 1053 buff->csum = csum_partial(t1, tot_len, 0);
1da177e4
LT
1054
1055 memset(&fl, 0, sizeof(fl));
0660e03f
ACM
1056 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1057 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1da177e4 1058
8ad50d96 1059 __tcp_v6_send_check(buff, &fl.fl6_src, &fl.fl6_dst);
1da177e4
LT
1060
1061 fl.proto = IPPROTO_TCP;
505cbfc5 1062 fl.oif = inet6_iif(skb);
1da177e4
LT
1063 fl.fl_ip_dport = t1->dest;
1064 fl.fl_ip_sport = t1->source;
beb8d13b 1065 security_skb_classify_flow(skb, &fl);
1da177e4 1066
c20121ae
DL
1067 /* Pass a socket to ip6_dst_lookup either it is for RST
1068 * Underlying function will use this to retrieve the network
1069 * namespace
1070 */
adf30907
ED
1071 if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
1072 if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
1073 skb_dst_set(buff, dst);
4e15ed4d 1074 ip6_xmit(ctl_sk, buff, &fl, NULL);
63231bdd 1075 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
626e264d
IJ
1076 if (rst)
1077 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
1da177e4 1078 return;
ecc51b6d 1079 }
1da177e4
LT
1080 }
1081
1082 kfree_skb(buff);
1083}
1084
626e264d 1085static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1da177e4 1086{
626e264d
IJ
1087 struct tcphdr *th = tcp_hdr(skb);
1088 u32 seq = 0, ack_seq = 0;
fa3e5b4e 1089 struct tcp_md5sig_key *key = NULL;
1da177e4 1090
626e264d 1091 if (th->rst)
1da177e4
LT
1092 return;
1093
626e264d
IJ
1094 if (!ipv6_unicast_destination(skb))
1095 return;
1da177e4 1096
cfb6eeb4 1097#ifdef CONFIG_TCP_MD5SIG
626e264d
IJ
1098 if (sk)
1099 key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
cfb6eeb4
YH
1100#endif
1101
626e264d
IJ
1102 if (th->ack)
1103 seq = ntohl(th->ack_seq);
1104 else
1105 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
1106 (th->doff << 2);
1da177e4 1107
626e264d
IJ
1108 tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
1109}
1da177e4 1110
626e264d
IJ
1111static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
1112 struct tcp_md5sig_key *key)
1113{
1114 tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
1da177e4
LT
1115}
1116
1117static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1118{
8feaf0c0 1119 struct inet_timewait_sock *tw = inet_twsk(sk);
cfb6eeb4 1120 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1da177e4 1121
9501f972 1122 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
8feaf0c0 1123 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
9501f972 1124 tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));
1da177e4 1125
8feaf0c0 1126 inet_twsk_put(tw);
1da177e4
LT
1127}
1128
6edafaaf
GJ
1129static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
1130 struct request_sock *req)
1da177e4 1131{
9501f972 1132 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
6edafaaf 1133 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
1da177e4
LT
1134}
1135
1136
1137static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1138{
60236fdd 1139 struct request_sock *req, **prev;
aa8223c7 1140 const struct tcphdr *th = tcp_hdr(skb);
1da177e4
LT
1141 struct sock *nsk;
1142
1143 /* Find possible connection requests. */
8129765a 1144 req = inet6_csk_search_req(sk, &prev, th->source,
0660e03f
ACM
1145 &ipv6_hdr(skb)->saddr,
1146 &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1da177e4
LT
1147 if (req)
1148 return tcp_check_req(sk, skb, req, prev);
1149
3b1e0a65 1150 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
d86e0dac
PE
1151 &ipv6_hdr(skb)->saddr, th->source,
1152 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1da177e4
LT
1153
1154 if (nsk) {
1155 if (nsk->sk_state != TCP_TIME_WAIT) {
1156 bh_lock_sock(nsk);
1157 return nsk;
1158 }
9469c7b4 1159 inet_twsk_put(inet_twsk(nsk));
1da177e4
LT
1160 return NULL;
1161 }
1162
c6aefafb 1163#ifdef CONFIG_SYN_COOKIES
1da177e4 1164 if (!th->rst && !th->syn && th->ack)
c6aefafb 1165 sk = cookie_v6_check(sk, skb);
1da177e4
LT
1166#endif
1167 return sk;
1168}
1169
1da177e4
LT
1170/* FIXME: this is substantially similar to the ipv4 code.
1171 * Can some kind of merge be done? -- erics
1172 */
1173static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1174{
4957faad 1175 struct tcp_extend_values tmp_ext;
e6b4d113 1176 struct tcp_options_received tmp_opt;
4957faad 1177 u8 *hash_location;
e6b4d113 1178 struct request_sock *req;
ca304b61 1179 struct inet6_request_sock *treq;
1da177e4 1180 struct ipv6_pinfo *np = inet6_sk(sk);
1da177e4 1181 struct tcp_sock *tp = tcp_sk(sk);
e6b4d113 1182 __u32 isn = TCP_SKB_CB(skb)->when;
c6aefafb
GG
1183#ifdef CONFIG_SYN_COOKIES
1184 int want_cookie = 0;
1185#else
1186#define want_cookie 0
1187#endif
1da177e4
LT
1188
1189 if (skb->protocol == htons(ETH_P_IP))
1190 return tcp_v4_conn_request(sk, skb);
1191
1192 if (!ipv6_unicast_destination(skb))
1ab1457c 1193 goto drop;
1da177e4 1194
463c84b9 1195 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1da177e4 1196 if (net_ratelimit())
c6aefafb
GG
1197 syn_flood_warning(skb);
1198#ifdef CONFIG_SYN_COOKIES
1199 if (sysctl_tcp_syncookies)
1200 want_cookie = 1;
1201 else
1202#endif
1ab1457c 1203 goto drop;
1da177e4
LT
1204 }
1205
463c84b9 1206 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1da177e4
LT
1207 goto drop;
1208
ca304b61 1209 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1da177e4
LT
1210 if (req == NULL)
1211 goto drop;
1212
cfb6eeb4
YH
1213#ifdef CONFIG_TCP_MD5SIG
1214 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1215#endif
1216
1da177e4
LT
1217 tcp_clear_options(&tmp_opt);
1218 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1219 tmp_opt.user_mss = tp->rx_opt.user_mss;
bb5b7c11 1220 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
4957faad
WAS
1221
1222 if (tmp_opt.cookie_plus > 0 &&
1223 tmp_opt.saw_tstamp &&
1224 !tp->rx_opt.cookie_out_never &&
1225 (sysctl_tcp_cookie_size > 0 ||
1226 (tp->cookie_values != NULL &&
1227 tp->cookie_values->cookie_desired > 0))) {
1228 u8 *c;
1229 u32 *d;
1230 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1231 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1232
1233 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1234 goto drop_and_free;
1235
1236 /* Secret recipe starts with IP addresses */
1237 d = &ipv6_hdr(skb)->daddr.s6_addr32[0];
1238 *mess++ ^= *d++;
1239 *mess++ ^= *d++;
1240 *mess++ ^= *d++;
1241 *mess++ ^= *d++;
1242 d = &ipv6_hdr(skb)->saddr.s6_addr32[0];
1243 *mess++ ^= *d++;
1244 *mess++ ^= *d++;
1245 *mess++ ^= *d++;
1246 *mess++ ^= *d++;
1247
1248 /* plus variable length Initiator Cookie */
1249 c = (u8 *)mess;
1250 while (l-- > 0)
1251 *c++ ^= *hash_location++;
1da177e4 1252
4957faad
WAS
1253#ifdef CONFIG_SYN_COOKIES
1254 want_cookie = 0; /* not our kind of cookie */
1255#endif
1256 tmp_ext.cookie_out_never = 0; /* false */
1257 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1258 } else if (!tp->rx_opt.cookie_in_always) {
1259 /* redundant indications, but ensure initialization. */
1260 tmp_ext.cookie_out_never = 1; /* true */
1261 tmp_ext.cookie_plus = 0;
1262 } else {
1263 goto drop_and_free;
1264 }
1265 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1da177e4 1266
4dfc2817 1267 if (want_cookie && !tmp_opt.saw_tstamp)
c6aefafb 1268 tcp_clear_options(&tmp_opt);
c6aefafb 1269
1da177e4
LT
1270 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1271 tcp_openreq_init(req, &tmp_opt, skb);
1272
ca304b61 1273 treq = inet6_rsk(req);
0660e03f
ACM
1274 ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
1275 ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
c6aefafb
GG
1276 if (!want_cookie)
1277 TCP_ECN_create_request(req, tcp_hdr(skb));
1278
1279 if (want_cookie) {
1280 isn = cookie_v6_init_sequence(sk, skb, &req->mss);
4dfc2817 1281 req->cookie_ts = tmp_opt.tstamp_ok;
c6aefafb
GG
1282 } else if (!isn) {
1283 if (ipv6_opt_accepted(sk, skb) ||
1284 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1285 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1286 atomic_inc(&skb->users);
1287 treq->pktopts = skb;
1288 }
1289 treq->iif = sk->sk_bound_dev_if;
1da177e4 1290
c6aefafb
GG
1291 /* So that link locals have meaning */
1292 if (!sk->sk_bound_dev_if &&
1293 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1294 treq->iif = inet6_iif(skb);
1da177e4 1295
a94f723d 1296 isn = tcp_v6_init_sequence(skb);
c6aefafb 1297 }
2e6599cb 1298 tcp_rsk(req)->snt_isn = isn;
1da177e4 1299
4237c75c
VY
1300 security_inet_conn_request(sk, skb, req);
1301
4957faad
WAS
1302 if (tcp_v6_send_synack(sk, req,
1303 (struct request_values *)&tmp_ext) ||
1304 want_cookie)
e6b4d113 1305 goto drop_and_free;
1da177e4 1306
e6b4d113
WAS
1307 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1308 return 0;
1da177e4 1309
e6b4d113
WAS
1310drop_and_free:
1311 reqsk_free(req);
1da177e4 1312drop:
1da177e4
LT
1313 return 0; /* don't send reset */
1314}
1315
1316static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
60236fdd 1317 struct request_sock *req,
1da177e4
LT
1318 struct dst_entry *dst)
1319{
78d15e82 1320 struct inet6_request_sock *treq;
1da177e4
LT
1321 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1322 struct tcp6_sock *newtcp6sk;
1323 struct inet_sock *newinet;
1324 struct tcp_sock *newtp;
1325 struct sock *newsk;
1326 struct ipv6_txoptions *opt;
cfb6eeb4
YH
1327#ifdef CONFIG_TCP_MD5SIG
1328 struct tcp_md5sig_key *key;
1329#endif
1da177e4
LT
1330
1331 if (skb->protocol == htons(ETH_P_IP)) {
1332 /*
1333 * v6 mapped
1334 */
1335
1336 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1337
1ab1457c 1338 if (newsk == NULL)
1da177e4
LT
1339 return NULL;
1340
1341 newtcp6sk = (struct tcp6_sock *)newsk;
1342 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1343
1344 newinet = inet_sk(newsk);
1345 newnp = inet6_sk(newsk);
1346 newtp = tcp_sk(newsk);
1347
1348 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1349
c720c7e8 1350 ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
1da177e4 1351
c720c7e8 1352 ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1da177e4
LT
1353
1354 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1355
8292a17a 1356 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1da177e4 1357 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
1358#ifdef CONFIG_TCP_MD5SIG
1359 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1360#endif
1361
1da177e4
LT
1362 newnp->pktoptions = NULL;
1363 newnp->opt = NULL;
505cbfc5 1364 newnp->mcast_oif = inet6_iif(skb);
0660e03f 1365 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1da177e4 1366
e6848976
ACM
1367 /*
1368 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1369 * here, tcp_create_openreq_child now does this for us, see the comment in
1370 * that function for the gory details. -acme
1da177e4 1371 */
1da177e4
LT
1372
1373 /* It is tricky place. Until this moment IPv4 tcp
8292a17a 1374 worked with IPv6 icsk.icsk_af_ops.
1da177e4
LT
1375 Sync it now.
1376 */
d83d8461 1377 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1da177e4
LT
1378
1379 return newsk;
1380 }
1381
78d15e82 1382 treq = inet6_rsk(req);
1da177e4
LT
1383 opt = np->opt;
1384
1385 if (sk_acceptq_is_full(sk))
1386 goto out_overflow;
1387
1da177e4
LT
1388 if (dst == NULL) {
1389 struct in6_addr *final_p = NULL, final;
1390 struct flowi fl;
1391
1392 memset(&fl, 0, sizeof(fl));
1393 fl.proto = IPPROTO_TCP;
2e6599cb 1394 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1da177e4
LT
1395 if (opt && opt->srcrt) {
1396 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
1397 ipv6_addr_copy(&final, &fl.fl6_dst);
1398 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1399 final_p = &final;
1400 }
2e6599cb 1401 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1da177e4 1402 fl.oif = sk->sk_bound_dev_if;
51953d5b 1403 fl.mark = sk->sk_mark;
2e6599cb 1404 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
fd507037 1405 fl.fl_ip_sport = inet_rsk(req)->loc_port;
4237c75c 1406 security_req_classify_flow(req, &fl);
1da177e4
LT
1407
1408 if (ip6_dst_lookup(sk, &dst, &fl))
1409 goto out;
1410
1411 if (final_p)
1412 ipv6_addr_copy(&fl.fl6_dst, final_p);
1413
52479b62 1414 if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
1da177e4 1415 goto out;
1ab1457c 1416 }
1da177e4
LT
1417
1418 newsk = tcp_create_openreq_child(sk, req, skb);
1419 if (newsk == NULL)
1420 goto out;
1421
e6848976
ACM
1422 /*
1423 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1424 * count here, tcp_create_openreq_child now does this for us, see the
1425 * comment in that function for the gory details. -acme
1426 */
1da177e4 1427
59eed279 1428 newsk->sk_gso_type = SKB_GSO_TCPV6;
8e1ef0a9 1429 __ip6_dst_store(newsk, dst, NULL, NULL);
1da177e4
LT
1430
1431 newtcp6sk = (struct tcp6_sock *)newsk;
1432 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1433
1434 newtp = tcp_sk(newsk);
1435 newinet = inet_sk(newsk);
1436 newnp = inet6_sk(newsk);
1437
1438 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1439
2e6599cb
ACM
1440 ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1441 ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1442 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1443 newsk->sk_bound_dev_if = treq->iif;
1da177e4 1444
1ab1457c 1445 /* Now IPv6 options...
1da177e4
LT
1446
1447 First: no IPv4 options.
1448 */
1449 newinet->opt = NULL;
d35690be 1450 newnp->ipv6_fl_list = NULL;
1da177e4
LT
1451
1452 /* Clone RX bits */
1453 newnp->rxopt.all = np->rxopt.all;
1454
1455 /* Clone pktoptions received with SYN */
1456 newnp->pktoptions = NULL;
2e6599cb
ACM
1457 if (treq->pktopts != NULL) {
1458 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1459 kfree_skb(treq->pktopts);
1460 treq->pktopts = NULL;
1da177e4
LT
1461 if (newnp->pktoptions)
1462 skb_set_owner_r(newnp->pktoptions, newsk);
1463 }
1464 newnp->opt = NULL;
505cbfc5 1465 newnp->mcast_oif = inet6_iif(skb);
0660e03f 1466 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1da177e4
LT
1467
1468 /* Clone native IPv6 options from listening socket (if any)
1469
1470 Yes, keeping reference count would be much more clever,
1471 but we make one more one thing there: reattach optmem
1472 to newsk.
1473 */
1474 if (opt) {
1475 newnp->opt = ipv6_dup_options(newsk, opt);
1476 if (opt != np->opt)
1477 sock_kfree_s(sk, opt, opt->tot_len);
1478 }
1479
d83d8461 1480 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1da177e4 1481 if (newnp->opt)
d83d8461
ACM
1482 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1483 newnp->opt->opt_flen);
1da177e4 1484
5d424d5a 1485 tcp_mtup_init(newsk);
1da177e4
LT
1486 tcp_sync_mss(newsk, dst_mtu(dst));
1487 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1488 tcp_initialize_rcv_mss(newsk);
1489
c720c7e8
ED
1490 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1491 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1da177e4 1492
cfb6eeb4
YH
1493#ifdef CONFIG_TCP_MD5SIG
1494 /* Copy over the MD5 key from the original socket */
1495 if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1496 /* We're using one, so create a matching key
1497 * on the newsk structure. If we fail to get
1498 * memory, then we end up not copying the key
1499 * across. Shucks.
1500 */
af879cc7
ACM
1501 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1502 if (newkey != NULL)
e547bc1e 1503 tcp_v6_md5_do_add(newsk, &newnp->daddr,
cfb6eeb4 1504 newkey, key->keylen);
cfb6eeb4
YH
1505 }
1506#endif
1507
9327f705 1508 __inet6_hash(newsk, NULL);
e56d8b8a 1509 __inet_inherit_port(sk, newsk);
1da177e4
LT
1510
1511 return newsk;
1512
1513out_overflow:
de0744af 1514 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1da177e4 1515out:
de0744af 1516 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1da177e4
LT
1517 if (opt && opt != np->opt)
1518 sock_kfree_s(sk, opt, opt->tot_len);
1519 dst_release(dst);
1520 return NULL;
1521}
1522
b51655b9 1523static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1da177e4 1524{
84fa7933 1525 if (skb->ip_summed == CHECKSUM_COMPLETE) {
684f2176 1526 if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
0660e03f 1527 &ipv6_hdr(skb)->daddr, skb->csum)) {
fb286bb2 1528 skb->ip_summed = CHECKSUM_UNNECESSARY;
1da177e4 1529 return 0;
fb286bb2 1530 }
1da177e4 1531 }
fb286bb2 1532
684f2176 1533 skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
0660e03f
ACM
1534 &ipv6_hdr(skb)->saddr,
1535 &ipv6_hdr(skb)->daddr, 0));
fb286bb2 1536
1da177e4 1537 if (skb->len <= 76) {
fb286bb2 1538 return __skb_checksum_complete(skb);
1da177e4
LT
1539 }
1540 return 0;
1541}
1542
1543/* The socket must have it's spinlock held when we get
1544 * here.
1545 *
1546 * We have a potential double-lock case here, so even when
1547 * doing backlog processing we use the BH locking scheme.
1548 * This is because we cannot sleep with the original spinlock
1549 * held.
1550 */
1551static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1552{
1553 struct ipv6_pinfo *np = inet6_sk(sk);
1554 struct tcp_sock *tp;
1555 struct sk_buff *opt_skb = NULL;
1556
1557 /* Imagine: socket is IPv6. IPv4 packet arrives,
1558 goes to IPv4 receive handler and backlogged.
1559 From backlog it always goes here. Kerboom...
1560 Fortunately, tcp_rcv_established and rcv_established
1561 handle them correctly, but it is not case with
1562 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1563 */
1564
1565 if (skb->protocol == htons(ETH_P_IP))
1566 return tcp_v4_do_rcv(sk, skb);
1567
cfb6eeb4
YH
1568#ifdef CONFIG_TCP_MD5SIG
1569 if (tcp_v6_inbound_md5_hash (sk, skb))
1570 goto discard;
1571#endif
1572
fda9ef5d 1573 if (sk_filter(sk, skb))
1da177e4
LT
1574 goto discard;
1575
1576 /*
1577 * socket locking is here for SMP purposes as backlog rcv
1578 * is currently called with bh processing disabled.
1579 */
1580
1581 /* Do Stevens' IPV6_PKTOPTIONS.
1582
1583 Yes, guys, it is the only place in our code, where we
1584 may make it not affecting IPv4.
1585 The rest of code is protocol independent,
1586 and I do not like idea to uglify IPv4.
1587
1588 Actually, all the idea behind IPV6_PKTOPTIONS
1589 looks not very well thought. For now we latch
1590 options, received in the last packet, enqueued
1591 by tcp. Feel free to propose better solution.
1ab1457c 1592 --ANK (980728)
1da177e4
LT
1593 */
1594 if (np->rxopt.all)
1595 opt_skb = skb_clone(skb, GFP_ATOMIC);
1596
1597 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1598 TCP_CHECK_TIMER(sk);
aa8223c7 1599 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1da177e4
LT
1600 goto reset;
1601 TCP_CHECK_TIMER(sk);
1602 if (opt_skb)
1603 goto ipv6_pktoptions;
1604 return 0;
1605 }
1606
ab6a5bb6 1607 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1da177e4
LT
1608 goto csum_err;
1609
1ab1457c 1610 if (sk->sk_state == TCP_LISTEN) {
1da177e4
LT
1611 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1612 if (!nsk)
1613 goto discard;
1614
1615 /*
1616 * Queue it on the new socket if the new socket is active,
1617 * otherwise we just shortcircuit this and continue with
1618 * the new socket..
1619 */
1ab1457c 1620 if(nsk != sk) {
1da177e4
LT
1621 if (tcp_child_process(sk, nsk, skb))
1622 goto reset;
1623 if (opt_skb)
1624 __kfree_skb(opt_skb);
1625 return 0;
1626 }
1627 }
1628
1629 TCP_CHECK_TIMER(sk);
aa8223c7 1630 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1da177e4
LT
1631 goto reset;
1632 TCP_CHECK_TIMER(sk);
1633 if (opt_skb)
1634 goto ipv6_pktoptions;
1635 return 0;
1636
1637reset:
cfb6eeb4 1638 tcp_v6_send_reset(sk, skb);
1da177e4
LT
1639discard:
1640 if (opt_skb)
1641 __kfree_skb(opt_skb);
1642 kfree_skb(skb);
1643 return 0;
1644csum_err:
63231bdd 1645 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1da177e4
LT
1646 goto discard;
1647
1648
1649ipv6_pktoptions:
1650 /* Do you ask, what is it?
1651
1652 1. skb was enqueued by tcp.
1653 2. skb is added to tail of read queue, rather than out of order.
1654 3. socket is not in passive state.
1655 4. Finally, it really contains options, which user wants to receive.
1656 */
1657 tp = tcp_sk(sk);
1658 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1659 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
333fad53 1660 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
505cbfc5 1661 np->mcast_oif = inet6_iif(opt_skb);
333fad53 1662 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
0660e03f 1663 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1da177e4
LT
1664 if (ipv6_opt_accepted(sk, opt_skb)) {
1665 skb_set_owner_r(opt_skb, sk);
1666 opt_skb = xchg(&np->pktoptions, opt_skb);
1667 } else {
1668 __kfree_skb(opt_skb);
1669 opt_skb = xchg(&np->pktoptions, NULL);
1670 }
1671 }
1672
800d55f1 1673 kfree_skb(opt_skb);
1da177e4
LT
1674 return 0;
1675}
1676
e5bbef20 1677static int tcp_v6_rcv(struct sk_buff *skb)
1da177e4 1678{
1ab1457c 1679 struct tcphdr *th;
1da177e4
LT
1680 struct sock *sk;
1681 int ret;
a86b1e30 1682 struct net *net = dev_net(skb->dev);
1da177e4
LT
1683
1684 if (skb->pkt_type != PACKET_HOST)
1685 goto discard_it;
1686
1687 /*
1688 * Count it even if it's bad.
1689 */
63231bdd 1690 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1da177e4
LT
1691
1692 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1693 goto discard_it;
1694
aa8223c7 1695 th = tcp_hdr(skb);
1da177e4
LT
1696
1697 if (th->doff < sizeof(struct tcphdr)/4)
1698 goto bad_packet;
1699 if (!pskb_may_pull(skb, th->doff*4))
1700 goto discard_it;
1701
60476372 1702 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1da177e4
LT
1703 goto bad_packet;
1704
aa8223c7 1705 th = tcp_hdr(skb);
1da177e4
LT
1706 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1707 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1708 skb->len - th->doff*4);
1709 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1710 TCP_SKB_CB(skb)->when = 0;
0660e03f 1711 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
1da177e4
LT
1712 TCP_SKB_CB(skb)->sacked = 0;
1713
9a1f27c4 1714 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1da177e4
LT
1715 if (!sk)
1716 goto no_tcp_socket;
1717
1718process:
1719 if (sk->sk_state == TCP_TIME_WAIT)
1720 goto do_time_wait;
1721
1722 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1723 goto discard_and_relse;
1724
fda9ef5d 1725 if (sk_filter(sk, skb))
1da177e4
LT
1726 goto discard_and_relse;
1727
1728 skb->dev = NULL;
1729
293b9c42 1730 bh_lock_sock_nested(sk);
1da177e4
LT
1731 ret = 0;
1732 if (!sock_owned_by_user(sk)) {
1a2449a8 1733#ifdef CONFIG_NET_DMA
1ab1457c 1734 struct tcp_sock *tp = tcp_sk(sk);
b4caea8a 1735 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
f67b4599 1736 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1ab1457c
YH
1737 if (tp->ucopy.dma_chan)
1738 ret = tcp_v6_do_rcv(sk, skb);
1739 else
1a2449a8
CL
1740#endif
1741 {
1742 if (!tcp_prequeue(sk, skb))
1743 ret = tcp_v6_do_rcv(sk, skb);
1744 }
6cce09f8 1745 } else if (unlikely(sk_add_backlog(sk, skb))) {
6b03a53a 1746 bh_unlock_sock(sk);
6cce09f8 1747 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
6b03a53a
ZY
1748 goto discard_and_relse;
1749 }
1da177e4
LT
1750 bh_unlock_sock(sk);
1751
1752 sock_put(sk);
1753 return ret ? -1 : 0;
1754
1755no_tcp_socket:
1756 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1757 goto discard_it;
1758
1759 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1760bad_packet:
63231bdd 1761 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1da177e4 1762 } else {
cfb6eeb4 1763 tcp_v6_send_reset(NULL, skb);
1da177e4
LT
1764 }
1765
1766discard_it:
1767
1768 /*
1769 * Discard frame
1770 */
1771
1772 kfree_skb(skb);
1773 return 0;
1774
1775discard_and_relse:
1776 sock_put(sk);
1777 goto discard_it;
1778
1779do_time_wait:
1780 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1781 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1782 goto discard_it;
1783 }
1784
1785 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
63231bdd 1786 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
9469c7b4 1787 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1788 goto discard_it;
1789 }
1790
9469c7b4 1791 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4
LT
1792 case TCP_TW_SYN:
1793 {
1794 struct sock *sk2;
1795
c346dca1 1796 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
0660e03f 1797 &ipv6_hdr(skb)->daddr,
505cbfc5 1798 ntohs(th->dest), inet6_iif(skb));
1da177e4 1799 if (sk2 != NULL) {
295ff7ed
ACM
1800 struct inet_timewait_sock *tw = inet_twsk(sk);
1801 inet_twsk_deschedule(tw, &tcp_death_row);
1802 inet_twsk_put(tw);
1da177e4
LT
1803 sk = sk2;
1804 goto process;
1805 }
1806 /* Fall through to ACK */
1807 }
1808 case TCP_TW_ACK:
1809 tcp_v6_timewait_ack(sk, skb);
1810 break;
1811 case TCP_TW_RST:
1812 goto no_tcp_socket;
1813 case TCP_TW_SUCCESS:;
1814 }
1815 goto discard_it;
1816}
1817
1da177e4
LT
1818static int tcp_v6_remember_stamp(struct sock *sk)
1819{
1820 /* Alas, not yet... */
1821 return 0;
1822}
1823
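/*
 * Address-family specific hooks used by the generic connection
 * socket code for native IPv6 TCP sockets.
 */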
3b401a81 1824static const struct inet_connection_sock_af_ops ipv6_specific = {
543d9cfe
ACM
1825 .queue_xmit = inet6_csk_xmit,
1826 .send_check = tcp_v6_send_check,
1827 .rebuild_header = inet6_sk_rebuild_header,
1828 .conn_request = tcp_v6_conn_request,
1829 .syn_recv_sock = tcp_v6_syn_recv_sock,
1830 .remember_stamp = tcp_v6_remember_stamp,
1831 .net_header_len = sizeof(struct ipv6hdr),
1832 .setsockopt = ipv6_setsockopt,
1833 .getsockopt = ipv6_getsockopt,
1834 .addr2sockaddr = inet6_csk_addr2sockaddr,
1835 .sockaddr_len = sizeof(struct sockaddr_in6),
ab1e0a13 1836 .bind_conflict = inet6_csk_bind_conflict,
3fdadf7d 1837#ifdef CONFIG_COMPAT
543d9cfe
ACM
1838 .compat_setsockopt = compat_ipv6_setsockopt,
1839 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1840#endif
1da177e4
LT
1841};
1842
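/*
 * TCP-MD5 (RFC 2385) operations for native IPv6 sockets: key lookup
 * and hash computation are done over IPv6 addresses, while key
 * add/parse are shared with the v4-mapped case further down.
 */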
cfb6eeb4 1843#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1844static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
cfb6eeb4 1845 .md5_lookup = tcp_v6_md5_lookup,
49a72dfb 1846 .calc_md5_hash = tcp_v6_md5_hash_skb,
cfb6eeb4
YH
1847 .md5_add = tcp_v6_md5_add_func,
1848 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1849};
a928630a 1850#endif
cfb6eeb4 1851
1da177e4
LT
1852/*
1853 * TCP over IPv4 via INET6 API
1854 */
1855
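/*
 * These ops are installed (presumably by tcp_v6_connect() and the
 * SYN/ACK processing paths) on an AF_INET6 socket whose peer is a
 * v4-mapped (::ffff:a.b.c.d) address, so transmission goes through
 * the plain IPv4 output and checksum routines.
 */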
3b401a81 1856static const struct inet_connection_sock_af_ops ipv6_mapped = {
543d9cfe
ACM
1857 .queue_xmit = ip_queue_xmit,
1858 .send_check = tcp_v4_send_check,
1859 .rebuild_header = inet_sk_rebuild_header,
1860 .conn_request = tcp_v6_conn_request,
1861 .syn_recv_sock = tcp_v6_syn_recv_sock,
1862 .remember_stamp = tcp_v4_remember_stamp,
1863 .net_header_len = sizeof(struct iphdr),
1864 .setsockopt = ipv6_setsockopt,
1865 .getsockopt = ipv6_getsockopt,
1866 .addr2sockaddr = inet6_csk_addr2sockaddr,
1867 .sockaddr_len = sizeof(struct sockaddr_in6),
ab1e0a13 1868 .bind_conflict = inet6_csk_bind_conflict,
3fdadf7d 1869#ifdef CONFIG_COMPAT
543d9cfe
ACM
1870 .compat_setsockopt = compat_ipv6_setsockopt,
1871 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1872#endif
1da177e4
LT
1873};
1874
cfb6eeb4 1875#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1876static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
cfb6eeb4 1877 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1878 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4
YH
1879 .md5_add = tcp_v6_md5_add_func,
1880 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1881};
a928630a 1882#endif
cfb6eeb4 1883
1da177e4
LT
1884/* NOTE: A lot of things are set to zero explicitly by the call to
 1885 * sk_alloc(), so they need not be done here.
1886 */
1887static int tcp_v6_init_sock(struct sock *sk)
1888{
6687e988 1889 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
1890 struct tcp_sock *tp = tcp_sk(sk);
1891
1892 skb_queue_head_init(&tp->out_of_order_queue);
1893 tcp_init_xmit_timers(sk);
1894 tcp_prequeue_init(tp);
1895
6687e988 1896 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1da177e4
LT
1897 tp->mdev = TCP_TIMEOUT_INIT;
1898
1899 /* So many TCP implementations out there (incorrectly) count the
1900 * initial SYN frame in their delayed-ACK and congestion control
1901 * algorithms that we must have the following bandaid to talk
1902 * efficiently to them. -DaveM
1903 */
1904 tp->snd_cwnd = 2;
1905
1906 /* See draft-stevens-tcpca-spec-01 for discussion of the
1907 * initialization of these values.
1908 */
0b6a05c1 1909 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1da177e4 1910 tp->snd_cwnd_clamp = ~0;
bee7ca9e 1911 tp->mss_cache = TCP_MSS_DEFAULT;
1da177e4
LT
1912
1913 tp->reordering = sysctl_tcp_reordering;
1914
1915 sk->sk_state = TCP_CLOSE;
1916
8292a17a 1917 icsk->icsk_af_ops = &ipv6_specific;
6687e988 1918 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
d83d8461 1919 icsk->icsk_sync_mss = tcp_sync_mss;
1da177e4
LT
1920 sk->sk_write_space = sk_stream_write_space;
1921 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1922
cfb6eeb4
YH
1923#ifdef CONFIG_TCP_MD5SIG
1924 tp->af_specific = &tcp_sock_ipv6_specific;
1925#endif
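 /*
  * tp->af_specific starts out pointing at the IPv6 MD5 ops; if the
  * socket later turns out to be v4-mapped it is presumably switched
  * to tcp_sock_ipv6_mapped_specific alongside icsk_af_ops.
  */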
1926
435cf559
WAS
1927 /* TCP Cookie Transactions */
1928 if (sysctl_tcp_cookie_size > 0) {
1929 /* Default, cookies without s_data_payload. */
1930 tp->cookie_values =
1931 kzalloc(sizeof(*tp->cookie_values),
1932 sk->sk_allocation);
1933 if (tp->cookie_values != NULL)
1934 kref_init(&tp->cookie_values->kref);
1935 }
1936 /* Presumed zeroed, in order of appearance:
1937 * cookie_in_always, cookie_out_never,
1938 * s_data_constant, s_data_in, s_data_out
1939 */
1da177e4
LT
1940 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1941 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1942
eb4dea58 1943 local_bh_disable();
1748376b 1944 percpu_counter_inc(&tcp_sockets_allocated);
eb4dea58 1945 local_bh_enable();
1da177e4
LT
1946
1947 return 0;
1948}
1949
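/*
 * Socket teardown: drop any MD5 keys, run the common IPv4/TCP
 * destroy path, then release the IPv6-specific socket state.
 */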
7d06b2e0 1950static void tcp_v6_destroy_sock(struct sock *sk)
1da177e4 1951{
cfb6eeb4
YH
1952#ifdef CONFIG_TCP_MD5SIG
1953 /* Clean up the MD5 key list */
1954 if (tcp_sk(sk)->md5sig_info)
1955 tcp_v6_clear_md5_list(sk);
1956#endif
1da177e4 1957 tcp_v4_destroy_sock(sk);
7d06b2e0 1958 inet6_destroy_sock(sk);
1da177e4
LT
1959}
1960
952a10be 1961#ifdef CONFIG_PROC_FS
1da177e4 1962/* Proc filesystem TCPv6 sock list dumping. */
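/*
 * Addresses are printed as four 32-bit words in network byte order
 * (32 hex digits) followed by the port, matching the layout of the
 * IPv4 /proc/net/tcp file.
 */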
1ab1457c 1963static void get_openreq6(struct seq_file *seq,
60236fdd 1964 struct sock *sk, struct request_sock *req, int i, int uid)
1da177e4 1965{
1da177e4 1966 int ttd = req->expires - jiffies;
ca304b61
ACM
1967 struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1968 struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1da177e4
LT
1969
1970 if (ttd < 0)
1971 ttd = 0;
1972
1da177e4
LT
1973 seq_printf(seq,
1974 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1975 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1976 i,
1977 src->s6_addr32[0], src->s6_addr32[1],
1978 src->s6_addr32[2], src->s6_addr32[3],
fd507037 1979 ntohs(inet_rsk(req)->loc_port),
1da177e4
LT
1980 dest->s6_addr32[0], dest->s6_addr32[1],
1981 dest->s6_addr32[2], dest->s6_addr32[3],
2e6599cb 1982 ntohs(inet_rsk(req)->rmt_port),
1da177e4
LT
1983 TCP_SYN_RECV,
1984 0, 0, /* could print option size, but that is AF-dependent. */
1ab1457c
YH
1985 1, /* timers active (only the expire timer) */
1986 jiffies_to_clock_t(ttd),
1da177e4
LT
1987 req->retrans,
1988 uid,
1ab1457c 1989 0, /* non standard timer */
1da177e4
LT
1990 0, /* open_requests have no inode */
1991 0, req);
1992}
1993
1994static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1995{
1996 struct in6_addr *dest, *src;
1997 __u16 destp, srcp;
1998 int timer_active;
1999 unsigned long timer_expires;
2000 struct inet_sock *inet = inet_sk(sp);
2001 struct tcp_sock *tp = tcp_sk(sp);
463c84b9 2002 const struct inet_connection_sock *icsk = inet_csk(sp);
1da177e4
LT
2003 struct ipv6_pinfo *np = inet6_sk(sp);
2004
2005 dest = &np->daddr;
2006 src = &np->rcv_saddr;
c720c7e8
ED
2007 destp = ntohs(inet->inet_dport);
2008 srcp = ntohs(inet->inet_sport);
463c84b9
ACM
2009
2010 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1da177e4 2011 timer_active = 1;
463c84b9
ACM
2012 timer_expires = icsk->icsk_timeout;
2013 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 2014 timer_active = 4;
463c84b9 2015 timer_expires = icsk->icsk_timeout;
1da177e4
LT
2016 } else if (timer_pending(&sp->sk_timer)) {
2017 timer_active = 2;
2018 timer_expires = sp->sk_timer.expires;
2019 } else {
2020 timer_active = 0;
2021 timer_expires = jiffies;
2022 }
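 /*
  * timer_active codes used in the dump: 1 retransmit timer,
  * 2 sk_timer (keepalive), 4 zero window probe, 0 none;
  * get_timewait6_sock() below reports 3 for TIME_WAIT sockets.
  */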
2023
2024 seq_printf(seq,
2025 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
7be87351 2026 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
1da177e4
LT
2027 i,
2028 src->s6_addr32[0], src->s6_addr32[1],
2029 src->s6_addr32[2], src->s6_addr32[3], srcp,
2030 dest->s6_addr32[0], dest->s6_addr32[1],
2031 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1ab1457c 2032 sp->sk_state,
47da8ee6
SS
2033 tp->write_seq-tp->snd_una,
2034 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1da177e4
LT
2035 timer_active,
2036 jiffies_to_clock_t(timer_expires - jiffies),
463c84b9 2037 icsk->icsk_retransmits,
1da177e4 2038 sock_i_uid(sp),
6687e988 2039 icsk->icsk_probes_out,
1da177e4
LT
2040 sock_i_ino(sp),
2041 atomic_read(&sp->sk_refcnt), sp,
7be87351
SH
2042 jiffies_to_clock_t(icsk->icsk_rto),
2043 jiffies_to_clock_t(icsk->icsk_ack.ato),
463c84b9 2044 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
0b6a05c1
IJ
2045 tp->snd_cwnd,
2046 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
1da177e4
LT
2047 );
2048}
2049
1ab1457c 2050static void get_timewait6_sock(struct seq_file *seq,
8feaf0c0 2051 struct inet_timewait_sock *tw, int i)
1da177e4
LT
2052{
2053 struct in6_addr *dest, *src;
2054 __u16 destp, srcp;
0fa1a53e 2055 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1da177e4
LT
2056 int ttd = tw->tw_ttd - jiffies;
2057
2058 if (ttd < 0)
2059 ttd = 0;
2060
0fa1a53e
ACM
2061 dest = &tw6->tw_v6_daddr;
2062 src = &tw6->tw_v6_rcv_saddr;
1da177e4
LT
2063 destp = ntohs(tw->tw_dport);
2064 srcp = ntohs(tw->tw_sport);
2065
2066 seq_printf(seq,
2067 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2068 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2069 i,
2070 src->s6_addr32[0], src->s6_addr32[1],
2071 src->s6_addr32[2], src->s6_addr32[3], srcp,
2072 dest->s6_addr32[0], dest->s6_addr32[1],
2073 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2074 tw->tw_substate, 0, 0,
2075 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2076 atomic_read(&tw->tw_refcnt), tw);
2077}
2078
1da177e4
LT
2079static int tcp6_seq_show(struct seq_file *seq, void *v)
2080{
2081 struct tcp_iter_state *st;
2082
2083 if (v == SEQ_START_TOKEN) {
2084 seq_puts(seq,
2085 " sl "
2086 "local_address "
2087 "remote_address "
2088 "st tx_queue rx_queue tr tm->when retrnsmt"
2089 " uid timeout inode\n");
2090 goto out;
2091 }
2092 st = seq->private;
2093
2094 switch (st->state) {
2095 case TCP_SEQ_STATE_LISTENING:
2096 case TCP_SEQ_STATE_ESTABLISHED:
2097 get_tcp6_sock(seq, v, st->num);
2098 break;
2099 case TCP_SEQ_STATE_OPENREQ:
2100 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2101 break;
2102 case TCP_SEQ_STATE_TIME_WAIT:
2103 get_timewait6_sock(seq, v, st->num);
2104 break;
2105 }
2106out:
2107 return 0;
2108}
2109
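/*
 * Glue for the generic TCP /proc code: tcp_proc_register() below
 * creates a per-namespace /proc/net/tcp6 backed by tcp6_seq_show().
 */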
1da177e4 2110static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1da177e4
LT
2111 .name = "tcp6",
2112 .family = AF_INET6,
5f4472c5
DL
2113 .seq_fops = {
2114 .owner = THIS_MODULE,
2115 },
9427c4b3
DL
2116 .seq_ops = {
2117 .show = tcp6_seq_show,
2118 },
1da177e4
LT
2119};
2120
2c8c1e72 2121int __net_init tcp6_proc_init(struct net *net)
1da177e4 2122{
6f8b13bc 2123 return tcp_proc_register(net, &tcp6_seq_afinfo);
1da177e4
LT
2124}
2125
6f8b13bc 2126void tcp6_proc_exit(struct net *net)
1da177e4 2127{
6f8b13bc 2128 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1da177e4
LT
2129}
2130#endif
2131
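/*
 * struct proto for AF_INET6 SOCK_STREAM sockets.  Most operations
 * are shared with IPv4 TCP; only connect, backlog receive, hashing
 * and socket init/destroy are IPv6-specific.
 */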
2132struct proto tcpv6_prot = {
2133 .name = "TCPv6",
2134 .owner = THIS_MODULE,
2135 .close = tcp_close,
2136 .connect = tcp_v6_connect,
2137 .disconnect = tcp_disconnect,
463c84b9 2138 .accept = inet_csk_accept,
1da177e4
LT
2139 .ioctl = tcp_ioctl,
2140 .init = tcp_v6_init_sock,
2141 .destroy = tcp_v6_destroy_sock,
2142 .shutdown = tcp_shutdown,
2143 .setsockopt = tcp_setsockopt,
2144 .getsockopt = tcp_getsockopt,
1da177e4
LT
2145 .recvmsg = tcp_recvmsg,
2146 .backlog_rcv = tcp_v6_do_rcv,
2147 .hash = tcp_v6_hash,
ab1e0a13
ACM
2148 .unhash = inet_unhash,
2149 .get_port = inet_csk_get_port,
1da177e4
LT
2150 .enter_memory_pressure = tcp_enter_memory_pressure,
2151 .sockets_allocated = &tcp_sockets_allocated,
2152 .memory_allocated = &tcp_memory_allocated,
2153 .memory_pressure = &tcp_memory_pressure,
0a5578cf 2154 .orphan_count = &tcp_orphan_count,
1da177e4
LT
2155 .sysctl_mem = sysctl_tcp_mem,
2156 .sysctl_wmem = sysctl_tcp_wmem,
2157 .sysctl_rmem = sysctl_tcp_rmem,
2158 .max_header = MAX_TCP_HEADER,
2159 .obj_size = sizeof(struct tcp6_sock),
3ab5aee7 2160 .slab_flags = SLAB_DESTROY_BY_RCU,
6d6ee43e 2161 .twsk_prot = &tcp6_timewait_sock_ops,
60236fdd 2162 .rsk_prot = &tcp6_request_sock_ops,
39d8cda7 2163 .h.hashinfo = &tcp_hashinfo,
543d9cfe
ACM
2164#ifdef CONFIG_COMPAT
2165 .compat_setsockopt = compat_tcp_setsockopt,
2166 .compat_getsockopt = compat_tcp_getsockopt,
2167#endif
1da177e4
LT
2168};
2169
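/*
 * Hook into the IPv6 protocol demultiplexer for IPPROTO_TCP,
 * including the GSO/GRO offload callbacks.
 */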
41135cc8 2170static const struct inet6_protocol tcpv6_protocol = {
1da177e4
LT
2171 .handler = tcp_v6_rcv,
2172 .err_handler = tcp_v6_err,
a430a43d 2173 .gso_send_check = tcp_v6_gso_send_check,
adcfc7d0 2174 .gso_segment = tcp_tso_segment,
684f2176
HX
2175 .gro_receive = tcp6_gro_receive,
2176 .gro_complete = tcp6_gro_complete,
1da177e4
LT
2177 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2178};
2179
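/*
 * Maps socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP) onto tcpv6_prot
 * and the common inet6 stream ops.
 */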
1da177e4
LT
2180static struct inet_protosw tcpv6_protosw = {
2181 .type = SOCK_STREAM,
2182 .protocol = IPPROTO_TCP,
2183 .prot = &tcpv6_prot,
2184 .ops = &inet6_stream_ops,
1da177e4 2185 .no_check = 0,
d83d8461
ACM
2186 .flags = INET_PROTOSW_PERMANENT |
2187 INET_PROTOSW_ICSK,
1da177e4
LT
2188};
2189
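/*
 * Per-namespace setup: each netns gets a kernel control socket
 * (net->ipv6.tcp_sk), used for transmitting segments such as resets
 * that are not associated with a user socket; on namespace teardown
 * any remaining IPv6 time-wait sockets are purged in one batch.
 */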
2c8c1e72 2190static int __net_init tcpv6_net_init(struct net *net)
93ec926b 2191{
5677242f
DL
2192 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2193 SOCK_RAW, IPPROTO_TCP, net);
93ec926b
DL
2194}
2195
2c8c1e72 2196static void __net_exit tcpv6_net_exit(struct net *net)
93ec926b 2197{
5677242f 2198 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
b099ce26
EB
2199}
2200
2c8c1e72 2201static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
b099ce26
EB
2202{
2203 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
93ec926b
DL
2204}
2205
2206static struct pernet_operations tcpv6_net_ops = {
b099ce26
EB
2207 .init = tcpv6_net_init,
2208 .exit = tcpv6_net_exit,
2209 .exit_batch = tcpv6_net_exit_batch,
93ec926b
DL
2210};
2211
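/*
 * Module init: register the protocol handler, then the protosw
 * entry, then the per-namespace operations.  On failure, the steps
 * already taken must be undone in reverse order (see the unwind
 * labels at the end of tcpv6_init()).
 */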
7f4e4868 2212int __init tcpv6_init(void)
1da177e4 2213{
7f4e4868
DL
2214 int ret;
2215
2216 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2217 if (ret)
2218 goto out;
2219
1da177e4 2220 /* register inet6 protocol */
7f4e4868
DL
2221 ret = inet6_register_protosw(&tcpv6_protosw);
2222 if (ret)
2223 goto out_tcpv6_protocol;
2224
93ec926b 2225 ret = register_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
2226 if (ret)
2227 goto out_tcpv6_protosw;
2228out:
2229 return ret;
ae0f7d5f 2230
7f4e4868
DL
2231out_tcpv6_protosw:
 2232 inet6_unregister_protosw(&tcpv6_protosw);
 2233out_tcpv6_protocol:
 2234 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
 2235 goto out;
2236}
2237
09f7709f 2238void tcpv6_exit(void)
7f4e4868 2239{
93ec926b 2240 unregister_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
2241 inet6_unregister_protosw(&tcpv6_protosw);
2242 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1da177e4 2243}