net-next-2.6.git: net/ipv6/tcp_ipv6.c (blob at commit "[SK_BUFF]: Introduce skb_network_header()")
1 /*
2  *      TCP over IPv6
3  *      Linux INET6 implementation
4  *
5  *      Authors:
6  *      Pedro Roque             <roque@di.fc.ul.pt>
7  *
8  *      $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
9  *
10  *      Based on:
11  *      linux/net/ipv4/tcp.c
12  *      linux/net/ipv4/tcp_input.c
13  *      linux/net/ipv4/tcp_output.c
14  *
15  *      Fixes:
16  *      Hideaki YOSHIFUJI       :       sin6_scope_id support
17  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
18  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
19  *                                      a single port at the same time.
20  *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
21  *
22  *      This program is free software; you can redistribute it and/or
23  *      modify it under the terms of the GNU General Public License
24  *      as published by the Free Software Foundation; either version
25  *      2 of the License, or (at your option) any later version.
26  */
27
28 #include <linux/module.h>
29 #include <linux/errno.h>
30 #include <linux/types.h>
31 #include <linux/socket.h>
32 #include <linux/sockios.h>
33 #include <linux/net.h>
34 #include <linux/jiffies.h>
35 #include <linux/in.h>
36 #include <linux/in6.h>
37 #include <linux/netdevice.h>
38 #include <linux/init.h>
39 #include <linux/jhash.h>
40 #include <linux/ipsec.h>
41 #include <linux/times.h>
42
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
60 #include <net/snmp.h>
61 #include <net/dsfield.h>
62 #include <net/timewait_sock.h>
63
64 #include <asm/uaccess.h>
65
66 #include <linux/proc_fs.h>
67 #include <linux/seq_file.h>
68
69 #include <linux/crypto.h>
70 #include <linux/scatterlist.h>
71
72 /* Socket used for sending RSTs and ACKs */
73 static struct socket *tcp6_socket;
74
75 static void     tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
76 static void     tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
77 static void     tcp_v6_send_check(struct sock *sk, int len,
78                                   struct sk_buff *skb);
79
80 static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
81
82 static struct inet_connection_sock_af_ops ipv6_mapped;
83 static struct inet_connection_sock_af_ops ipv6_specific;
84 #ifdef CONFIG_TCP_MD5SIG
85 static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
86 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
87 #endif
88
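/*
 * Local port selection and socket hashing.  tcp_v6_get_port() simply
 * delegates to the generic inet_csk_get_port() with the IPv6-aware
 * bind-conflict check, and tcp_v6_hash() below inserts the socket into
 * the shared TCP hash tables; a v6-mapped socket (icsk_af_ops ==
 * &ipv6_mapped) is hashed through the IPv4 tcp_prot.hash() path instead.
 */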
89 static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
90 {
91         return inet_csk_get_port(&tcp_hashinfo, sk, snum,
92                                  inet6_csk_bind_conflict);
93 }
94
95 static void tcp_v6_hash(struct sock *sk)
96 {
97         if (sk->sk_state != TCP_CLOSE) {
98                 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
99                         tcp_prot.hash(sk);
100                         return;
101                 }
102                 local_bh_disable();
103                 __inet6_hash(&tcp_hashinfo, sk);
104                 local_bh_enable();
105         }
106 }
107
108 static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
109                                    struct in6_addr *saddr,
110                                    struct in6_addr *daddr,
111                                    __wsum base)
112 {
113         return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
114 }
115
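/*
 * Derive the initial sequence number for a passive connection from the
 * addresses and ports in the incoming SYN via secure_tcpv6_sequence_number(),
 * which mixes the 4-tuple with a secret (plus a clock component) so that
 * ISNs are hard for off-path attackers to predict.
 */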
116 static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
117 {
118         return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
119                                             skb->nh.ipv6h->saddr.s6_addr32,
120                                             skb->h.th->dest,
121                                             skb->h.th->source);
122 }
123
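/*
 * Active open (connect()) for an AF_INET6 TCP socket.  The function
 * validates the destination, handles flow labels and scope ids, falls
 * back to tcp_v4_connect() for v4-mapped destinations (switching
 * icsk_af_ops to ipv6_mapped), performs the route/xfrm lookup, picks a
 * source address, clamps the MSS, hashes the socket and finally sends
 * the SYN through tcp_connect().
 *
 * From userspace this is reached through an ordinary connect(), e.g.
 * (illustrative sketch only, error handling omitted):
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	struct sockaddr_in6 dst = { .sin6_family = AF_INET6,
 *				    .sin6_port   = htons(80) };
 *	inet_pton(AF_INET6, "2001:db8::1", &dst.sin6_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 */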
124 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
125                           int addr_len)
126 {
127         struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
128         struct inet_sock *inet = inet_sk(sk);
129         struct inet_connection_sock *icsk = inet_csk(sk);
130         struct ipv6_pinfo *np = inet6_sk(sk);
131         struct tcp_sock *tp = tcp_sk(sk);
132         struct in6_addr *saddr = NULL, *final_p = NULL, final;
133         struct flowi fl;
134         struct dst_entry *dst;
135         int addr_type;
136         int err;
137
138         if (addr_len < SIN6_LEN_RFC2133)
139                 return -EINVAL;
140
141         if (usin->sin6_family != AF_INET6)
142                 return(-EAFNOSUPPORT);
143
144         memset(&fl, 0, sizeof(fl));
145
146         if (np->sndflow) {
147                 fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
148                 IP6_ECN_flow_init(fl.fl6_flowlabel);
149                 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
150                         struct ip6_flowlabel *flowlabel;
151                         flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
152                         if (flowlabel == NULL)
153                                 return -EINVAL;
154                         ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
155                         fl6_sock_release(flowlabel);
156                 }
157         }
158
159         /*
160          *      connect() to INADDR_ANY means loopback (BSD'ism).
161          */
162
163         if(ipv6_addr_any(&usin->sin6_addr))
164                 usin->sin6_addr.s6_addr[15] = 0x1;
165
166         addr_type = ipv6_addr_type(&usin->sin6_addr);
167
168         if(addr_type & IPV6_ADDR_MULTICAST)
169                 return -ENETUNREACH;
170
171         if (addr_type&IPV6_ADDR_LINKLOCAL) {
172                 if (addr_len >= sizeof(struct sockaddr_in6) &&
173                     usin->sin6_scope_id) {
174                         /* If interface is set while binding, indices
175                          * must coincide.
176                          */
177                         if (sk->sk_bound_dev_if &&
178                             sk->sk_bound_dev_if != usin->sin6_scope_id)
179                                 return -EINVAL;
180
181                         sk->sk_bound_dev_if = usin->sin6_scope_id;
182                 }
183
184                 /* Connecting to a link-local address requires an interface */
185                 if (!sk->sk_bound_dev_if)
186                         return -EINVAL;
187         }
188
189         if (tp->rx_opt.ts_recent_stamp &&
190             !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
191                 tp->rx_opt.ts_recent = 0;
192                 tp->rx_opt.ts_recent_stamp = 0;
193                 tp->write_seq = 0;
194         }
195
196         ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
197         np->flow_label = fl.fl6_flowlabel;
198
199         /*
200          *      TCP over IPv4
201          */
202
203         if (addr_type == IPV6_ADDR_MAPPED) {
204                 u32 exthdrlen = icsk->icsk_ext_hdr_len;
205                 struct sockaddr_in sin;
206
207                 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
208
209                 if (__ipv6_only_sock(sk))
210                         return -ENETUNREACH;
211
212                 sin.sin_family = AF_INET;
213                 sin.sin_port = usin->sin6_port;
214                 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
215
216                 icsk->icsk_af_ops = &ipv6_mapped;
217                 sk->sk_backlog_rcv = tcp_v4_do_rcv;
218 #ifdef CONFIG_TCP_MD5SIG
219                 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
220 #endif
221
222                 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
223
224                 if (err) {
225                         icsk->icsk_ext_hdr_len = exthdrlen;
226                         icsk->icsk_af_ops = &ipv6_specific;
227                         sk->sk_backlog_rcv = tcp_v6_do_rcv;
228 #ifdef CONFIG_TCP_MD5SIG
229                         tp->af_specific = &tcp_sock_ipv6_specific;
230 #endif
231                         goto failure;
232                 } else {
233                         ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
234                                       inet->saddr);
235                         ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
236                                       inet->rcv_saddr);
237                 }
238
239                 return err;
240         }
241
242         if (!ipv6_addr_any(&np->rcv_saddr))
243                 saddr = &np->rcv_saddr;
244
245         fl.proto = IPPROTO_TCP;
246         ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
247         ipv6_addr_copy(&fl.fl6_src,
248                        (saddr ? saddr : &np->saddr));
249         fl.oif = sk->sk_bound_dev_if;
250         fl.fl_ip_dport = usin->sin6_port;
251         fl.fl_ip_sport = inet->sport;
252
253         if (np->opt && np->opt->srcrt) {
254                 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
255                 ipv6_addr_copy(&final, &fl.fl6_dst);
256                 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
257                 final_p = &final;
258         }
259
260         security_sk_classify_flow(sk, &fl);
261
262         err = ip6_dst_lookup(sk, &dst, &fl);
263         if (err)
264                 goto failure;
265         if (final_p)
266                 ipv6_addr_copy(&fl.fl6_dst, final_p);
267
268         if ((err = xfrm_lookup(&dst, &fl, sk, 1)) < 0)
269                 goto failure;
270
271         if (saddr == NULL) {
272                 saddr = &fl.fl6_src;
273                 ipv6_addr_copy(&np->rcv_saddr, saddr);
274         }
275
276         /* set the source address */
277         ipv6_addr_copy(&np->saddr, saddr);
278         inet->rcv_saddr = LOOPBACK4_IPV6;
279
280         sk->sk_gso_type = SKB_GSO_TCPV6;
281         __ip6_dst_store(sk, dst, NULL, NULL);
282
283         icsk->icsk_ext_hdr_len = 0;
284         if (np->opt)
285                 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
286                                           np->opt->opt_nflen);
287
288         tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
289
290         inet->dport = usin->sin6_port;
291
292         tcp_set_state(sk, TCP_SYN_SENT);
293         err = inet6_hash_connect(&tcp_death_row, sk);
294         if (err)
295                 goto late_failure;
296
297         if (!tp->write_seq)
298                 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
299                                                              np->daddr.s6_addr32,
300                                                              inet->sport,
301                                                              inet->dport);
302
303         err = tcp_connect(sk);
304         if (err)
305                 goto late_failure;
306
307         return 0;
308
309 late_failure:
310         tcp_set_state(sk, TCP_CLOSE);
311         __sk_dst_reset(sk);
312 failure:
313         inet->dport = 0;
314         sk->sk_route_caps = 0;
315         return err;
316 }
317
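/*
 * ICMPv6 error handler for TCP.  Locates the socket for the offending
 * segment, handles ICMPV6_PKT_TOOBIG by refreshing the cached route and
 * shrinking the path MTU (tcp_sync_mss() + tcp_simple_retransmit()), and
 * converts other ICMPv6 errors with icmpv6_err_convert().  Errors against
 * embryonic connections either drop the matching request_sock (TCP_LISTEN)
 * or abort the handshake (TCP_SYN_SENT/TCP_SYN_RECV).
 */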
318 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
319                 int type, int code, int offset, __be32 info)
320 {
321         struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
322         const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
323         struct ipv6_pinfo *np;
324         struct sock *sk;
325         int err;
326         struct tcp_sock *tp;
327         __u32 seq;
328
329         sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
330                           th->source, skb->dev->ifindex);
331
332         if (sk == NULL) {
333                 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
334                 return;
335         }
336
337         if (sk->sk_state == TCP_TIME_WAIT) {
338                 inet_twsk_put(inet_twsk(sk));
339                 return;
340         }
341
342         bh_lock_sock(sk);
343         if (sock_owned_by_user(sk))
344                 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
345
346         if (sk->sk_state == TCP_CLOSE)
347                 goto out;
348
349         tp = tcp_sk(sk);
350         seq = ntohl(th->seq);
351         if (sk->sk_state != TCP_LISTEN &&
352             !between(seq, tp->snd_una, tp->snd_nxt)) {
353                 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
354                 goto out;
355         }
356
357         np = inet6_sk(sk);
358
359         if (type == ICMPV6_PKT_TOOBIG) {
360                 struct dst_entry *dst = NULL;
361
362                 if (sock_owned_by_user(sk))
363                         goto out;
364                 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
365                         goto out;
366
367                 /* icmp should have updated the destination cache entry */
368                 dst = __sk_dst_check(sk, np->dst_cookie);
369
370                 if (dst == NULL) {
371                         struct inet_sock *inet = inet_sk(sk);
372                         struct flowi fl;
373
374                         /* BUGGG_FUTURE: Again, it is not clear how
375                            to handle the rthdr case. Ignore this complexity
376                            for now.
377                          */
378                         memset(&fl, 0, sizeof(fl));
379                         fl.proto = IPPROTO_TCP;
380                         ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
381                         ipv6_addr_copy(&fl.fl6_src, &np->saddr);
382                         fl.oif = sk->sk_bound_dev_if;
383                         fl.fl_ip_dport = inet->dport;
384                         fl.fl_ip_sport = inet->sport;
385                         security_skb_classify_flow(skb, &fl);
386
387                         if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
388                                 sk->sk_err_soft = -err;
389                                 goto out;
390                         }
391
392                         if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
393                                 sk->sk_err_soft = -err;
394                                 goto out;
395                         }
396
397                 } else
398                         dst_hold(dst);
399
400                 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
401                         tcp_sync_mss(sk, dst_mtu(dst));
402                         tcp_simple_retransmit(sk);
403                 } /* else let the usual retransmit timer handle it */
404                 dst_release(dst);
405                 goto out;
406         }
407
408         icmpv6_err_convert(type, code, &err);
409
410         /* Might be for a request_sock */
411         switch (sk->sk_state) {
412                 struct request_sock *req, **prev;
413         case TCP_LISTEN:
414                 if (sock_owned_by_user(sk))
415                         goto out;
416
417                 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
418                                            &hdr->saddr, inet6_iif(skb));
419                 if (!req)
420                         goto out;
421
422                 /* ICMPs are not backlogged, hence we cannot get
423                  * an established socket here.
424                  */
425                 BUG_TRAP(req->sk == NULL);
426
427                 if (seq != tcp_rsk(req)->snt_isn) {
428                         NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
429                         goto out;
430                 }
431
432                 inet_csk_reqsk_queue_drop(sk, req, prev);
433                 goto out;
434
435         case TCP_SYN_SENT:
436         case TCP_SYN_RECV:  /* Cannot happen.
437                                It can, if SYNs are crossed. --ANK */
438                 if (!sock_owned_by_user(sk)) {
439                         sk->sk_err = err;
440                         sk->sk_error_report(sk);                /* Wake people up to see the error (see connect in sock.c) */
441
442                         tcp_done(sk);
443                 } else
444                         sk->sk_err_soft = err;
445                 goto out;
446         }
447
448         if (!sock_owned_by_user(sk) && np->recverr) {
449                 sk->sk_err = err;
450                 sk->sk_error_report(sk);
451         } else
452                 sk->sk_err_soft = err;
453
454 out:
455         bh_unlock_sock(sk);
456         sock_put(sk);
457 }
458
459
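/*
 * Build and transmit a SYN|ACK for a pending request_sock.  If no route
 * was supplied, one is looked up from the request (honouring a reversed
 * routing header carried in the SYN's pktopts, if any), the TCP checksum
 * is filled in with tcp_v6_check() and the segment is sent via ip6_xmit().
 */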
460 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
461                               struct dst_entry *dst)
462 {
463         struct inet6_request_sock *treq = inet6_rsk(req);
464         struct ipv6_pinfo *np = inet6_sk(sk);
465         struct sk_buff * skb;
466         struct ipv6_txoptions *opt = NULL;
467         struct in6_addr * final_p = NULL, final;
468         struct flowi fl;
469         int err = -1;
470
471         memset(&fl, 0, sizeof(fl));
472         fl.proto = IPPROTO_TCP;
473         ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
474         ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
475         fl.fl6_flowlabel = 0;
476         fl.oif = treq->iif;
477         fl.fl_ip_dport = inet_rsk(req)->rmt_port;
478         fl.fl_ip_sport = inet_sk(sk)->sport;
479         security_req_classify_flow(req, &fl);
480
481         if (dst == NULL) {
482                 opt = np->opt;
483                 if (opt == NULL &&
484                     np->rxopt.bits.osrcrt == 2 &&
485                     treq->pktopts) {
486                         struct sk_buff *pktopts = treq->pktopts;
487                         struct inet6_skb_parm *rxopt = IP6CB(pktopts);
488                         if (rxopt->srcrt)
489                                 opt = ipv6_invert_rthdr(sk,
490                           (struct ipv6_rt_hdr *)(skb_network_header(pktopts) +
491                                                  rxopt->srcrt));
492                 }
493
494                 if (opt && opt->srcrt) {
495                         struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
496                         ipv6_addr_copy(&final, &fl.fl6_dst);
497                         ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
498                         final_p = &final;
499                 }
500
501                 err = ip6_dst_lookup(sk, &dst, &fl);
502                 if (err)
503                         goto done;
504                 if (final_p)
505                         ipv6_addr_copy(&fl.fl6_dst, final_p);
506                 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
507                         goto done;
508         }
509
510         skb = tcp_make_synack(sk, dst, req);
511         if (skb) {
512                 struct tcphdr *th = skb->h.th;
513
514                 th->check = tcp_v6_check(th, skb->len,
515                                          &treq->loc_addr, &treq->rmt_addr,
516                                          csum_partial((char *)th, skb->len, skb->csum));
517
518                 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
519                 err = ip6_xmit(sk, skb, &fl, opt, 0);
520                 err = net_xmit_eval(err);
521         }
522
523 done:
524         if (opt && opt != np->opt)
525                 sock_kfree_s(sk, opt, opt->tot_len);
526         dst_release(dst);
527         return err;
528 }
529
530 static void tcp_v6_reqsk_destructor(struct request_sock *req)
531 {
532         if (inet6_rsk(req)->pktopts)
533                 kfree_skb(inet6_rsk(req)->pktopts);
534 }
535
536 #ifdef CONFIG_TCP_MD5SIG
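/*
 * TCP-MD5 (RFC 2385) key management for IPv6 peers.  Keys are kept in a
 * flat array (md5sig_info->keys6) indexed by peer address; lookups are a
 * simple linear scan, which is adequate for the handful of keys a
 * BGP-style deployment typically configures.
 */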
537 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
538                                                    struct in6_addr *addr)
539 {
540         struct tcp_sock *tp = tcp_sk(sk);
541         int i;
542
543         BUG_ON(tp == NULL);
544
545         if (!tp->md5sig_info || !tp->md5sig_info->entries6)
546                 return NULL;
547
548         for (i = 0; i < tp->md5sig_info->entries6; i++) {
549                 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
550                         return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i];
551         }
552         return NULL;
553 }
554
555 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
556                                                 struct sock *addr_sk)
557 {
558         return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
559 }
560
561 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
562                                                       struct request_sock *req)
563 {
564         return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
565 }
566
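/*
 * Insert or update an MD5 key for @peer.  An existing entry is updated in
 * place; otherwise the keys6 array is grown by one slot (allocated with
 * GFP_ATOMIC, old contents copied over) and the new key appended.  Note
 * that ownership of @newkey passes to the socket: it is kfree()d on
 * failure and later freed by the delete/clear paths.
 */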
567 static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
568                              char *newkey, u8 newkeylen)
569 {
570         /* Add key to the list */
571         struct tcp6_md5sig_key *key;
572         struct tcp_sock *tp = tcp_sk(sk);
573         struct tcp6_md5sig_key *keys;
574
575         key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer);
576         if (key) {
577                 /* modify existing entry - just update that one */
578                 kfree(key->key);
579                 key->key = newkey;
580                 key->keylen = newkeylen;
581         } else {
582                 /* reallocate new list if current one is full. */
583                 if (!tp->md5sig_info) {
584                         tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
585                         if (!tp->md5sig_info) {
586                                 kfree(newkey);
587                                 return -ENOMEM;
588                         }
589                 }
590                 tcp_alloc_md5sig_pool();
591                 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
592                         keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
593                                        (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
594
595                         if (!keys) {
596                                 tcp_free_md5sig_pool();
597                                 kfree(newkey);
598                                 return -ENOMEM;
599                         }
600
601                         if (tp->md5sig_info->entries6)
602                                 memmove(keys, tp->md5sig_info->keys6,
603                                         (sizeof (tp->md5sig_info->keys6[0]) *
604                                          tp->md5sig_info->entries6));
605
606                         kfree(tp->md5sig_info->keys6);
607                         tp->md5sig_info->keys6 = keys;
608                         tp->md5sig_info->alloced6++;
609                 }
610
611                 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
612                                peer);
613                 tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey;
614                 tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen;
615
616                 tp->md5sig_info->entries6++;
617         }
618         return 0;
619 }
620
621 static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
622                                u8 *newkey, __u8 newkeylen)
623 {
624         return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
625                                  newkey, newkeylen);
626 }
627
628 static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
629 {
630         struct tcp_sock *tp = tcp_sk(sk);
631         int i;
632
633         for (i = 0; i < tp->md5sig_info->entries6; i++) {
634                 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
635                         /* Free the key */
636                         kfree(tp->md5sig_info->keys6[i].key);
637                         tp->md5sig_info->entries6--;
638
639                         if (tp->md5sig_info->entries6 == 0) {
640                                 kfree(tp->md5sig_info->keys6);
641                                 tp->md5sig_info->keys6 = NULL;
642
643                                 tcp_free_md5sig_pool();
644
645                                 return 0;
646                         } else {
647                                 /* shrink the database */
648                                 if (tp->md5sig_info->entries6 != i)
649                                         memmove(&tp->md5sig_info->keys6[i],
650                                                 &tp->md5sig_info->keys6[i+1],
651                                                 (tp->md5sig_info->entries6 - i)
652                                                 * sizeof (tp->md5sig_info->keys6[0]));
                                /* drop this key's pool reference and report success */
                                tcp_free_md5sig_pool();
                                return 0;
653                         }
654                 }
655         }
656         return -ENOENT;
657 }
658
659 static void tcp_v6_clear_md5_list (struct sock *sk)
660 {
661         struct tcp_sock *tp = tcp_sk(sk);
662         int i;
663
664         if (tp->md5sig_info->entries6) {
665                 for (i = 0; i < tp->md5sig_info->entries6; i++)
666                         kfree(tp->md5sig_info->keys6[i].key);
667                 tp->md5sig_info->entries6 = 0;
668                 tcp_free_md5sig_pool();
669         }
670
671         kfree(tp->md5sig_info->keys6);
672         tp->md5sig_info->keys6 = NULL;
673         tp->md5sig_info->alloced6 = 0;
674
675         if (tp->md5sig_info->entries4) {
676                 for (i = 0; i < tp->md5sig_info->entries4; i++)
677                         kfree(tp->md5sig_info->keys4[i].key);
678                 tp->md5sig_info->entries4 = 0;
679                 tcp_free_md5sig_pool();
680         }
681
682         kfree(tp->md5sig_info->keys4);
683         tp->md5sig_info->keys4 = NULL;
684         tp->md5sig_info->alloced4 = 0;
685 }
686
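/*
 * setsockopt(TCP_MD5SIG) handler for AF_INET6 sockets.  A zero key length
 * deletes the key for the given peer; v4-mapped peers are forwarded to the
 * IPv4 helpers.  Roughly, userspace configures a key like this
 * (illustrative sketch, error handling omitted):
 *
 *	struct tcp_md5sig md5sig;
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5sig.tcpm_addr;
 *
 *	memset(&md5sig, 0, sizeof(md5sig));
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::2", &a->sin6_addr);
 *	md5sig.tcpm_keylen = strlen("secret");
 *	memcpy(md5sig.tcpm_key, "secret", md5sig.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5sig, sizeof(md5sig));
 */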
687 static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
688                                   int optlen)
689 {
690         struct tcp_md5sig cmd;
691         struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
692         u8 *newkey;
693
694         if (optlen < sizeof(cmd))
695                 return -EINVAL;
696
697         if (copy_from_user(&cmd, optval, sizeof(cmd)))
698                 return -EFAULT;
699
700         if (sin6->sin6_family != AF_INET6)
701                 return -EINVAL;
702
703         if (!cmd.tcpm_keylen) {
704                 if (!tcp_sk(sk)->md5sig_info)
705                         return -ENOENT;
706                 if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED)
707                         return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
708                 return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
709         }
710
711         if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
712                 return -EINVAL;
713
714         if (!tcp_sk(sk)->md5sig_info) {
715                 struct tcp_sock *tp = tcp_sk(sk);
716                 struct tcp_md5sig_info *p;
717
718                 p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
719                 if (!p)
720                         return -ENOMEM;
721
722                 tp->md5sig_info = p;
723         }
724
725         newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
726         if (!newkey)
727                 return -ENOMEM;
728         if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) {
729                 return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
730                                          newkey, cmd.tcpm_keylen);
731         }
732         return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
733 }
734
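/*
 * Compute the RFC 2385 MD5 signature: hash the IPv6 pseudo-header, the
 * TCP header with its checksum field zeroed, any payload, and finally the
 * shared key, using the per-cpu tcp_md5sig_pool.  The 16-byte digest is
 * written to @md5_hash; on any crypto error the digest is zeroed instead.
 */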
735 static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
736                                    struct in6_addr *saddr,
737                                    struct in6_addr *daddr,
738                                    struct tcphdr *th, int protocol,
739                                    int tcplen)
740 {
741         struct scatterlist sg[4];
742         __u16 data_len;
743         int block = 0;
744         __sum16 cksum;
745         struct tcp_md5sig_pool *hp;
746         struct tcp6_pseudohdr *bp;
747         struct hash_desc *desc;
748         int err;
749         unsigned int nbytes = 0;
750
751         hp = tcp_get_md5sig_pool();
752         if (!hp) {
753                 printk(KERN_WARNING "%s(): hash pool not found...\n", __FUNCTION__);
754                 goto clear_hash_noput;
755         }
756         bp = &hp->md5_blk.ip6;
757         desc = &hp->md5_desc;
758
759         /* 1. TCP pseudo-header (RFC2460) */
760         ipv6_addr_copy(&bp->saddr, saddr);
761         ipv6_addr_copy(&bp->daddr, daddr);
762         bp->len = htonl(tcplen);
763         bp->protocol = htonl(protocol);
764
765         sg_set_buf(&sg[block++], bp, sizeof(*bp));
766         nbytes += sizeof(*bp);
767
768         /* 2. TCP header, excluding options */
769         cksum = th->check;
770         th->check = 0;
771         sg_set_buf(&sg[block++], th, sizeof(*th));
772         nbytes += sizeof(*th);
773
774         /* 3. TCP segment data (if any) */
775         data_len = tcplen - (th->doff << 2);
776         if (data_len > 0) {
777                 u8 *data = (u8 *)th + (th->doff << 2);
778                 sg_set_buf(&sg[block++], data, data_len);
779                 nbytes += data_len;
780         }
781
782         /* 4. shared key */
783         sg_set_buf(&sg[block++], key->key, key->keylen);
784         nbytes += key->keylen;
785
786         /* Now store the hash into the packet */
787         err = crypto_hash_init(desc);
788         if (err) {
789                 printk(KERN_WARNING "%s(): hash_init failed\n", __FUNCTION__);
790                 goto clear_hash;
791         }
792         err = crypto_hash_update(desc, sg, nbytes);
793         if (err) {
794                 printk(KERN_WARNING "%s(): hash_update failed\n", __FUNCTION__);
795                 goto clear_hash;
796         }
797         err = crypto_hash_final(desc, md5_hash);
798         if (err) {
799                 printk(KERN_WARNING "%s(): hash_final failed\n", __FUNCTION__);
800                 goto clear_hash;
801         }
802
803         /* Reset header, and free up the crypto */
804         tcp_put_md5sig_pool();
805         th->check = cksum;
806 out:
807         return 0;
808 clear_hash:
809         tcp_put_md5sig_pool();
810 clear_hash_noput:
811         memset(md5_hash, 0, 16);
812         goto out;
813 }
814
815 static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
816                                 struct sock *sk,
817                                 struct dst_entry *dst,
818                                 struct request_sock *req,
819                                 struct tcphdr *th, int protocol,
820                                 int tcplen)
821 {
822         struct in6_addr *saddr, *daddr;
823
824         if (sk) {
825                 saddr = &inet6_sk(sk)->saddr;
826                 daddr = &inet6_sk(sk)->daddr;
827         } else {
828                 saddr = &inet6_rsk(req)->loc_addr;
829                 daddr = &inet6_rsk(req)->rmt_addr;
830         }
831         return tcp_v6_do_calc_md5_hash(md5_hash, key,
832                                        saddr, daddr,
833                                        th, protocol, tcplen);
834 }
835
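/*
 * Validate the MD5 option on an incoming segment.  Walks the TCP options
 * looking for TCPOPT_MD5SIG and compares against the key configured for
 * the source address.  Returns 1 (drop) if a signature is present but not
 * expected, expected but missing, or does not verify; 0 otherwise.
 */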
836 static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
837 {
838         __u8 *hash_location = NULL;
839         struct tcp_md5sig_key *hash_expected;
840         struct ipv6hdr *ip6h = skb->nh.ipv6h;
841         struct tcphdr *th = skb->h.th;
842         int length = (th->doff << 2) - sizeof (*th);
843         int genhash;
844         u8 *ptr;
845         u8 newhash[16];
846
847         hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
848
849         /* If the TCP option space is too short for an MD5 option, we can short-cut */
850         if (length < TCPOLEN_MD5SIG)
851                 return hash_expected ? 1 : 0;
852
853         /* parse options */
854         ptr = (u8*)(th + 1);
855         while (length > 0) {
856                 int opcode = *ptr++;
857                 int opsize;
858
859                 switch(opcode) {
860                 case TCPOPT_EOL:
861                         goto done_opts;
862                 case TCPOPT_NOP:
863                         length--;
864                         continue;
865                 default:
866                         opsize = *ptr++;
867                         if (opsize < 2 || opsize > length)
868                                 goto done_opts;
869                         if (opcode == TCPOPT_MD5SIG) {
870                                 hash_location = ptr;
871                                 goto done_opts;
872                         }
873                 }
874                 ptr += opsize - 2;
875                 length -= opsize;
876         }
877
878 done_opts:
879         /* do we have a hash as expected? */
880         if (!hash_expected) {
881                 if (!hash_location)
882                         return 0;
883                 if (net_ratelimit()) {
884                         printk(KERN_INFO "MD5 Hash NOT expected but found "
885                                "(" NIP6_FMT ", %u)->"
886                                "(" NIP6_FMT ", %u)\n",
887                                NIP6(ip6h->saddr), ntohs(th->source),
888                                NIP6(ip6h->daddr), ntohs(th->dest));
889                 }
890                 return 1;
891         }
892
893         if (!hash_location) {
894                 if (net_ratelimit()) {
895                         printk(KERN_INFO "MD5 Hash expected but NOT found "
896                                "(" NIP6_FMT ", %u)->"
897                                "(" NIP6_FMT ", %u)\n",
898                                NIP6(ip6h->saddr), ntohs(th->source),
899                                NIP6(ip6h->daddr), ntohs(th->dest));
900                 }
901                 return 1;
902         }
903
904         /* check the signature */
905         genhash = tcp_v6_do_calc_md5_hash(newhash,
906                                           hash_expected,
907                                           &ip6h->saddr, &ip6h->daddr,
908                                           th, sk->sk_protocol,
909                                           skb->len);
910         if (genhash || memcmp(hash_location, newhash, 16) != 0) {
911                 if (net_ratelimit()) {
912                         printk(KERN_INFO "MD5 Hash %s for "
913                                "(" NIP6_FMT ", %u)->"
914                                "(" NIP6_FMT ", %u)\n",
915                                genhash ? "failed" : "mismatch",
916                                NIP6(ip6h->saddr), ntohs(th->source),
917                                NIP6(ip6h->daddr), ntohs(th->dest));
918                 }
919                 return 1;
920         }
921         return 0;
922 }
923 #endif
924
925 static struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
926         .family         =       AF_INET6,
927         .obj_size       =       sizeof(struct tcp6_request_sock),
928         .rtx_syn_ack    =       tcp_v6_send_synack,
929         .send_ack       =       tcp_v6_reqsk_send_ack,
930         .destructor     =       tcp_v6_reqsk_destructor,
931         .send_reset     =       tcp_v6_send_reset
932 };
933
934 #ifdef CONFIG_TCP_MD5SIG
935 static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
936         .md5_lookup     =       tcp_v6_reqsk_md5_lookup,
937 };
938 #endif
939
940 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
941         .twsk_obj_size  = sizeof(struct tcp6_timewait_sock),
942         .twsk_unique    = tcp_twsk_unique,
943         .twsk_destructor= tcp_twsk_destructor,
944 };
945
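/*
 * Fill in the TCP checksum on output.  With CHECKSUM_PARTIAL only the
 * pseudo-header sum is stored and csum_offset tells the device (or the
 * software fallback) where to place the final checksum; otherwise the
 * full checksum is computed here over the header and the accumulated
 * skb->csum.
 */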
946 static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
947 {
948         struct ipv6_pinfo *np = inet6_sk(sk);
949         struct tcphdr *th = skb->h.th;
950
951         if (skb->ip_summed == CHECKSUM_PARTIAL) {
952                 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,  0);
953                 skb->csum_offset = offsetof(struct tcphdr, check);
954         } else {
955                 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
956                                             csum_partial((char *)th, th->doff<<2,
957                                                          skb->csum));
958         }
959 }
960
961 static int tcp_v6_gso_send_check(struct sk_buff *skb)
962 {
963         struct ipv6hdr *ipv6h;
964         struct tcphdr *th;
965
966         if (!pskb_may_pull(skb, sizeof(*th)))
967                 return -EINVAL;
968
969         ipv6h = skb->nh.ipv6h;
970         th = skb->h.th;
971
972         th->check = 0;
973         th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
974                                      IPPROTO_TCP, 0);
975         skb->csum_offset = offsetof(struct tcphdr, check);
976         skb->ip_summed = CHECKSUM_PARTIAL;
977         return 0;
978 }
979
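/*
 * Send a RST in response to @skb.  RSTs are never sent in reply to RSTs
 * or to non-unicast destinations.  The reply swaps ports and addresses,
 * takes its sequence number from the incoming ACK (or ACKs the offending
 * segment when there was none), optionally appends an MD5 option, and is
 * transmitted on the dedicated tcp6_socket after a fresh route lookup.
 */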
980 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
981 {
982         struct tcphdr *th = skb->h.th, *t1;
983         struct sk_buff *buff;
984         struct flowi fl;
985         int tot_len = sizeof(*th);
986 #ifdef CONFIG_TCP_MD5SIG
987         struct tcp_md5sig_key *key;
988 #endif
989
990         if (th->rst)
991                 return;
992
993         if (!ipv6_unicast_destination(skb))
994                 return;
995
996 #ifdef CONFIG_TCP_MD5SIG
997         if (sk)
998                 key = tcp_v6_md5_do_lookup(sk, &skb->nh.ipv6h->daddr);
999         else
1000                 key = NULL;
1001
1002         if (key)
1003                 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1004 #endif
1005
1006         /*
1007          * We need to grab some memory, and put together an RST,
1008          * and then put it into the queue to be sent.
1009          */
1010
1011         buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1012                          GFP_ATOMIC);
1013         if (buff == NULL)
1014                 return;
1015
1016         skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1017
1018         t1 = (struct tcphdr *) skb_push(buff, tot_len);
1019
1020         /* Swap the send and the receive. */
1021         memset(t1, 0, sizeof(*t1));
1022         t1->dest = th->source;
1023         t1->source = th->dest;
1024         t1->doff = tot_len / 4;
1025         t1->rst = 1;
1026
1027         if(th->ack) {
1028                 t1->seq = th->ack_seq;
1029         } else {
1030                 t1->ack = 1;
1031                 t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
1032                                     + skb->len - (th->doff<<2));
1033         }
1034
1035 #ifdef CONFIG_TCP_MD5SIG
1036         if (key) {
1037                 __be32 *opt = (__be32*)(t1 + 1);
1038                 opt[0] = htonl((TCPOPT_NOP << 24) |
1039                                (TCPOPT_NOP << 16) |
1040                                (TCPOPT_MD5SIG << 8) |
1041                                TCPOLEN_MD5SIG);
1042                 tcp_v6_do_calc_md5_hash((__u8*)&opt[1],
1043                                         key,
1044                                         &skb->nh.ipv6h->daddr,
1045                                         &skb->nh.ipv6h->saddr,
1046                                         t1, IPPROTO_TCP,
1047                                         tot_len);
1048         }
1049 #endif
1050
1051         buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
1052
1053         memset(&fl, 0, sizeof(fl));
1054         ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
1055         ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);
1056
1057         t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1058                                     sizeof(*t1), IPPROTO_TCP,
1059                                     buff->csum);
1060
1061         fl.proto = IPPROTO_TCP;
1062         fl.oif = inet6_iif(skb);
1063         fl.fl_ip_dport = t1->dest;
1064         fl.fl_ip_sport = t1->source;
1065         security_skb_classify_flow(skb, &fl);
1066
1067         /* sk = NULL, but it is safe for now. RST socket required. */
1068         if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
1069
1070                 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1071                         ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
1072                         TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1073                         TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
1074                         return;
1075                 }
1076         }
1077
1078         kfree_skb(buff);
1079 }
1080
1081 static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
1082                             struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
1083 {
1084         struct tcphdr *th = skb->h.th, *t1;
1085         struct sk_buff *buff;
1086         struct flowi fl;
1087         int tot_len = sizeof(struct tcphdr);
1088         __be32 *topt;
1089 #ifdef CONFIG_TCP_MD5SIG
1090         struct tcp_md5sig_key *key;
1091         struct tcp_md5sig_key tw_key;
1092 #endif
1093
1094 #ifdef CONFIG_TCP_MD5SIG
1095         if (!tw && skb->sk) {
1096                 key = tcp_v6_md5_do_lookup(skb->sk, &skb->nh.ipv6h->daddr);
1097         } else if (tw && tw->tw_md5_keylen) {
1098                 tw_key.key = tw->tw_md5_key;
1099                 tw_key.keylen = tw->tw_md5_keylen;
1100                 key = &tw_key;
1101         } else {
1102                 key = NULL;
1103         }
1104 #endif
1105
1106         if (ts)
1107                 tot_len += TCPOLEN_TSTAMP_ALIGNED;
1108 #ifdef CONFIG_TCP_MD5SIG
1109         if (key)
1110                 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1111 #endif
1112
1113         buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1114                          GFP_ATOMIC);
1115         if (buff == NULL)
1116                 return;
1117
1118         skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1119
1120         t1 = (struct tcphdr *) skb_push(buff,tot_len);
1121
1122         /* Swap the send and the receive. */
1123         memset(t1, 0, sizeof(*t1));
1124         t1->dest = th->source;
1125         t1->source = th->dest;
1126         t1->doff = tot_len/4;
1127         t1->seq = htonl(seq);
1128         t1->ack_seq = htonl(ack);
1129         t1->ack = 1;
1130         t1->window = htons(win);
1131
1132         topt = (__be32 *)(t1 + 1);
1133
1134         if (ts) {
1135                 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1136                                 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1137                 *topt++ = htonl(tcp_time_stamp);
1138                 *topt = htonl(ts);
1139         }
1140
1141 #ifdef CONFIG_TCP_MD5SIG
1142         if (key) {
1143                 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1144                                 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1145                 tcp_v6_do_calc_md5_hash((__u8 *)topt,
1146                                         key,
1147                                         &skb->nh.ipv6h->daddr,
1148                                         &skb->nh.ipv6h->saddr,
1149                                         t1, IPPROTO_TCP,
1150                                         tot_len);
1151         }
1152 #endif
1153
1154         buff->csum = csum_partial((char *)t1, tot_len, 0);
1155
1156         memset(&fl, 0, sizeof(fl));
1157         ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
1158         ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);
1159
1160         t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1161                                     tot_len, IPPROTO_TCP,
1162                                     buff->csum);
1163
1164         fl.proto = IPPROTO_TCP;
1165         fl.oif = inet6_iif(skb);
1166         fl.fl_ip_dport = t1->dest;
1167         fl.fl_ip_sport = t1->source;
1168         security_skb_classify_flow(skb, &fl);
1169
1170         if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
1171                 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1172                         ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
1173                         TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1174                         return;
1175                 }
1176         }
1177
1178         kfree_skb(buff);
1179 }
1180
1181 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1182 {
1183         struct inet_timewait_sock *tw = inet_twsk(sk);
1184         struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1185
1186         tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1187                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1188                         tcptw->tw_ts_recent);
1189
1190         inet_twsk_put(tw);
1191 }
1192
1193 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
1194 {
1195         tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
1196 }
1197
1198
1199 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1200 {
1201         struct request_sock *req, **prev;
1202         const struct tcphdr *th = skb->h.th;
1203         struct sock *nsk;
1204
1205         /* Find possible connection requests. */
1206         req = inet6_csk_search_req(sk, &prev, th->source,
1207                                    &skb->nh.ipv6h->saddr,
1208                                    &skb->nh.ipv6h->daddr, inet6_iif(skb));
1209         if (req)
1210                 return tcp_check_req(sk, skb, req, prev);
1211
1212         nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr,
1213                                          th->source, &skb->nh.ipv6h->daddr,
1214                                          ntohs(th->dest), inet6_iif(skb));
1215
1216         if (nsk) {
1217                 if (nsk->sk_state != TCP_TIME_WAIT) {
1218                         bh_lock_sock(nsk);
1219                         return nsk;
1220                 }
1221                 inet_twsk_put(inet_twsk(nsk));
1222                 return NULL;
1223         }
1224
1225 #if 0 /*def CONFIG_SYN_COOKIES*/
1226         if (!th->rst && !th->syn && th->ack)
1227                 sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
1228 #endif
1229         return sk;
1230 }
1231
1232 /* FIXME: this is substantially similar to the ipv4 code.
1233  * Can some kind of merge be done? -- erics
1234  */
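/*
 * Handle an incoming SYN on a listening socket: allocate a request_sock,
 * parse the TCP options, record the peer addresses (and pktopts when the
 * listener asked for them), choose the ISN, send the SYN|ACK and queue
 * the request with a TCP_TIMEOUT_INIT retransmit timer.  v4-mapped SYNs
 * are handed straight to tcp_v4_conn_request().
 */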
1235 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1236 {
1237         struct inet6_request_sock *treq;
1238         struct ipv6_pinfo *np = inet6_sk(sk);
1239         struct tcp_options_received tmp_opt;
1240         struct tcp_sock *tp = tcp_sk(sk);
1241         struct request_sock *req = NULL;
1242         __u32 isn = TCP_SKB_CB(skb)->when;
1243
1244         if (skb->protocol == htons(ETH_P_IP))
1245                 return tcp_v4_conn_request(sk, skb);
1246
1247         if (!ipv6_unicast_destination(skb))
1248                 goto drop;
1249
1250         /*
1251          *      There are no SYN attacks on IPv6, yet...
1252          */
1253         if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1254                 if (net_ratelimit())
1255                         printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
1256                 goto drop;
1257         }
1258
1259         if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1260                 goto drop;
1261
1262         req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1263         if (req == NULL)
1264                 goto drop;
1265
1266 #ifdef CONFIG_TCP_MD5SIG
1267         tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1268 #endif
1269
1270         tcp_clear_options(&tmp_opt);
1271         tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1272         tmp_opt.user_mss = tp->rx_opt.user_mss;
1273
1274         tcp_parse_options(skb, &tmp_opt, 0);
1275
1276         tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1277         tcp_openreq_init(req, &tmp_opt, skb);
1278
1279         treq = inet6_rsk(req);
1280         ipv6_addr_copy(&treq->rmt_addr, &skb->nh.ipv6h->saddr);
1281         ipv6_addr_copy(&treq->loc_addr, &skb->nh.ipv6h->daddr);
1282         TCP_ECN_create_request(req, skb->h.th);
1283         treq->pktopts = NULL;
1284         if (ipv6_opt_accepted(sk, skb) ||
1285             np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1286             np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1287                 atomic_inc(&skb->users);
1288                 treq->pktopts = skb;
1289         }
1290         treq->iif = sk->sk_bound_dev_if;
1291
1292         /* So that link locals have meaning */
1293         if (!sk->sk_bound_dev_if &&
1294             ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1295                 treq->iif = inet6_iif(skb);
1296
1297         if (isn == 0)
1298                 isn = tcp_v6_init_sequence(skb);
1299
1300         tcp_rsk(req)->snt_isn = isn;
1301
1302         security_inet_conn_request(sk, skb, req);
1303
1304         if (tcp_v6_send_synack(sk, req, NULL))
1305                 goto drop;
1306
1307         inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1308         return 0;
1309
1310 drop:
1311         if (req)
1312                 reqsk_free(req);
1313
1314         return 0; /* don't send reset */
1315 }
1316
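/*
 * Create the child socket once the three-way handshake completes.  For a
 * v4-mapped connection tcp_v4_syn_recv_sock() does the work and only the
 * IPv6 view of the addresses and the af_ops are patched up here.  The
 * native path clones the listener's state, attaches the route, copies any
 * pktoptions/IPv6 options from the request, inherits an MD5 key if one
 * matches the peer, and hashes the new socket.
 */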
1317 static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1318                                           struct request_sock *req,
1319                                           struct dst_entry *dst)
1320 {
1321         struct inet6_request_sock *treq = inet6_rsk(req);
1322         struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1323         struct tcp6_sock *newtcp6sk;
1324         struct inet_sock *newinet;
1325         struct tcp_sock *newtp;
1326         struct sock *newsk;
1327         struct ipv6_txoptions *opt;
1328 #ifdef CONFIG_TCP_MD5SIG
1329         struct tcp_md5sig_key *key;
1330 #endif
1331
1332         if (skb->protocol == htons(ETH_P_IP)) {
1333                 /*
1334                  *      v6 mapped
1335                  */
1336
1337                 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1338
1339                 if (newsk == NULL)
1340                         return NULL;
1341
1342                 newtcp6sk = (struct tcp6_sock *)newsk;
1343                 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1344
1345                 newinet = inet_sk(newsk);
1346                 newnp = inet6_sk(newsk);
1347                 newtp = tcp_sk(newsk);
1348
1349                 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1350
1351                 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
1352                               newinet->daddr);
1353
1354                 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
1355                               newinet->saddr);
1356
1357                 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1358
1359                 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1360                 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1361 #ifdef CONFIG_TCP_MD5SIG
1362                 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1363 #endif
1364
1365                 newnp->pktoptions  = NULL;
1366                 newnp->opt         = NULL;
1367                 newnp->mcast_oif   = inet6_iif(skb);
1368                 newnp->mcast_hops  = skb->nh.ipv6h->hop_limit;
1369
1370                 /*
1371                  * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1372                  * here, tcp_create_openreq_child now does this for us, see the comment in
1373                  * that function for the gory details. -acme
1374                  */
1375
1376                 /* This is a tricky place. Until this moment the IPv4 tcp
1377                    code worked with the IPv6 icsk.icsk_af_ops.
1378                    Sync it now.
1379                  */
1380                 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1381
1382                 return newsk;
1383         }
1384
1385         opt = np->opt;
1386
1387         if (sk_acceptq_is_full(sk))
1388                 goto out_overflow;
1389
1390         if (np->rxopt.bits.osrcrt == 2 &&
1391             opt == NULL && treq->pktopts) {
1392                 struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts);
1393                 if (rxopt->srcrt)
1394                         opt = ipv6_invert_rthdr(sk,
1395                    (struct ipv6_rt_hdr *)(skb_network_header(treq->pktopts) +
1396                                           rxopt->srcrt));
1397         }
1398
1399         if (dst == NULL) {
1400                 struct in6_addr *final_p = NULL, final;
1401                 struct flowi fl;
1402
1403                 memset(&fl, 0, sizeof(fl));
1404                 fl.proto = IPPROTO_TCP;
1405                 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1406                 if (opt && opt->srcrt) {
1407                         struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
1408                         ipv6_addr_copy(&final, &fl.fl6_dst);
1409                         ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1410                         final_p = &final;
1411                 }
1412                 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1413                 fl.oif = sk->sk_bound_dev_if;
1414                 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
1415                 fl.fl_ip_sport = inet_sk(sk)->sport;
1416                 security_req_classify_flow(req, &fl);
1417
1418                 if (ip6_dst_lookup(sk, &dst, &fl))
1419                         goto out;
1420
1421                 if (final_p)
1422                         ipv6_addr_copy(&fl.fl6_dst, final_p);
1423
1424                 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
1425                         goto out;
1426         }
1427
1428         newsk = tcp_create_openreq_child(sk, req, skb);
1429         if (newsk == NULL)
1430                 goto out;
1431
1432         /*
1433          * No need to charge this sock to the relevant IPv6 refcnt debug socks
1434          * count here, tcp_create_openreq_child now does this for us, see the
1435          * comment in that function for the gory details. -acme
1436          */
1437
1438         newsk->sk_gso_type = SKB_GSO_TCPV6;
1439         __ip6_dst_store(newsk, dst, NULL, NULL);
1440
1441         newtcp6sk = (struct tcp6_sock *)newsk;
1442         inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1443
1444         newtp = tcp_sk(newsk);
1445         newinet = inet_sk(newsk);
1446         newnp = inet6_sk(newsk);
1447
1448         memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1449
1450         ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1451         ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1452         ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1453         newsk->sk_bound_dev_if = treq->iif;
1454
1455         /* Now IPv6 options...
1456
1457            First: no IPv4 options.
1458          */
1459         newinet->opt = NULL;
1460         newnp->ipv6_fl_list = NULL;
1461
1462         /* Clone RX bits */
1463         newnp->rxopt.all = np->rxopt.all;
1464
1465         /* Clone pktoptions received with SYN */
1466         newnp->pktoptions = NULL;
1467         if (treq->pktopts != NULL) {
1468                 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1469                 kfree_skb(treq->pktopts);
1470                 treq->pktopts = NULL;
1471                 if (newnp->pktoptions)
1472                         skb_set_owner_r(newnp->pktoptions, newsk);
1473         }
1474         newnp->opt        = NULL;
1475         newnp->mcast_oif  = inet6_iif(skb);
1476         newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
1477
1483          */
1484         if (opt) {
1485                 newnp->opt = ipv6_dup_options(newsk, opt);
1486                 if (opt != np->opt)
1487                         sock_kfree_s(sk, opt, opt->tot_len);
1488         }
1489
1490         inet_csk(newsk)->icsk_ext_hdr_len = 0;
1491         if (newnp->opt)
1492                 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1493                                                      newnp->opt->opt_flen);
1494
1495         tcp_mtup_init(newsk);
1496         tcp_sync_mss(newsk, dst_mtu(dst));
1497         newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1498         tcp_initialize_rcv_mss(newsk);
1499
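        /*
         * A pure IPv6 socket has no meaningful IPv4 addresses; fill the
         * inet fields with the LOOPBACK4_IPV6 marker value rather than
         * leaving whatever the listener happened to hold.
         */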
1500         newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
1501
1502 #ifdef CONFIG_TCP_MD5SIG
1503         /* Copy over the MD5 key from the original socket */
1504         if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1505                 /* We're using one, so create a matching key
1506                  * on the newsk structure. If we fail to get
1507                  * memory, then we end up not copying the key
1508                  * across. Shucks.
1509                  */
1510                 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1511                 if (newkey != NULL)
1512                         tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
1513                                           newkey, key->keylen);
1514         }
1515 #endif
1516
1517         __inet6_hash(&tcp_hashinfo, newsk);
1518         inet_inherit_port(&tcp_hashinfo, sk, newsk);
1519
1520         return newsk;
1521
1522 out_overflow:
1523         NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1524 out:
1525         NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1526         if (opt && opt != np->opt)
1527                 sock_kfree_s(sk, opt, opt->tot_len);
1528         dst_release(dst);
1529         return NULL;
1530 }
1531
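/*
 * Checksum strategy for received segments: if the device handed us a full
 * packet checksum (CHECKSUM_COMPLETE), fold in the pseudo-header and accept
 * the segment immediately when it verifies.  Otherwise seed skb->csum with
 * the pseudo-header sum; short segments are verified in full right here,
 * larger ones are left for copy-and-checksum later in the receive path.
 */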
1532 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1533 {
1534         if (skb->ip_summed == CHECKSUM_COMPLETE) {
1535                 if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
1536                                   &skb->nh.ipv6h->daddr,skb->csum)) {
1537                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1538                         return 0;
1539                 }
1540         }
1541
1542         skb->csum = ~csum_unfold(tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
1543                                   &skb->nh.ipv6h->daddr, 0));
1544
1545         if (skb->len <= 76) {
1546                 return __skb_checksum_complete(skb);
1547         }
1548         return 0;
1549 }
1550
1551 /* The socket must have its spinlock held when we get
1552  * here.
1553  *
1554  * We have a potential double-lock case here, so even when
1555  * doing backlog processing we use the BH locking scheme.
1556  * This is because we cannot sleep with the original spinlock
1557  * held.
1558  */
1559 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1560 {
1561         struct ipv6_pinfo *np = inet6_sk(sk);
1562         struct tcp_sock *tp;
1563         struct sk_buff *opt_skb = NULL;
1564
1565         /* Imagine: the socket is IPv6. An IPv4 packet arrives,
1566            goes to the IPv4 receive handler and is backlogged.
1567            From the backlog it always ends up here. Kerboom...
1568            Fortunately, tcp_rcv_established and rcv_established
1569            handle them correctly, but that is not the case with
1570            tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1571          */
1572
1573         if (skb->protocol == htons(ETH_P_IP))
1574                 return tcp_v4_do_rcv(sk, skb);
1575
1576 #ifdef CONFIG_TCP_MD5SIG
1577         if (tcp_v6_inbound_md5_hash (sk, skb))
1578                 goto discard;
1579 #endif
1580
1581         if (sk_filter(sk, skb))
1582                 goto discard;
1583
1584         /*
1585          *      socket locking is here for SMP purposes as backlog rcv
1586          *      is currently called with bh processing disabled.
1587          */
1588
1589         /* Do Stevens' IPV6_PKTOPTIONS.
1590
1591            Yes, guys, this is the only place in our code where we
1592            can do it without affecting IPv4.
1593            The rest of the code is protocol independent,
1594            and I do not like the idea of uglifying IPv4.
1595 
1596            Actually, the whole idea behind IPV6_PKTOPTIONS
1597            does not look very well thought out. For now we latch
1598            the options received in the last packet enqueued
1599            by tcp. Feel free to propose a better solution.
1600                                                --ANK (980728)
1601          */
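        /*
         * Clone the segment now, while its network headers are still
         * intact; whether the clone is actually kept for the user is
         * decided at the ipv6_pktoptions label below, after the segment
         * has been processed.
         */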
1602         if (np->rxopt.all)
1603                 opt_skb = skb_clone(skb, GFP_ATOMIC);
1604
1605         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1606                 TCP_CHECK_TIMER(sk);
1607                 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1608                         goto reset;
1609                 TCP_CHECK_TIMER(sk);
1610                 if (opt_skb)
1611                         goto ipv6_pktoptions;
1612                 return 0;
1613         }
1614
1615         if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
1616                 goto csum_err;
1617
1618         if (sk->sk_state == TCP_LISTEN) {
1619                 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1620                 if (!nsk)
1621                         goto discard;
1622
1623                 /*
1624                  * Queue it on the new socket if the new socket is active,
1625                  * otherwise we just short-circuit this and continue with
1626                  * the new socket.
1627                  */
1628                 if (nsk != sk) {
1629                         if (tcp_child_process(sk, nsk, skb))
1630                                 goto reset;
1631                         if (opt_skb)
1632                                 __kfree_skb(opt_skb);
1633                         return 0;
1634                 }
1635         }
1636
1637         TCP_CHECK_TIMER(sk);
1638         if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1639                 goto reset;
1640         TCP_CHECK_TIMER(sk);
1641         if (opt_skb)
1642                 goto ipv6_pktoptions;
1643         return 0;
1644
1645 reset:
1646         tcp_v6_send_reset(sk, skb);
1647 discard:
1648         if (opt_skb)
1649                 __kfree_skb(opt_skb);
1650         kfree_skb(skb);
1651         return 0;
1652 csum_err:
1653         TCP_INC_STATS_BH(TCP_MIB_INERRS);
1654         goto discard;
1655
1656
1657 ipv6_pktoptions:
1658         /* What is going on here?
1659 
1660            1. The skb was enqueued by tcp.
1661            2. The skb was added to the tail of the read queue, not out of order.
1662            3. The socket is not in a passive state.
1663            4. Finally, it really contains options which the user wants to receive.
1664          */
1665         tp = tcp_sk(sk);
1666         if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1667             !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1668                 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1669                         np->mcast_oif = inet6_iif(opt_skb);
1670                 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1671                         np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
1672                 if (ipv6_opt_accepted(sk, opt_skb)) {
1673                         skb_set_owner_r(opt_skb, sk);
1674                         opt_skb = xchg(&np->pktoptions, opt_skb);
1675                 } else {
1676                         __kfree_skb(opt_skb);
1677                         opt_skb = xchg(&np->pktoptions, NULL);
1678                 }
1679         }
1680
1681         if (opt_skb)
1682                 kfree_skb(opt_skb);
1683         return 0;
1684 }
1685
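/*
 * Main receive entry point registered in tcpv6_protocol below: validate the
 * TCP header and checksum, look up the owning socket, and then either
 * process the segment directly, prequeue it for a waiting reader, or add it
 * to the socket backlog when the socket is owned by a user context.
 * Unknown-socket and TIME_WAIT segments get their own handling at the end.
 */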
1686 static int tcp_v6_rcv(struct sk_buff **pskb)
1687 {
1688         struct sk_buff *skb = *pskb;
1689         struct tcphdr *th;
1690         struct sock *sk;
1691         int ret;
1692
1693         if (skb->pkt_type != PACKET_HOST)
1694                 goto discard_it;
1695
1696         /*
1697          *      Count it even if it's bad.
1698          */
1699         TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1700
1701         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1702                 goto discard_it;
1703
1704         th = skb->h.th;
1705
1706         if (th->doff < sizeof(struct tcphdr)/4)
1707                 goto bad_packet;
1708         if (!pskb_may_pull(skb, th->doff*4))
1709                 goto discard_it;
1710
1711         if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1712              tcp_v6_checksum_init(skb)))
1713                 goto bad_packet;
1714
1715         th = skb->h.th;
1716         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1717         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1718                                     skb->len - th->doff*4);
1719         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1720         TCP_SKB_CB(skb)->when = 0;
1721         TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h);
1722         TCP_SKB_CB(skb)->sacked = 0;
1723
1724         sk = __inet6_lookup(&tcp_hashinfo, &skb->nh.ipv6h->saddr, th->source,
1725                             &skb->nh.ipv6h->daddr, ntohs(th->dest),
1726                             inet6_iif(skb));
1727
1728         if (!sk)
1729                 goto no_tcp_socket;
1730
1731 process:
1732         if (sk->sk_state == TCP_TIME_WAIT)
1733                 goto do_time_wait;
1734
1735         if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1736                 goto discard_and_relse;
1737
1738         if (sk_filter(sk, skb))
1739                 goto discard_and_relse;
1740
1741         skb->dev = NULL;
1742
1743         bh_lock_sock_nested(sk);
1744         ret = 0;
1745         if (!sock_owned_by_user(sk)) {
1746 #ifdef CONFIG_NET_DMA
1747                 struct tcp_sock *tp = tcp_sk(sk);
1748                 if (tp->ucopy.dma_chan)
1749                         ret = tcp_v6_do_rcv(sk, skb);
1750                 else
1751 #endif
1752                 {
1753                         if (!tcp_prequeue(sk, skb))
1754                                 ret = tcp_v6_do_rcv(sk, skb);
1755                 }
1756         } else
1757                 sk_add_backlog(sk, skb);
1758         bh_unlock_sock(sk);
1759
1760         sock_put(sk);
1761         return ret ? -1 : 0;
1762
1763 no_tcp_socket:
1764         if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1765                 goto discard_it;
1766
1767         if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1768 bad_packet:
1769                 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1770         } else {
1771                 tcp_v6_send_reset(NULL, skb);
1772         }
1773
1774 discard_it:
1775
1776         /*
1777          *      Discard frame
1778          */
1779
1780         kfree_skb(skb);
1781         return 0;
1782
1783 discard_and_relse:
1784         sock_put(sk);
1785         goto discard_it;
1786
1787 do_time_wait:
1788         if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1789                 inet_twsk_put(inet_twsk(sk));
1790                 goto discard_it;
1791         }
1792
1793         if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1794                 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1795                 inet_twsk_put(inet_twsk(sk));
1796                 goto discard_it;
1797         }
1798
1799         switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1800         case TCP_TW_SYN:
1801         {
1802                 struct sock *sk2;
1803
1804                 sk2 = inet6_lookup_listener(&tcp_hashinfo,
1805                                             &skb->nh.ipv6h->daddr,
1806                                             ntohs(th->dest), inet6_iif(skb));
1807                 if (sk2 != NULL) {
1808                         struct inet_timewait_sock *tw = inet_twsk(sk);
1809                         inet_twsk_deschedule(tw, &tcp_death_row);
1810                         inet_twsk_put(tw);
1811                         sk = sk2;
1812                         goto process;
1813                 }
1814                 /* Fall through to ACK */
1815         }
1816         case TCP_TW_ACK:
1817                 tcp_v6_timewait_ack(sk, skb);
1818                 break;
1819         case TCP_TW_RST:
1820                 goto no_tcp_socket;
1821         case TCP_TW_SUCCESS:;
1822         }
1823         goto discard_it;
1824 }
1825
1826 static int tcp_v6_remember_stamp(struct sock *sk)
1827 {
1828         /* Alas, not yet... */
1829         return 0;
1830 }
1831
1832 static struct inet_connection_sock_af_ops ipv6_specific = {
1833         .queue_xmit        = inet6_csk_xmit,
1834         .send_check        = tcp_v6_send_check,
1835         .rebuild_header    = inet6_sk_rebuild_header,
1836         .conn_request      = tcp_v6_conn_request,
1837         .syn_recv_sock     = tcp_v6_syn_recv_sock,
1838         .remember_stamp    = tcp_v6_remember_stamp,
1839         .net_header_len    = sizeof(struct ipv6hdr),
1840         .setsockopt        = ipv6_setsockopt,
1841         .getsockopt        = ipv6_getsockopt,
1842         .addr2sockaddr     = inet6_csk_addr2sockaddr,
1843         .sockaddr_len      = sizeof(struct sockaddr_in6),
1844 #ifdef CONFIG_COMPAT
1845         .compat_setsockopt = compat_ipv6_setsockopt,
1846         .compat_getsockopt = compat_ipv6_getsockopt,
1847 #endif
1848 };
1849
1850 #ifdef CONFIG_TCP_MD5SIG
1851 static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1852         .md5_lookup     =       tcp_v6_md5_lookup,
1853         .calc_md5_hash  =       tcp_v6_calc_md5_hash,
1854         .md5_add        =       tcp_v6_md5_add_func,
1855         .md5_parse      =       tcp_v6_parse_md5_keys,
1856 };
1857 #endif
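
/*
 * Editor's note: illustrative userspace sketch, not part of tcp_ipv6.c.
 * It shows roughly what tcp_v6_parse_md5_keys() (the .md5_parse hook above)
 * consumes when an application installs an RFC 2385 MD5 key on an IPv6 TCP
 * socket; `fd', `peer' and `secret' are hypothetical caller-supplied values,
 * and <linux/tcp.h> is assumed to provide struct tcp_md5sig and TCP_MD5SIG.
 *
 *      #include <string.h>
 *      #include <sys/socket.h>
 *      #include <netinet/in.h>
 *      #include <linux/tcp.h>
 *
 *      static int set_tcp_md5_key(int fd, const struct sockaddr_in6 *peer,
 *                                 const char *secret)
 *      {
 *              struct tcp_md5sig md5;
 *
 *              memset(&md5, 0, sizeof(md5));
 *              memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
 *              md5.tcpm_keylen = strlen(secret);
 *              memcpy(md5.tcpm_key, secret, md5.tcpm_keylen);
 *              return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG,
 *                                &md5, sizeof(md5));
 *      }
 */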
1858
1859 /*
1860  *      TCP over IPv4 via INET6 API
1861  */
1862
1863 static struct inet_connection_sock_af_ops ipv6_mapped = {
1864         .queue_xmit        = ip_queue_xmit,
1865         .send_check        = tcp_v4_send_check,
1866         .rebuild_header    = inet_sk_rebuild_header,
1867         .conn_request      = tcp_v6_conn_request,
1868         .syn_recv_sock     = tcp_v6_syn_recv_sock,
1869         .remember_stamp    = tcp_v4_remember_stamp,
1870         .net_header_len    = sizeof(struct iphdr),
1871         .setsockopt        = ipv6_setsockopt,
1872         .getsockopt        = ipv6_getsockopt,
1873         .addr2sockaddr     = inet6_csk_addr2sockaddr,
1874         .sockaddr_len      = sizeof(struct sockaddr_in6),
1875 #ifdef CONFIG_COMPAT
1876         .compat_setsockopt = compat_ipv6_setsockopt,
1877         .compat_getsockopt = compat_ipv6_getsockopt,
1878 #endif
1879 };
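
/*
 * Editor's note: illustrative userspace sketch, not part of tcp_ipv6.c.
 * Connecting an AF_INET6 TCP socket to a v4-mapped address is what makes
 * tcp_v6_connect() switch the socket onto the ipv6_mapped ops above.  The
 * address "::ffff:192.0.2.1" and port 80 are arbitrary example values.
 *
 *      #include <arpa/inet.h>
 *      #include <netinet/in.h>
 *      #include <string.h>
 *      #include <sys/socket.h>
 *
 *      static int connect_v4_mapped(void)
 *      {
 *              struct sockaddr_in6 sa;
 *              int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *
 *              if (fd < 0)
 *                      return -1;
 *              memset(&sa, 0, sizeof(sa));
 *              sa.sin6_family = AF_INET6;
 *              sa.sin6_port = htons(80);
 *              inet_pton(AF_INET6, "::ffff:192.0.2.1", &sa.sin6_addr);
 *              if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
 *                      return -1;
 *              return fd;
 *      }
 */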
1880
1881 #ifdef CONFIG_TCP_MD5SIG
1882 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1883         .md5_lookup     =       tcp_v4_md5_lookup,
1884         .calc_md5_hash  =       tcp_v4_calc_md5_hash,
1885         .md5_add        =       tcp_v6_md5_add_func,
1886         .md5_parse      =       tcp_v6_parse_md5_keys,
1887 };
1888 #endif
1889
1890 /* NOTE: A lot of things are set to zero explicitly by the call to
1891  *       sk_alloc(), so they need not be done here.
1892  */
1893 static int tcp_v6_init_sock(struct sock *sk)
1894 {
1895         struct inet_connection_sock *icsk = inet_csk(sk);
1896         struct tcp_sock *tp = tcp_sk(sk);
1897
1898         skb_queue_head_init(&tp->out_of_order_queue);
1899         tcp_init_xmit_timers(sk);
1900         tcp_prequeue_init(tp);
1901
1902         icsk->icsk_rto = TCP_TIMEOUT_INIT;
1903         tp->mdev = TCP_TIMEOUT_INIT;
1904
1905         /* So many TCP implementations out there (incorrectly) count the
1906          * initial SYN frame in their delayed-ACK and congestion control
1907          * algorithms that we must have the following bandaid to talk
1908          * efficiently to them.  -DaveM
1909          */
1910         tp->snd_cwnd = 2;
1911
1912         /* See draft-stevens-tcpca-spec-01 for discussion of the
1913          * initialization of these values.
1914          */
1915         tp->snd_ssthresh = 0x7fffffff;
1916         tp->snd_cwnd_clamp = ~0;
1917         tp->mss_cache = 536;
1918
1919         tp->reordering = sysctl_tcp_reordering;
1920
1921         sk->sk_state = TCP_CLOSE;
1922
1923         icsk->icsk_af_ops = &ipv6_specific;
1924         icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1925         icsk->icsk_sync_mss = tcp_sync_mss;
1926         sk->sk_write_space = sk_stream_write_space;
1927         sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1928
1929 #ifdef CONFIG_TCP_MD5SIG
1930         tp->af_specific = &tcp_sock_ipv6_specific;
1931 #endif
1932
1933         sk->sk_sndbuf = sysctl_tcp_wmem[1];
1934         sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1935
1936         atomic_inc(&tcp_sockets_allocated);
1937
1938         return 0;
1939 }
1940
1941 static int tcp_v6_destroy_sock(struct sock *sk)
1942 {
1943 #ifdef CONFIG_TCP_MD5SIG
1944         /* Clean up the MD5 key list */
1945         if (tcp_sk(sk)->md5sig_info)
1946                 tcp_v6_clear_md5_list(sk);
1947 #endif
1948         tcp_v4_destroy_sock(sk);
1949         return inet6_destroy_sock(sk);
1950 }
1951
1952 /* Proc filesystem TCPv6 sock list dumping. */
1953 static void get_openreq6(struct seq_file *seq,
1954                          struct sock *sk, struct request_sock *req, int i, int uid)
1955 {
1956         int ttd = req->expires - jiffies;
1957         struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1958         struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1959
1960         if (ttd < 0)
1961                 ttd = 0;
1962
1963         seq_printf(seq,
1964                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1965                    "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1966                    i,
1967                    src->s6_addr32[0], src->s6_addr32[1],
1968                    src->s6_addr32[2], src->s6_addr32[3],
1969                    ntohs(inet_sk(sk)->sport),
1970                    dest->s6_addr32[0], dest->s6_addr32[1],
1971                    dest->s6_addr32[2], dest->s6_addr32[3],
1972                    ntohs(inet_rsk(req)->rmt_port),
1973                    TCP_SYN_RECV,
1974                    0,0, /* could print option size, but that is af dependent. */
1975                    1,   /* timers active (only the expire timer) */
1976                    jiffies_to_clock_t(ttd),
1977                    req->retrans,
1978                    uid,
1979                    0,  /* non standard timer */
1980                    0, /* open_requests have no inode */
1981                    0, req);
1982 }
1983
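/*
 * Dump one established or listening socket as a /proc/net/tcp6 line.  The
 * "tr" (timer) field uses the same encoding as IPv4's /proc/net/tcp:
 * 1 retransmit timer, 2 sk_timer (typically keepalive), 3 TIME_WAIT,
 * 4 zero-window probe, 0 none.
 */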
1984 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1985 {
1986         struct in6_addr *dest, *src;
1987         __u16 destp, srcp;
1988         int timer_active;
1989         unsigned long timer_expires;
1990         struct inet_sock *inet = inet_sk(sp);
1991         struct tcp_sock *tp = tcp_sk(sp);
1992         const struct inet_connection_sock *icsk = inet_csk(sp);
1993         struct ipv6_pinfo *np = inet6_sk(sp);
1994
1995         dest  = &np->daddr;
1996         src   = &np->rcv_saddr;
1997         destp = ntohs(inet->dport);
1998         srcp  = ntohs(inet->sport);
1999
2000         if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2001                 timer_active    = 1;
2002                 timer_expires   = icsk->icsk_timeout;
2003         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2004                 timer_active    = 4;
2005                 timer_expires   = icsk->icsk_timeout;
2006         } else if (timer_pending(&sp->sk_timer)) {
2007                 timer_active    = 2;
2008                 timer_expires   = sp->sk_timer.expires;
2009         } else {
2010                 timer_active    = 0;
2011                 timer_expires = jiffies;
2012         }
2013
2014         seq_printf(seq,
2015                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2016                    "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
2017                    i,
2018                    src->s6_addr32[0], src->s6_addr32[1],
2019                    src->s6_addr32[2], src->s6_addr32[3], srcp,
2020                    dest->s6_addr32[0], dest->s6_addr32[1],
2021                    dest->s6_addr32[2], dest->s6_addr32[3], destp,
2022                    sp->sk_state,
2023                    tp->write_seq-tp->snd_una,
2024                    (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
2025                    timer_active,
2026                    jiffies_to_clock_t(timer_expires - jiffies),
2027                    icsk->icsk_retransmits,
2028                    sock_i_uid(sp),
2029                    icsk->icsk_probes_out,
2030                    sock_i_ino(sp),
2031                    atomic_read(&sp->sk_refcnt), sp,
2032                    icsk->icsk_rto,
2033                    icsk->icsk_ack.ato,
2034                    (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
2035                    tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
2036                    );
2037 }
2038
2039 static void get_timewait6_sock(struct seq_file *seq,
2040                                struct inet_timewait_sock *tw, int i)
2041 {
2042         struct in6_addr *dest, *src;
2043         __u16 destp, srcp;
2044         struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
2045         int ttd = tw->tw_ttd - jiffies;
2046
2047         if (ttd < 0)
2048                 ttd = 0;
2049
2050         dest = &tw6->tw_v6_daddr;
2051         src  = &tw6->tw_v6_rcv_saddr;
2052         destp = ntohs(tw->tw_dport);
2053         srcp  = ntohs(tw->tw_sport);
2054
2055         seq_printf(seq,
2056                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2057                    "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2058                    i,
2059                    src->s6_addr32[0], src->s6_addr32[1],
2060                    src->s6_addr32[2], src->s6_addr32[3], srcp,
2061                    dest->s6_addr32[0], dest->s6_addr32[1],
2062                    dest->s6_addr32[2], dest->s6_addr32[3], destp,
2063                    tw->tw_substate, 0, 0,
2064                    3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2065                    atomic_read(&tw->tw_refcnt), tw);
2066 }
2067
2068 #ifdef CONFIG_PROC_FS
2069 static int tcp6_seq_show(struct seq_file *seq, void *v)
2070 {
2071         struct tcp_iter_state *st;
2072
2073         if (v == SEQ_START_TOKEN) {
2074                 seq_puts(seq,
2075                          "  sl  "
2076                          "local_address                         "
2077                          "remote_address                        "
2078                          "st tx_queue rx_queue tr tm->when retrnsmt"
2079                          "   uid  timeout inode\n");
2080                 goto out;
2081         }
2082         st = seq->private;
2083
2084         switch (st->state) {
2085         case TCP_SEQ_STATE_LISTENING:
2086         case TCP_SEQ_STATE_ESTABLISHED:
2087                 get_tcp6_sock(seq, v, st->num);
2088                 break;
2089         case TCP_SEQ_STATE_OPENREQ:
2090                 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2091                 break;
2092         case TCP_SEQ_STATE_TIME_WAIT:
2093                 get_timewait6_sock(seq, v, st->num);
2094                 break;
2095         }
2096 out:
2097         return 0;
2098 }
2099
2100 static struct file_operations tcp6_seq_fops;
2101 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2102         .owner          = THIS_MODULE,
2103         .name           = "tcp6",
2104         .family         = AF_INET6,
2105         .seq_show       = tcp6_seq_show,
2106         .seq_fops       = &tcp6_seq_fops,
2107 };
2108
2109 int __init tcp6_proc_init(void)
2110 {
2111         return tcp_proc_register(&tcp6_seq_afinfo);
2112 }
2113
2114 void tcp6_proc_exit(void)
2115 {
2116         tcp_proc_unregister(&tcp6_seq_afinfo);
2117 }
2118 #endif
2119
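/*
 * The proto glue handed to the IPv6 socket layer: generic TCP entry points
 * plus the AF_INET6-specific hooks defined in this file
 * (tcp_v6_connect, tcp_v6_init_sock, tcp_v6_hash, ...).
 */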
2120 struct proto tcpv6_prot = {
2121         .name                   = "TCPv6",
2122         .owner                  = THIS_MODULE,
2123         .close                  = tcp_close,
2124         .connect                = tcp_v6_connect,
2125         .disconnect             = tcp_disconnect,
2126         .accept                 = inet_csk_accept,
2127         .ioctl                  = tcp_ioctl,
2128         .init                   = tcp_v6_init_sock,
2129         .destroy                = tcp_v6_destroy_sock,
2130         .shutdown               = tcp_shutdown,
2131         .setsockopt             = tcp_setsockopt,
2132         .getsockopt             = tcp_getsockopt,
2133         .sendmsg                = tcp_sendmsg,
2134         .recvmsg                = tcp_recvmsg,
2135         .backlog_rcv            = tcp_v6_do_rcv,
2136         .hash                   = tcp_v6_hash,
2137         .unhash                 = tcp_unhash,
2138         .get_port               = tcp_v6_get_port,
2139         .enter_memory_pressure  = tcp_enter_memory_pressure,
2140         .sockets_allocated      = &tcp_sockets_allocated,
2141         .memory_allocated       = &tcp_memory_allocated,
2142         .memory_pressure        = &tcp_memory_pressure,
2143         .orphan_count           = &tcp_orphan_count,
2144         .sysctl_mem             = sysctl_tcp_mem,
2145         .sysctl_wmem            = sysctl_tcp_wmem,
2146         .sysctl_rmem            = sysctl_tcp_rmem,
2147         .max_header             = MAX_TCP_HEADER,
2148         .obj_size               = sizeof(struct tcp6_sock),
2149         .twsk_prot              = &tcp6_timewait_sock_ops,
2150         .rsk_prot               = &tcp6_request_sock_ops,
2151 #ifdef CONFIG_COMPAT
2152         .compat_setsockopt      = compat_tcp_setsockopt,
2153         .compat_getsockopt      = compat_tcp_getsockopt,
2154 #endif
2155 };
2156
2157 static struct inet6_protocol tcpv6_protocol = {
2158         .handler        =       tcp_v6_rcv,
2159         .err_handler    =       tcp_v6_err,
2160         .gso_send_check =       tcp_v6_gso_send_check,
2161         .gso_segment    =       tcp_tso_segment,
2162         .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2163 };
2164
2165 static struct inet_protosw tcpv6_protosw = {
2166         .type           =       SOCK_STREAM,
2167         .protocol       =       IPPROTO_TCP,
2168         .prot           =       &tcpv6_prot,
2169         .ops            =       &inet6_stream_ops,
2170         .capability     =       -1,
2171         .no_check       =       0,
2172         .flags          =       INET_PROTOSW_PERMANENT |
2173                                 INET_PROTOSW_ICSK,
2174 };
2175
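/*
 * Boot-time registration: hook tcp_v6_rcv/tcp_v6_err into the IPv6 protocol
 * table, register the SOCK_STREAM protosw, and create the control socket
 * used for replies that have no socket of their own.
 */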
2176 void __init tcpv6_init(void)
2177 {
2178         /* register inet6 protocol */
2179         if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
2180                 printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
2181         inet6_register_protosw(&tcpv6_protosw);
2182
2183         if (inet_csk_ctl_sock_create(&tcp6_socket, PF_INET6, SOCK_RAW,
2184                                      IPPROTO_TCP) < 0)
2185                 panic("Failed to create the TCPv6 control socket.\n");
2186 }