[NET]: Merge TSO/UFO fields in sk_buff
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a28ae593b97613d4d09a692e2363f24e2b59417b..bdd71db8bf908e3d20dad25ee43ec777970c139c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
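
For context, the sk_buff side of this rename lives in include/linux/skbuff.h and is not part of this blobdiff: the TSO-only tso_size/tso_segs fields (and UFO's separate ufo_size) become one generic segmentation-offload triple, with a new gso_type mask saying which protocol the hardware should segment as. Paraphrased from the 2.6.18-era header, with surrounding members elided, so treat this as a sketch rather than the authoritative definition:

    struct skb_shared_info {
            /* ... dataref, nr_frags, ... */
            unsigned short  gso_size;       /* was tso_size / ufo_size */
            unsigned short  gso_segs;       /* was tso_segs */
            unsigned short  gso_type;       /* new: SKB_GSO_* mask */
            /* ... frag_list, frags[], ... */
    };

    enum {
            SKB_GSO_TCPV4 = 1 << 0,         /* set by tcp_set_skb_tso_segs() below */
            SKB_GSO_UDPV4 = 1 << 1,         /* the UFO counterpart */
    };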
@@ -59,6 +59,9 @@ int sysctl_tcp_tso_win_divisor = 3;
 int sysctl_tcp_mtu_probing = 0;
 int sysctl_tcp_base_mss = 512;
 
+/* By default, RFC2861 behavior.  */
+int sysctl_tcp_slow_start_after_idle = 1;
+
 static void update_send_head(struct sock *sk, struct tcp_sock *tp,
                             struct sk_buff *skb)
 {
@@ -138,7 +141,8 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
        struct inet_connection_sock *icsk = inet_csk(sk);
        const u32 now = tcp_time_stamp;
 
-       if (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto)
+       if (sysctl_tcp_slow_start_after_idle &&
+           (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
                tcp_cwnd_restart(sk, __sk_dst_get(sk));
 
        tp->lsndtime = now;
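
The new knob surfaces as net.ipv4.tcp_slow_start_after_idle; its sysctl table entry is added in net/ipv4/sysctl_net_ipv4.c and is not visible in this blobdiff. A standalone sketch of the idle test being gated here, keeping the kernel's signed-difference trick so jiffies wraparound compares correctly:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* RFC 2861 idle check: restart the congestion window only if the
     * knob is on, nothing is in flight, and the connection has been
     * idle for longer than one RTO.  The (int32_t) cast mirrors the
     * kernel's (s32) cast on (now - tp->lsndtime). */
    static bool needs_cwnd_restart(int slow_start_after_idle,
                                   uint32_t packets_out,
                                   uint32_t now, uint32_t lsndtime,
                                   int32_t rto)
    {
            return slow_start_after_idle &&
                   !packets_out &&
                   (int32_t)(now - lsndtime) > rto;
    }

    int main(void)
    {
            /* idle for 2*RTO with nothing in flight: restart is due */
            printf("%d\n", needs_cwnd_restart(1, 0, 1000, 0, 500));
            return 0;
    }

Setting the sysctl to 0 skips tcp_cwnd_restart() entirely, which long-idle, bursty applications sometimes want.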
@@ -465,7 +469,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        TCP_INC_STATS(TCP_MIB_OUTSEGS);
 
        err = icsk->icsk_af_ops->queue_xmit(skb, 0);
-       if (unlikely(err <= 0))
+       if (likely(err <= 0))
                return err;
 
        tcp_enter_cwr(sk);
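
The unlikely() to likely() flip is purely a branch-prediction hint: queue_xmit() returning <= 0 (success, or a hard error passed straight back to the caller) is the overwhelmingly common case, while a positive return falls through to tcp_enter_cwr() to signal local congestion. For reference, the kernel's annotations are thin wrappers over GCC's __builtin_expect (from include/linux/compiler.h, unchanged by this patch):

    #define likely(x)       __builtin_expect(!!(x), 1)
    #define unlikely(x)     __builtin_expect(!!(x), 0)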
@@ -511,15 +515,17 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned
                /* Avoid the costly divide in the normal
                 * non-TSO case.
                 */
-               skb_shinfo(skb)->tso_segs = 1;
-               skb_shinfo(skb)->tso_size = 0;
+               skb_shinfo(skb)->gso_segs = 1;
+               skb_shinfo(skb)->gso_size = 0;
+               skb_shinfo(skb)->gso_type = 0;
        } else {
                unsigned int factor;
 
                factor = skb->len + (mss_now - 1);
                factor /= mss_now;
-               skb_shinfo(skb)->tso_segs = factor;
-               skb_shinfo(skb)->tso_size = mss_now;
+               skb_shinfo(skb)->gso_segs = factor;
+               skb_shinfo(skb)->gso_size = mss_now;
+               skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
        }
 }
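
The open-coded factor arithmetic is a ceiling division: the number of mss_now-sized segments needed to cover skb->len, equivalent to what later kernels spell DIV_ROUND_UP(). A standalone sketch with hypothetical values:

    #include <stdio.h>

    /* ceil(len / mss) without floating point, exactly as above */
    static unsigned int gso_segs_for(unsigned int len, unsigned int mss)
    {
            return (len + mss - 1) / mss;
    }

    int main(void)
    {
            /* 4000 bytes at an MSS of 1448 needs 3 segments */
            printf("%u\n", gso_segs_for(4000, 1448));
            return 0;
    }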
 
@@ -642,7 +648,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
  * eventually). The difference is that pulled data not copied, but
  * immediately discarded.
  */
-static unsigned char *__pskb_trim_head(struct sk_buff *skb, int len)
+static void __pskb_trim_head(struct sk_buff *skb, int len)
 {
        int i, k, eat;
 
@@ -667,7 +673,6 @@ static unsigned char *__pskb_trim_head(struct sk_buff *skb, int len)
        skb->tail = skb->data;
        skb->data_len -= len;
        skb->len = skb->data_len;
-       return skb->tail;
 }
 
 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
@@ -676,12 +681,11 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
            pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                return -ENOMEM;
 
-       if (len <= skb_headlen(skb)) {
+       /* If len == headlen, we avoid __skb_pull to preserve alignment. */
+       if (unlikely(len < skb_headlen(skb)))
                __skb_pull(skb, len);
-       } else {
-               if (__pskb_trim_head(skb, len-skb_headlen(skb)) == NULL)
-                       return -ENOMEM;
-       }
+       else
+               __pskb_trim_head(skb, len - skb_headlen(skb));
 
        TCP_SKB_CB(skb)->seq += len;
        skb->ip_summed = CHECKSUM_HW;
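
Two things happen in this hunk. First, the old __pskb_trim_head() unconditionally returned skb->tail and could never return NULL, so the -ENOMEM branch removed here was dead code; making the function return void documents that it cannot fail. Second, the boundary case moves: len == skb_headlen(skb) now goes through __pskb_trim_head(skb, 0), which resets the linear area (skb->tail = skb->data) instead of pulling skb->data to its very end, preserving head alignment as the new comment notes. A toy sketch of the split, with plain counters standing in for the skb (hypothetical, for illustration only):

    #include <stdio.h>

    static void trim_head(unsigned int *headlen, unsigned int *paged,
                          unsigned int len)
    {
            if (len < *headlen) {
                    *headlen -= len;        /* __skb_pull() on linear data */
            } else {
                    /* __pskb_trim_head(): the linear payload is discarded
                     * wholesale and the remainder comes out of the paged
                     * frags; len == *headlen also lands here. */
                    *paged -= len - *headlen;
                    *headlen = 0;
            }
    }

    int main(void)
    {
            unsigned int headlen = 64, paged = 1400;
            trim_head(&headlen, &paged, 100);   /* 64 linear + 36 paged */
            printf("headlen=%u paged=%u\n", headlen, paged);
            return 0;
    }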
@@ -912,7 +916,7 @@ static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int
 
        if (!tso_segs ||
            (tso_segs > 1 &&
-            skb_shinfo(skb)->tso_size != mss_now)) {
+            tcp_skb_mss(skb) != mss_now)) {
                tcp_set_skb_tso_segs(sk, skb, mss_now);
                tso_segs = tcp_skb_pcount(skb);
        }
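
tcp_skb_pcount() and tcp_skb_mss() are the accessor pair from include/net/tcp.h; after the rename they read the gso_* fields, which is why the open-coded skb_shinfo(skb)->tso_size access here can switch to the helper. Paraphrased from the header of that era:

    /* An skb can carry multiple wire segments under TSO. */
    static inline int tcp_skb_pcount(const struct sk_buff *skb)
    {
            return skb_shinfo(skb)->gso_segs;
    }

    /* Meaningful only when tcp_skb_pcount() > 1. */
    static inline int tcp_skb_mss(const struct sk_buff *skb)
    {
            return skb_shinfo(skb)->gso_size;
    }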
@@ -1722,8 +1726,9 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
           tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
                if (!pskb_trim(skb, 0)) {
                        TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
-                       skb_shinfo(skb)->tso_segs = 1;
-                       skb_shinfo(skb)->tso_size = 0;
+                       skb_shinfo(skb)->gso_segs = 1;
+                       skb_shinfo(skb)->gso_size = 0;
+                       skb_shinfo(skb)->gso_type = 0;
                        skb->ip_summed = CHECKSUM_NONE;
                        skb->csum = 0;
                }
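
This three-line reset marks a control skb as exactly one non-GSO wire segment; the identical sequence recurs below in tcp_send_fin(), tcp_send_active_reset(), tcp_make_synack(), tcp_connect(), tcp_send_ack() and tcp_xmit_probe_skb(). A hypothetical helper capturing the pattern (not part of this patch, which open-codes it at every site):

    static inline void tcp_skb_mark_single(struct sk_buff *skb)
    {
            skb_shinfo(skb)->gso_segs = 1;  /* exactly one segment */
            skb_shinfo(skb)->gso_size = 0;  /* nothing to segment */
            skb_shinfo(skb)->gso_type = 0;  /* no offload protocol */
    }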
@@ -1928,8 +1933,9 @@ void tcp_send_fin(struct sock *sk)
                skb->csum = 0;
                TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
                TCP_SKB_CB(skb)->sacked = 0;
-               skb_shinfo(skb)->tso_segs = 1;
-               skb_shinfo(skb)->tso_size = 0;
+               skb_shinfo(skb)->gso_segs = 1;
+               skb_shinfo(skb)->gso_size = 0;
+               skb_shinfo(skb)->gso_type = 0;
 
                /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
                TCP_SKB_CB(skb)->seq = tp->write_seq;
@@ -1961,8 +1967,9 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
        skb->csum = 0;
        TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
        TCP_SKB_CB(skb)->sacked = 0;
-       skb_shinfo(skb)->tso_segs = 1;
-       skb_shinfo(skb)->tso_size = 0;
+       skb_shinfo(skb)->gso_segs = 1;
+       skb_shinfo(skb)->gso_size = 0;
+       skb_shinfo(skb)->gso_type = 0;
 
        /* Send it off. */
        TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
@@ -2045,8 +2052,9 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
        TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
        TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
        TCP_SKB_CB(skb)->sacked = 0;
-       skb_shinfo(skb)->tso_segs = 1;
-       skb_shinfo(skb)->tso_size = 0;
+       skb_shinfo(skb)->gso_segs = 1;
+       skb_shinfo(skb)->gso_size = 0;
+       skb_shinfo(skb)->gso_type = 0;
        th->seq = htonl(TCP_SKB_CB(skb)->seq);
        th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
        if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
@@ -2150,8 +2158,9 @@ int tcp_connect(struct sock *sk)
        TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
        TCP_ECN_send_syn(sk, tp, buff);
        TCP_SKB_CB(buff)->sacked = 0;
-       skb_shinfo(buff)->tso_segs = 1;
-       skb_shinfo(buff)->tso_size = 0;
+       skb_shinfo(buff)->gso_segs = 1;
+       skb_shinfo(buff)->gso_size = 0;
+       skb_shinfo(buff)->gso_type = 0;
        buff->csum = 0;
        TCP_SKB_CB(buff)->seq = tp->write_seq++;
        TCP_SKB_CB(buff)->end_seq = tp->write_seq;
@@ -2255,8 +2264,9 @@ void tcp_send_ack(struct sock *sk)
                buff->csum = 0;
                TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
                TCP_SKB_CB(buff)->sacked = 0;
-               skb_shinfo(buff)->tso_segs = 1;
-               skb_shinfo(buff)->tso_size = 0;
+               skb_shinfo(buff)->gso_segs = 1;
+               skb_shinfo(buff)->gso_size = 0;
+               skb_shinfo(buff)->gso_type = 0;
 
                /* Send it off, this clears delayed acks for us. */
                TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
@@ -2291,8 +2301,9 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
        skb->csum = 0;
        TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
        TCP_SKB_CB(skb)->sacked = urgent;
-       skb_shinfo(skb)->tso_segs = 1;
-       skb_shinfo(skb)->tso_size = 0;
+       skb_shinfo(skb)->gso_segs = 1;
+       skb_shinfo(skb)->gso_size = 0;
+       skb_shinfo(skb)->gso_type = 0;
 
        /* Use a previous sequence.  This should cause the other
         * end to send an ack.  Don't queue or clone SKB, just