net/ipv4/tcp_hybla.c
/*
 * TCP HYBLA
 *
 * TCP-Hybla Congestion control algorithm, based on:
 *   C. Caini, R. Firrincieli, "TCP-Hybla: A TCP Enhancement
 *   for Heterogeneous Networks",
 *   International Journal on Satellite Communications,
 *   September 2004
 *   Daniele Lacamera
 *   root at danielinux.net
 */

#include <linux/module.h>
#include <net/tcp.h>

/* Tcp Hybla structure. */
struct hybla {
	u8    hybla_en;
	u32   snd_cwnd_cents;	/* Keeps increment values when it is <1, <<7 */
	u32   rho;		/* Rho parameter, integer part */
	u32   rho2;		/* Rho * Rho, integer part */
	u32   rho_3ls;		/* Rho parameter, <<3 */
	u32   rho2_7ls;		/* Rho^2, <<7 */
	u32   minrtt;		/* Minimum smoothed round trip time value seen */
};

/* Hybla reference round trip time (default = 1/40 sec = 25 ms),
 * expressed in milliseconds and converted to jiffies where used.
 */
static int rtt0 = 25;
module_param(rtt0, int, 0644);
MODULE_PARM_DESC(rtt0, "reference round trip time (ms)");

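/*
 * Usage note (illustrative, not part of the original source): because rtt0
 * is declared with mode 0644, it can be set at load time, e.g.
 * "modprobe tcp_hybla rtt0=50", or adjusted later through
 * /sys/module/tcp_hybla/parameters/rtt0; both paths assume the module is
 * built as tcp_hybla.ko.
 */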

/* This is called to refresh values for hybla parameters */
static inline void hybla_recalc_param(struct sock *sk)
{
	struct hybla *ca = inet_csk_ca(sk);

	ca->rho_3ls = max_t(u32, tcp_sk(sk)->srtt / msecs_to_jiffies(rtt0), 8);
	ca->rho = ca->rho_3ls >> 3;
	ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1;
	ca->rho2 = ca->rho2_7ls >> 7;
}
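
/*
 * Worked example (illustrative, not in the original source): assuming
 * HZ=1000 and a smoothed RTT of 200 ms, tp->srtt holds the RTT in jiffies
 * shifted left by 3, i.e. 1600, and msecs_to_jiffies(25) == 25, so:
 *   rho_3ls  = max(1600 / 25, 8) = 64	(rho << 3)
 *   rho      = 64 >> 3           = 8	(RTT / RTT0, rounded down)
 *   rho2_7ls = (64 * 64) << 1    = 8192	(rho^2 << 7)
 *   rho2     = 8192 >> 7         = 64	(rho^2)
 * The max_t() clamp keeps rho >= 1 for connections faster than rtt0.
 */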

static void hybla_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct hybla *ca = inet_csk_ca(sk);

	ca->rho = 0;
	ca->rho2 = 0;
	ca->rho_3ls = 0;
	ca->rho2_7ls = 0;
	ca->snd_cwnd_cents = 0;
	ca->hybla_en = 1;
	tp->snd_cwnd = 2;
	tp->snd_cwnd_clamp = 65535;

	/* 1st Rho measurement based on initial srtt */
	hybla_recalc_param(sk);

	/* set minimum rtt as this is the 1st ever seen */
	ca->minrtt = tp->srtt;
	tp->snd_cwnd = ca->rho;
}

static void hybla_state(struct sock *sk, u8 ca_state)
{
	struct hybla *ca = inet_csk_ca(sk);

	ca->hybla_en = (ca_state == TCP_CA_Open);
}

static inline u32 hybla_fraction(u32 odds)
{
	static const u32 fractions[] = {
		128, 139, 152, 165, 181, 197, 215, 234,
	};

	return (odds < ARRAY_SIZE(fractions)) ? fractions[odds] : 128;
}
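
/*
 * Note (illustrative, not in the original source): fractions[i] is
 * floor(128 * 2^(i/8)), i.e. 2^(i/8) in <<7 fixed point, so
 * hybla_fraction(rho_fractions) supplies the fractional factor 2^fract used
 * by the slow-start increment below; out-of-range indexes fall back to 128,
 * a factor of exactly 1.
 */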

/* TCP Hybla main routine.
 * This is the algorithm behavior:
 *     o Recalc Hybla parameters if min_rtt has changed
 *     o Give cwnd a new value based on the model proposed
 *     o remember increments <1
 */
static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct hybla *ca = inet_csk_ca(sk);
	u32 increment, odd, rho_fractions;
	int is_slowstart = 0;

	/* Recalculate rho only if this srtt is the lowest */
	if (tp->srtt < ca->minrtt) {
		hybla_recalc_param(sk);
		ca->minrtt = tp->srtt;
	}

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	if (!ca->hybla_en)
		return tcp_reno_cong_avoid(sk, ack, in_flight);

	if (ca->rho == 0)
		hybla_recalc_param(sk);

	rho_fractions = ca->rho_3ls - (ca->rho << 3);

	if (tp->snd_cwnd < tp->snd_ssthresh) {
		/*
		 * slow start
		 *	INC = 2^RHO - 1
		 * This is done by splitting the rho parameter
		 * into 2 parts: an integer part and a fraction part.
		 * Increment<<7 is estimated by doing:
		 *	[2^(int+fract)]<<7
		 * that is equal to:
		 *	(2^int) * [(2^fract)<<7]
		 * 2^int is computed directly as 1<<int,
		 * while hybla_fraction() is used to calculate
		 * 2^fract as a <<7 value.
		 */
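		/*
		 * Example (illustrative, not in the original source):
		 * rho = 2 and rho_fractions = 3 give
		 * increment = (1 << 2) * 165 - 128 = 532, i.e. cwnd grows by
		 * 532 >> 7 = 4 segments per ACK with 532 % 128 = 20 "cents"
		 * carried over, approximating INC = 2^(2 + 3/8) - 1 ~= 4.19.
		 */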
		is_slowstart = 1;
		increment = ((1 << ca->rho) * hybla_fraction(rho_fractions))
			- 128;
	} else {
		/*
		 * congestion avoidance
		 *	INC = RHO^2 / W
		 * Because the increment is computed as (rho^2 << 7) / window,
		 * it is already a <<7 value and we can easily count its
		 * fractions.
		 */
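		/*
		 * Example (illustrative, not in the original source): with
		 * rho = 8 (rho2_7ls = 8192) and snd_cwnd = 100, increment is
		 * 81, i.e. about 0.64 segments per ACK accumulated through
		 * snd_cwnd_cents, versus Reno's 1/W = 0.01.
		 */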
		increment = ca->rho2_7ls / tp->snd_cwnd;
		if (increment < 128)
			tp->snd_cwnd_cnt++;
	}

	odd = increment % 128;
	tp->snd_cwnd += increment >> 7;
	ca->snd_cwnd_cents += odd;

	/* check when fractions go >= 128 and increase cwnd by 1. */
	while (ca->snd_cwnd_cents >= 128) {
		tp->snd_cwnd++;
		ca->snd_cwnd_cents -= 128;
		tp->snd_cwnd_cnt = 0;
	}

	/* clamp down slowstart cwnd to ssthresh value. */
	if (is_slowstart)
		tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);

	tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp);
}

static struct tcp_congestion_ops tcp_hybla = {
	.init		= hybla_init,
	.ssthresh	= tcp_reno_ssthresh,
	.min_cwnd	= tcp_reno_min_cwnd,
	.cong_avoid	= hybla_cong_avoid,
	.set_state	= hybla_state,

	.owner		= THIS_MODULE,
	.name		= "hybla"
};
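
/*
 * Usage note (illustrative, not in the original source): once this module is
 * loaded, "hybla" becomes selectable system-wide via the
 * net.ipv4.tcp_congestion_control sysctl, and, on kernels that support it,
 * per socket via setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "hybla", 5).
 */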

static int __init hybla_register(void)
{
	BUILD_BUG_ON(sizeof(struct hybla) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_hybla);
}

static void __exit hybla_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_hybla);
}

module_init(hybla_register);
module_exit(hybla_unregister);

MODULE_AUTHOR("Daniele Lacamera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Hybla");