/*
 *  net/dccp/ccid.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  CCID infrastructure
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include "ccid.h"

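/*
 * All CCIDs are now built into the kernel (cf. the retired module interface
 * further down), so this table is fixed at compile time and can be walked
 * without taking any lock.
 */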
static struct ccid_operations *ccids[] = {
	&ccid2_ops,
#ifdef CONFIG_IP_DCCP_CCID3
	&ccid3_ops,
#endif
};

static struct ccid_operations *ccid_by_number(const u8 id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ccids); i++)
		if (ccids[i]->ccid_id == id)
			return ccids[i];
	return NULL;
}

/* check that up to @array_len members in @ccid_array are supported */
bool ccid_support_check(u8 const *ccid_array, u8 array_len)
{
	while (array_len > 0)
		if (ccid_by_number(ccid_array[--array_len]) == NULL)
			return false;
	return true;
}

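/*
 * Example (illustrative, not part of the original source): a peer-advertised
 * preference list such as { DCCPC_CCID2, DCCPC_CCID3 } passes this check only
 * if every listed CCID is present in the built-in ccids[] table above.
 */
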
/**
 * ccid_get_builtin_ccids  -  Populate a list of built-in CCIDs
 * @ccid_array: pointer through which the allocated list is returned
 * @array_len: where the number of entries is returned
 *
 * This function allocates memory - the caller must see that it is freed
 * after use.
 */
int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len)
{
	*ccid_array = kmalloc(ARRAY_SIZE(ccids), gfp_any());
	if (*ccid_array == NULL)
		return -ENOBUFS;

	for (*array_len = 0; *array_len < ARRAY_SIZE(ccids); *array_len += 1)
		(*ccid_array)[*array_len] = ccids[*array_len]->ccid_id;
	return 0;
}

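/*
 * Report the built-in CCIDs via getsockopt() (presumably reached through the
 * DCCP_SOCKOPT_AVAILABLE_CCIDS socket option); it honours the contract above
 * by kfree()ing the array obtained from ccid_get_builtin_ccids().
 */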
int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
				  char __user *optval, int __user *optlen)
{
	u8 *ccid_array, array_len;
	int err = 0;

	if (len < ARRAY_SIZE(ccids))
		return -EINVAL;

	if (ccid_get_builtin_ccids(&ccid_array, &array_len))
		return -ENOBUFS;

	if (put_user(array_len, optlen) ||
	    copy_to_user(optval, ccid_array, array_len))
		err = -EFAULT;

	kfree(ccid_array);
	return err;
}

#ifdef ___OLD_INTERFACE_TO_BE_REMOVED___
static u8 builtin_ccids[] = {
	DCCPC_CCID2,		/* CCID2 is supported by default */
#if defined(CONFIG_IP_DCCP_CCID3) || defined(CONFIG_IP_DCCP_CCID3_MODULE)
	DCCPC_CCID3,
#endif
};

static struct ccid_operations *ccids[CCID_MAX];
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
static atomic_t ccids_lockct = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(ccids_lock);

/*
 * The strategy is: modifications of the ccids vector are short, do not sleep
 * and are very rare, but read access should be free of any exclusive locks.
 */
static void ccids_write_lock(void)
{
	spin_lock(&ccids_lock);
	while (atomic_read(&ccids_lockct) != 0) {
		spin_unlock(&ccids_lock);
		yield();
		spin_lock(&ccids_lock);
	}
}

static inline void ccids_write_unlock(void)
{
	spin_unlock(&ccids_lock);
}

static inline void ccids_read_lock(void)
{
	atomic_inc(&ccids_lockct);
	smp_mb__after_atomic_inc();
	spin_unlock_wait(&ccids_lock);
}

static inline void ccids_read_unlock(void)
{
	atomic_dec(&ccids_lockct);
}

#else
#define ccids_write_lock()   do { } while (0)
#define ccids_write_unlock() do { } while (0)
#define ccids_read_lock()    do { } while (0)
#define ccids_read_unlock()  do { } while (0)
#endif
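
/*
 * Usage sketch (illustrative only, not part of the original interface): the
 * scheme above is a reader-biased lock.  Readers announce themselves with an
 * atomic counter and only wait for a writer that is already inside the
 * spinlock; the rare writer spins until all readers have drained.  The
 * helpers below are hypothetical and exist purely to show the calling
 * pattern.
 */
#if 0
static struct ccid_operations *ccid_lookup_example(u8 id)
{
	struct ccid_operations *ops;

	ccids_read_lock();		/* atomic_inc + wait for an active writer */
	ops = (id < CCID_MAX) ? ccids[id] : NULL;
	ccids_read_unlock();		/* atomic_dec */
	return ops;
}

static void ccid_register_example(struct ccid_operations *ops)
{
	ccids_write_lock();		/* spins until ccids_lockct drops to 0 */
	ccids[ops->ccid_id] = ops;	/* short, non-sleeping, rare update */
	ccids_write_unlock();
}
#endif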
#endif /* ___OLD_INTERFACE_TO_BE_REMOVED___ */

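/*
 * Create a slab cache for one CCID half-connection socket type.  Each object
 * is a struct ccid header followed by obj_size bytes of CCID-private state;
 * the cache name is built from the printf-style @fmt, kstrdup()ed here and
 * released again in ccid_kmem_cache_destroy().
 */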
static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt, ...)
{
	struct kmem_cache *slab;
	char slab_name_fmt[32], *slab_name;
	va_list args;

	va_start(args, fmt);
	vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args);
	va_end(args);

	slab_name = kstrdup(slab_name_fmt, GFP_KERNEL);
	if (slab_name == NULL)
		return NULL;
	slab = kmem_cache_create(slab_name, sizeof(struct ccid) + obj_size, 0,
				 SLAB_HWCACHE_ALIGN, NULL);
	if (slab == NULL)
		kfree(slab_name);
	return slab;
}

static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
{
	if (slab != NULL) {
		const char *name = kmem_cache_name(slab);

		kmem_cache_destroy(slab);
		kfree(name);
	}
}

#ifdef ___OLD_INTERFACE_TO_BE_REMOVED___
/* check that up to @array_len members in @ccid_array are supported */
bool ccid_support_check(u8 const *ccid_array, u8 array_len)
{
	u8 i, j, found;

	for (i = 0, found = 0; i < array_len; i++, found = 0) {
		for (j = 0; !found && j < ARRAY_SIZE(builtin_ccids); j++)
			found = (ccid_array[i] == builtin_ccids[j]);
		if (!found)
			return false;
	}
	return true;
}

/**
 * ccid_get_builtin_ccids  -  Provide copy of `builtin' CCID array
 * @ccid_array: pointer through which the allocated copy is returned
 * @array_len: where the number of entries is returned
 *
 * This function allocates memory - the caller must see that it is freed
 * after use.
 */
int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len)
{
	*ccid_array = kmemdup(builtin_ccids, sizeof(builtin_ccids), gfp_any());
	if (*ccid_array == NULL)
		return -ENOBUFS;
	*array_len = ARRAY_SIZE(builtin_ccids);
	return 0;
}

int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
				  char __user *optval, int __user *optlen)
{
	if (len < sizeof(builtin_ccids))
		return -EINVAL;

	if (put_user(sizeof(builtin_ccids), optlen) ||
	    copy_to_user(optval, builtin_ccids, sizeof(builtin_ccids)))
		return -EFAULT;
	return 0;
}
#endif /* ___OLD_INTERFACE_TO_BE_REMOVED___ */

static int ccid_activate(struct ccid_operations *ccid_ops)
{
	int err = -ENOBUFS;

	ccid_ops->ccid_hc_rx_slab =
			ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size,
					       "ccid%u_hc_rx_sock",
					       ccid_ops->ccid_id);
	if (ccid_ops->ccid_hc_rx_slab == NULL)
		goto out;

	ccid_ops->ccid_hc_tx_slab =
			ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size,
					       "ccid%u_hc_tx_sock",
					       ccid_ops->ccid_id);
	if (ccid_ops->ccid_hc_tx_slab == NULL)
		goto out_free_rx_slab;

	pr_info("CCID: Activated CCID %d (%s)\n",
		ccid_ops->ccid_id, ccid_ops->ccid_name);
	err = 0;
out:
	return err;
out_free_rx_slab:
	ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
	ccid_ops->ccid_hc_rx_slab = NULL;
	goto out;
}

static void ccid_deactivate(struct ccid_operations *ccid_ops)
{
	ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab);
	ccid_ops->ccid_hc_tx_slab = NULL;
	ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
	ccid_ops->ccid_hc_rx_slab = NULL;

	pr_info("CCID: Deactivated CCID %d (%s)\n",
		ccid_ops->ccid_id, ccid_ops->ccid_name);
}

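/*
 * Allocate and initialise a CCID block for one half-connection.  The
 * CCID-private state lives in the same slab object directly behind
 * struct ccid, which is why the object is sized sizeof(struct ccid) +
 * obj_size above and why the "ccid + 1" region is zeroed below.
 */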
struct ccid *ccid_new(unsigned char id, struct sock *sk, int rx, gfp_t gfp)
{
	struct ccid_operations *ccid_ops = ccid_by_number(id);
	struct ccid *ccid = NULL;

	if (ccid_ops == NULL)
		goto out;

	ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab :
				     ccid_ops->ccid_hc_tx_slab, gfp);
	if (ccid == NULL)
		goto out;
	ccid->ccid_ops = ccid_ops;
	if (rx) {
		memset(ccid + 1, 0, ccid_ops->ccid_hc_rx_obj_size);
		if (ccid->ccid_ops->ccid_hc_rx_init != NULL &&
		    ccid->ccid_ops->ccid_hc_rx_init(ccid, sk) != 0)
			goto out_free_ccid;
	} else {
		memset(ccid + 1, 0, ccid_ops->ccid_hc_tx_obj_size);
		if (ccid->ccid_ops->ccid_hc_tx_init != NULL &&
		    ccid->ccid_ops->ccid_hc_tx_init(ccid, sk) != 0)
			goto out_free_ccid;
	}
out:
	return ccid;
out_free_ccid:
	kmem_cache_free(rx ? ccid_ops->ccid_hc_rx_slab :
			     ccid_ops->ccid_hc_tx_slab, ccid);
	ccid = NULL;
	goto out;
}

EXPORT_SYMBOL_GPL(ccid_new);

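/*
 * Usage sketch (illustrative only; the helper name and error handling are
 * hypothetical): a caller typically allocates one TX and/or RX CCID per
 * half-connection and later releases it through the matching delete helper.
 */
#if 0
static int example_attach_tx_ccid(struct sock *sk, u8 id)
{
	struct ccid *tx_ccid = ccid_new(id, sk, 0, gfp_any());

	if (tx_ccid == NULL)
		return -ENOMEM;
	/* ... use the CCID; on teardown: */
	ccid_hc_tx_delete(tx_ccid, sk);
	return 0;
}
#endif
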
static void ccid_delete(struct ccid *ccid, struct sock *sk, int rx)
{
	struct ccid_operations *ccid_ops;

	if (ccid == NULL)
		return;

	ccid_ops = ccid->ccid_ops;
	if (rx) {
		if (ccid_ops->ccid_hc_rx_exit != NULL)
			ccid_ops->ccid_hc_rx_exit(sk);
		kmem_cache_free(ccid_ops->ccid_hc_rx_slab, ccid);
	} else {
		if (ccid_ops->ccid_hc_tx_exit != NULL)
			ccid_ops->ccid_hc_tx_exit(sk);
		kmem_cache_free(ccid_ops->ccid_hc_tx_slab, ccid);
	}
}

void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk)
{
	ccid_delete(ccid, sk, 1);
}

EXPORT_SYMBOL_GPL(ccid_hc_rx_delete);

void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk)
{
	ccid_delete(ccid, sk, 0);
}

EXPORT_SYMBOL_GPL(ccid_hc_tx_delete);

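/*
 * Activate every built-in CCID (presumably called once from the DCCP
 * protocol initialisation path); on failure, entries that were already
 * activated are unwound in reverse order.  ccid_cleanup_builtins() below is
 * the counterpart used on protocol exit.
 */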
int __init ccid_initialize_builtins(void)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(ccids); i++) {
		err = ccid_activate(ccids[i]);
		if (err)
			goto unwind_registrations;
	}
	return 0;

unwind_registrations:
	while (--i >= 0)
		ccid_deactivate(ccids[i]);
	return err;
}

void ccid_cleanup_builtins(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ccids); i++)
		ccid_deactivate(ccids[i]);
}