RDS: Fix potential race around rds_i[bw]_allocation
author    Andy Grover <andy.grover@oracle.com>
Fri, 30 Oct 2009 08:51:54 +0000 (08:51 +0000)
committer David S. Miller <davem@davemloft.net>
Fri, 30 Oct 2009 22:06:38 +0000 (15:06 -0700)
"At rds_ib_recv_refill_one(), it first executes atomic_read(&rds_ib_allocation)
for if-condition checking,

and then executes atomic_inc(&rds_ib_allocation) if the condition was
not satisfied.

However, if any other code which updates rds_ib_allocation executes
between these two atomic operation executions,
it seems that it may result race condition. (especially when
rds_ib_allocation + 1 == rds_ib_sysctl_max_recv_allocation)"
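
The race is a classic check-then-act pattern: the limit test and the
increment are two separate atomic operations, so another CPU can raise the
counter in between and the limit can be exceeded. A minimal user-space
sketch of the same shape, using C11 atomics in place of the kernel atomic_t
helpers (the names here are illustrative only, not the RDS code):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int allocation;             /* stands in for rds_ib_allocation   */
static int max_recv_allocation = 100;     /* rds_ib_sysctl_max_recv_allocation */

/* Racy: between the load and the add another thread can also pass the
 * test, so the counter may end up above max_recv_allocation. */
static bool reserve_racy(void)
{
        if (atomic_load(&allocation) >= max_recv_allocation)
                return false;             /* limit reached, give up */
        /* <-- another thread may increment here --> */
        atomic_fetch_add(&allocation, 1);
        return true;
}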

This patch fixes the race by using atomic_add_unless(), so that the limit
check and the increment happen as a single atomic operation and it becomes
impossible to allocate more than rds_ib_sysctl_max_recv_allocation, and by
decrementing the count again if the subsequent allocation fails. It also
makes an identical change to the iWARP transport.
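
In user space the effect of the fix can be sketched with a compare-and-swap
loop, which is roughly what atomic_add_unless() does: the limit test and the
increment become one atomic step, and the caller gives the reservation back
with a plain decrement if the allocation itself fails (again a sketch with
illustrative names, not the kernel implementation):

/* Increment 'allocation' unless the limit is already reached; returns
 * true if a slot was reserved.  Approximates atomic_add_unless(v, 1, u). */
static bool reserve_unless_full(void)
{
        int old = atomic_load(&allocation);

        while (old < max_recv_allocation) {
                if (atomic_compare_exchange_weak(&allocation, &old, old + 1))
                        return true;      /* test and increment were one step */
                /* CAS failed and refreshed 'old'; retry */
        }
        return false;                     /* at the limit, do not allocate */
}

/* Caller pattern matching the patch: undo the reservation when the
 * subsequent allocation fails.
 *
 *      if (!reserve_unless_full())
 *              goto out;
 *      obj = alloc_object();
 *      if (obj == NULL) {
 *              atomic_fetch_sub(&allocation, 1);
 *              goto out;
 *      }
 */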

Reported-by: Shin Hong <hongshin@gmail.com>
Signed-off-by: Andy Grover <andy.grover@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/rds/ib_recv.c
net/rds/iw_recv.c

diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index cd7a6cfcab03b4aa25bd335d6622aef881d6dc6d..2f009d391c293c4ae59fcf44bf6bbe8476b1e43d 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -143,15 +143,16 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
        int ret = -ENOMEM;
 
        if (recv->r_ibinc == NULL) {
-               if (atomic_read(&rds_ib_allocation) >= rds_ib_sysctl_max_recv_allocation) {
+               if (!atomic_add_unless(&rds_ib_allocation, 1, rds_ib_sysctl_max_recv_allocation)) {
                        rds_ib_stats_inc(s_ib_rx_alloc_limit);
                        goto out;
                }
                recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab,
                                                 kptr_gfp);
-               if (recv->r_ibinc == NULL)
+               if (recv->r_ibinc == NULL) {
+                       atomic_dec(&rds_ib_allocation);
                        goto out;
-               atomic_inc(&rds_ib_allocation);
+               }
                INIT_LIST_HEAD(&recv->r_ibinc->ii_frags);
                rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr);
        }
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index 8683f5f66c4b2931583b84e1f40daeeecaf2e64b..9f98150af19f52435281190606439e196bbef13e 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -143,15 +143,16 @@ static int rds_iw_recv_refill_one(struct rds_connection *conn,
        int ret = -ENOMEM;
 
        if (recv->r_iwinc == NULL) {
-               if (atomic_read(&rds_iw_allocation) >= rds_iw_sysctl_max_recv_allocation) {
+               if (!atomic_add_unless(&rds_iw_allocation, 1, rds_iw_sysctl_max_recv_allocation)) {
                        rds_iw_stats_inc(s_iw_rx_alloc_limit);
                        goto out;
                }
                recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab,
                                                 kptr_gfp);
-               if (recv->r_iwinc == NULL)
+               if (recv->r_iwinc == NULL) {
+                       atomic_dec(&rds_iw_allocation);
                        goto out;
-               atomic_inc(&rds_iw_allocation);
+               }
                INIT_LIST_HEAD(&recv->r_iwinc->ii_frags);
                rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr);
        }