RDS/IB: Disallow connections less than RDS 3.1
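
RDS 3.1 and newer peers always place the message header in the dedicated
header buffer posted at the front of the receive, so once pre-3.1 peers are
refused the RDS 3.0 header-at-end handling can go: rds_ib_get_header() is
removed and rds_ib_process_recv() reads the header directly out of
ic->i_recv_hdrs[].  rds_ib_inc_purge() becomes static, and the remaining
"== NULL" / "!= NULL" tests in this file are converted to the shorter
!ptr / ptr form.

The version gate itself is not part of this file; it belongs in the
connection set-up path.  As a rough sketch of the intent only, reusing the
version constant the removed helper already compared against
(reject_connection() is a hypothetical placeholder, not a real RDS function):

	/* sketch only: refuse peers that cannot speak RDS 3.1 */
	if (conn->c_version <= RDS_PROTOCOL_3_0)
		reject_connection(conn);	/* hypothetical helper */
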
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index c74e9904a6b2c20872917c79bf06a905c1ecd403..a68a3a721c6cfad8d3c3c9c40c1b82b44a5f4f30 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -53,7 +53,7 @@ static void rds_ib_frag_drop_page(struct rds_page_frag *frag)
 static void rds_ib_frag_free(struct rds_page_frag *frag)
 {
        rdsdebug("frag %p page %p\n", frag, frag->f_page);
-       BUG_ON(frag->f_page != NULL);
+       BUG_ON(frag->f_page);
        kmem_cache_free(rds_ib_frag_slab, frag);
 }
 
@@ -143,14 +143,14 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
        struct ib_sge *sge;
        int ret = -ENOMEM;
 
-       if (recv->r_ibinc == NULL) {
+       if (!recv->r_ibinc) {
                if (!atomic_add_unless(&rds_ib_allocation, 1, rds_ib_sysctl_max_recv_allocation)) {
                        rds_ib_stats_inc(s_ib_rx_alloc_limit);
                        goto out;
                }
                recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab,
                                                 kptr_gfp);
-               if (recv->r_ibinc == NULL) {
+               if (!recv->r_ibinc) {
                        atomic_dec(&rds_ib_allocation);
                        goto out;
                }
@@ -158,17 +158,17 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
                rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr);
        }
 
-       if (recv->r_frag == NULL) {
+       if (!recv->r_frag) {
                recv->r_frag = kmem_cache_alloc(rds_ib_frag_slab, kptr_gfp);
-               if (recv->r_frag == NULL)
+               if (!recv->r_frag)
                        goto out;
                INIT_LIST_HEAD(&recv->r_frag->f_item);
                recv->r_frag->f_page = NULL;
        }
 
-       if (ic->i_frag.f_page == NULL) {
+       if (!ic->i_frag.f_page) {
                ic->i_frag.f_page = alloc_page(page_gfp);
-               if (ic->i_frag.f_page == NULL)
+               if (!ic->i_frag.f_page)
                        goto out;
                ic->i_frag.f_offset = 0;
        }
@@ -273,7 +273,7 @@ int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
        return ret;
 }
 
-void rds_ib_inc_purge(struct rds_incoming *inc)
+static void rds_ib_inc_purge(struct rds_incoming *inc)
 {
        struct rds_ib_incoming *ibinc;
        struct rds_page_frag *frag;
@@ -557,47 +557,6 @@ u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
        return rds_ib_get_ack(ic);
 }
 
-static struct rds_header *rds_ib_get_header(struct rds_connection *conn,
-                                           struct rds_ib_recv_work *recv,
-                                           u32 data_len)
-{
-       struct rds_ib_connection *ic = conn->c_transport_data;
-       void *hdr_buff = &ic->i_recv_hdrs[recv - ic->i_recvs];
-       void *addr;
-       u32 misplaced_hdr_bytes;
-
-       /*
-        * Support header at the front (RDS 3.1+) as well as header-at-end.
-        *
-        * Cases:
-        * 1) header all in header buff (great!)
-        * 2) header all in data page (copy all to header buff)
-        * 3) header split across hdr buf + data page
-        *    (move bit in hdr buff to end before copying other bit from data page)
-        */
-       if (conn->c_version > RDS_PROTOCOL_3_0 || data_len == RDS_FRAG_SIZE)
-               return hdr_buff;
-
-       if (data_len <= (RDS_FRAG_SIZE - sizeof(struct rds_header))) {
-               addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0);
-               memcpy(hdr_buff,
-                      addr + recv->r_frag->f_offset + data_len,
-                      sizeof(struct rds_header));
-               kunmap_atomic(addr, KM_SOFTIRQ0);
-               return hdr_buff;
-       }
-
-       misplaced_hdr_bytes = (sizeof(struct rds_header) - (RDS_FRAG_SIZE - data_len));
-
-       memmove(hdr_buff + misplaced_hdr_bytes, hdr_buff, misplaced_hdr_bytes);
-
-       addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0);
-       memcpy(hdr_buff, addr + recv->r_frag->f_offset + data_len,
-              sizeof(struct rds_header) - misplaced_hdr_bytes);
-       kunmap_atomic(addr, KM_SOFTIRQ0);
-       return hdr_buff;
-}
-
 /*
  * It's kind of lame that we're copying from the posted receive pages into
  * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
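
The comment in the removed rds_ib_get_header() above lists the three possible
placements of the RDS 3.0 trailing header.  A small standalone illustration of
the same arithmetic, assuming a 4096-byte RDS_FRAG_SIZE and a 48-byte
struct rds_header (both sizes are assumptions for the example, not taken from
this patch):

	/*
	 * Illustration only, not part of the patch: where does the RDS 3.0
	 * trailing header land for a given data_len?
	 */
	#include <stdio.h>

	#define FRAG_SIZE 4096u		/* assumed RDS_FRAG_SIZE */
	#define HDR_SIZE  48u		/* assumed sizeof(struct rds_header) */

	static void where_is_the_header(unsigned int data_len)
	{
		if (data_len == FRAG_SIZE)
			printf("data_len %u: header entirely in the header buffer\n",
			       data_len);
		else if (data_len <= FRAG_SIZE - HDR_SIZE)
			printf("data_len %u: header entirely in the data page\n",
			       data_len);
		else	/* split: leading bytes in the page, the rest "misplaced" */
			printf("data_len %u: %u header bytes at the end of the data page, %u in the header buffer\n",
			       data_len, FRAG_SIZE - data_len,
			       HDR_SIZE - (FRAG_SIZE - data_len));
	}

	int main(void)
	{
		where_is_the_header(4096);	/* case 1: fast path, like RDS 3.1 */
		where_is_the_header(1024);	/* case 2: copy the whole header out of the page */
		where_is_the_header(4080);	/* case 3: 16 bytes in the page, 32 in the buffer */
		return 0;
	}

With RDS 3.1+ the header always lands in the dedicated header buffer, so only
the first case can happen, which is why the helper could be dropped and the
header read straight from ic->i_recv_hdrs[] below.
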
@@ -710,7 +669,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
        }
        data_len -= sizeof(struct rds_header);
 
-       ihdr = rds_ib_get_header(conn, recv, data_len);
+       ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];
 
        /* Validate the checksum. */
        if (!rds_message_verify_checksum(ihdr)) {
@@ -757,7 +716,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
         * into the inc and save the inc so we can hang upcoming fragments
         * off its list.
         */
-       if (ibinc == NULL) {
+       if (!ibinc) {
                ibinc = recv->r_ibinc;
                recv->r_ibinc = NULL;
                ic->i_ibinc = ibinc;
@@ -940,13 +899,13 @@ int __init rds_ib_recv_init(void)
        rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
                                        sizeof(struct rds_ib_incoming),
                                        0, 0, NULL);
-       if (rds_ib_incoming_slab == NULL)
+       if (!rds_ib_incoming_slab)
                goto out;
 
        rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
                                        sizeof(struct rds_page_frag),
                                        0, 0, NULL);
-       if (rds_ib_frag_slab == NULL)
+       if (!rds_ib_frag_slab)
                kmem_cache_destroy(rds_ib_incoming_slab);
        else
                ret = 0;