bbs.cooldavid.org Git - net-next-2.6.git/blobdiff - include/net/sock.h
net: reorder struct sock fields
[net-next-2.6.git] / include / net / sock.h
index 73a4f9702a65c816c3701ab7fa9777fdbcaa2c72..5557dfb3dd68056575156009ec7220db51514627 100644 (file)
@@ -57,7 +57,7 @@
 #include <linux/rculist_nulls.h>
 #include <linux/poll.h>
 
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <net/dst.h>
 #include <net/checksum.h>
 
@@ -241,59 +241,67 @@ struct sock {
 #define sk_bind_node           __sk_common.skc_bind_node
 #define sk_prot                        __sk_common.skc_prot
 #define sk_net                 __sk_common.skc_net
-       kmemcheck_bitfield_begin(flags);
-       unsigned int            sk_shutdown  : 2,
-                               sk_no_check  : 2,
-                               sk_userlocks : 4,
-                               sk_protocol  : 8,
-                               sk_type      : 16;
-       kmemcheck_bitfield_end(flags);
-       int                     sk_rcvbuf;
        socket_lock_t           sk_lock;
+       struct sk_buff_head     sk_receive_queue;
        /*
         * The backlog queue is special, it is always used with
         * the per-socket spinlock held and requires low latency
         * access. Therefore we special case it's implementation.
+        * Note : rmem_alloc is in this structure to fill a hole
+        * on 64bit arches, not because its logically part of
+        * backlog.
         */
        struct {
-               struct sk_buff *head;
-               struct sk_buff *tail;
-               int len;
+               atomic_t        rmem_alloc;
+               int             len;
+               struct sk_buff  *head;
+               struct sk_buff  *tail;
        } sk_backlog;
+#define sk_rmem_alloc sk_backlog.rmem_alloc
+       int                     sk_forward_alloc;
+#ifdef CONFIG_RPS
+       __u32                   sk_rxhash;
+#endif
+       atomic_t                sk_drops;
+       int                     sk_rcvbuf;
+
+       struct sk_filter __rcu  *sk_filter;
        struct socket_wq        *sk_wq;
-       struct dst_entry        *sk_dst_cache;
+
+#ifdef CONFIG_NET_DMA
+       struct sk_buff_head     sk_async_wait_queue;
+#endif
+
 #ifdef CONFIG_XFRM
        struct xfrm_policy      *sk_policy[2];
 #endif
+       unsigned long           sk_flags;
+       struct dst_entry        *sk_dst_cache;
        spinlock_t              sk_dst_lock;
-       atomic_t                sk_rmem_alloc;
        atomic_t                sk_wmem_alloc;
        atomic_t                sk_omem_alloc;
        int                     sk_sndbuf;
-       struct sk_buff_head     sk_receive_queue;
        struct sk_buff_head     sk_write_queue;
-#ifdef CONFIG_NET_DMA
-       struct sk_buff_head     sk_async_wait_queue;
-#endif
+       kmemcheck_bitfield_begin(flags);
+       unsigned int            sk_shutdown  : 2,
+                               sk_no_check  : 2,
+                               sk_userlocks : 4,
+                               sk_protocol  : 8,
+                               sk_type      : 16;
+       kmemcheck_bitfield_end(flags);
        int                     sk_wmem_queued;
-       int                     sk_forward_alloc;
        gfp_t                   sk_allocation;
        int                     sk_route_caps;
        int                     sk_route_nocaps;
        int                     sk_gso_type;
        unsigned int            sk_gso_max_size;
        int                     sk_rcvlowat;
-#ifdef CONFIG_RPS
-       __u32                   sk_rxhash;
-#endif
-       unsigned long           sk_flags;
        unsigned long           sk_lingertime;
        struct sk_buff_head     sk_error_queue;
        struct proto            *sk_prot_creator;
        rwlock_t                sk_callback_lock;
        int                     sk_err,
                                sk_err_soft;
-       atomic_t                sk_drops;
        unsigned short          sk_ack_backlog;
        unsigned short          sk_max_ack_backlog;
        __u32                   sk_priority;
@@ -301,7 +309,6 @@ struct sock {
        const struct cred       *sk_peer_cred;
        long                    sk_rcvtimeo;
        long                    sk_sndtimeo;
-       struct sk_filter        *sk_filter;
        void                    *sk_protinfo;
        struct timer_list       sk_timer;
        ktime_t                 sk_stamp;
@@ -762,7 +769,7 @@ struct proto {
 
        /* Memory pressure */
        void                    (*enter_memory_pressure)(struct sock *sk);
-       atomic_t                *memory_allocated;      /* Current allocated memory. */
+       atomic_long_t           *memory_allocated;      /* Current allocated memory. */
        struct percpu_counter   *sockets_allocated;     /* Current number of sockets. */
        /*
         * Pressure flag: try to collapse.
@@ -771,7 +778,7 @@ struct proto {
         * is strict, actions are advisory and have some latency.
         */
        int                     *memory_pressure;
-       int                     *sysctl_mem;
+       long                    *sysctl_mem;
        int                     *sysctl_wmem;
        int                     *sysctl_rmem;
        int                     max_header;