}
/* The per-socket spinlock must be held here. */
-#define sk_add_backlog(__sk, __skb) \
-do { if (!(__sk)->sk_backlog.tail) { \
- (__sk)->sk_backlog.head = \
- (__sk)->sk_backlog.tail = (__skb); \
- } else { \
- ((__sk)->sk_backlog.tail)->next = (__skb); \
- (__sk)->sk_backlog.tail = (__skb); \
- } \
- (__skb)->next = NULL; \
-} while(0)
+static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+{
+ if (!sk->sk_backlog.tail) {
+ sk->sk_backlog.head = sk->sk_backlog.tail = skb;
+ } else {
+ sk->sk_backlog.tail->next = skb;
+ sk->sk_backlog.tail = skb;
+ }
+ skb->next = NULL;
+}
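For context, a minimal caller sketch assuming the usual bh_lock_sock()/sock_owned_by_user() pattern used by protocol receive paths; my_proto_rcv() is a hypothetical handler, not part of this patch:

static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
        int rc = 0;

        bh_lock_sock(sk);                        /* the per-socket spinlock */
        if (!sock_owned_by_user(sk))
                rc = sk->sk_backlog_rcv(sk, skb);  /* process immediately */
        else
                sk_add_backlog(sk, skb);         /* queue; drained later by release_sock() */
        bh_unlock_sock(sk);

        return rc;
}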
#define sk_wait_event(__sk, __timeo, __condition) \
({ int rc; \
extern int sk_wait_data(struct sock *sk, long *timeo);
struct request_sock_ops;
+struct timewait_sock_ops;
/* Networking protocol blocks we attach to sockets.
* socket layer -> transport layer interface
kmem_cache_t *slab;
unsigned int obj_size;
- kmem_cache_t *twsk_slab;
- unsigned int twsk_obj_size;
atomic_t *orphan_count;
struct request_sock_ops *rsk_prot;
+ struct timewait_sock_ops *twsk_prot;
struct module *owner;
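The two fields dropped from struct proto above presumably move into the new timewait_sock_ops block; a rough sketch of what such a structure would hold (the real definition may add protocol-specific hooks):

struct timewait_sock_ops {
        kmem_cache_t    *twsk_slab;      /* slab cache for timewait sockets */
        unsigned int     twsk_obj_size;  /* size of one timewait socket object */
};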
static inline int sock_error(struct sock *sk)
{
- int err = xchg(&sk->sk_err, 0);
+ int err;
+ if (likely(!sk->sk_err))
+ return 0;
+ err = xchg(&sk->sk_err, 0);
return -err;
}
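The point of this change is that the common "no pending error" case becomes a plain read instead of an atomic xchg(). An illustrative caller sketch; my_send_check() is hypothetical:

static int my_send_check(struct sock *sk)
{
        int err = sock_error(sk);        /* cheap when sk_err == 0 */

        if (err)
                return err;              /* negative errno, e.g. -EPIPE */
        /* ... continue with the send path ... */
        return 0;
}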
(skb != (struct sk_buff *)&(sk)->sk_write_queue); \
skb = skb->next)
+/* from STCP for fast SACK processing */
+#define sk_stream_for_retrans_queue_from(skb, sk) \
+ for (; (skb != (sk)->sk_send_head) && \
+ (skb != (struct sk_buff *)&(sk)->sk_write_queue); \
+ skb = skb->next)
+
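Unlike the existing sk_stream_for_retrans_queue(), this variant starts the walk at a caller-supplied skb, so SACK processing can resume from a remembered position rather than the head of the write queue. An illustrative use, where hint_skb is a hypothetical saved position:

struct sk_buff *skb = hint_skb;

sk_stream_for_retrans_queue_from(skb, sk) {
        /* inspect or mark skb for SACK bookkeeping */
}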
/*
* Default write policy as shown to user space via poll/select/SIGIO
*/