#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_FMR_SIZE                    256
#define RDS_FMR_POOL_SIZE               4096

#define RDS_IB_MAX_SGE                  8
#define RDS_IB_RECV_SGE                 2

#define RDS_IB_DEFAULT_RECV_WR          1024
#define RDS_IB_DEFAULT_SEND_WR          256

#define RDS_IB_DEFAULT_RETRY_COUNT      2

#define RDS_IB_SUPPORTED_PROTOCOLS      0x00000003      /* minor versions supported */

extern struct list_head rds_ib_devices;

/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try to minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
/* page offset of the final full frag that fits in the page */
#define RDS_PAGE_LAST_OFF (((PAGE_SIZE / RDS_FRAG_SIZE) - 1) * RDS_FRAG_SIZE)
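/*
 * Worked example (values assumed purely for illustration; RDS_FRAG_SIZE
 * actually comes from rds.h): with PAGE_SIZE = 16384 and
 * RDS_FRAG_SIZE = 4096 the macro evaluates to
 * ((16384 / 4096) - 1) * 4096 = 12288, i.e. the last full fragment in
 * the page begins at offset 12288.
 */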
struct rds_page_frag {
        struct list_head        f_item;
        struct page             *f_page;
        unsigned long           f_offset;
        dma_addr_t              f_mapped;
};

struct rds_ib_incoming {
        struct list_head        ii_frags;
        struct rds_incoming     ii_inc;
};

struct rds_ib_connect_private {
        /* Add new fields at the end, and don't permute existing fields. */
        __be32                  dp_saddr;
        __be32                  dp_daddr;
        u8                      dp_protocol_major;
        u8                      dp_protocol_minor;
        __be16                  dp_protocol_minor_mask; /* bitmask */
        __be32                  dp_reserved1;
        __be64                  dp_ack_seq;
        __be32                  dp_credit;              /* non-zero enables flow ctl */
};

struct rds_ib_send_work {
        void                    *s_op;
        struct ib_send_wr       s_wr;
        struct ib_sge           s_sge[RDS_IB_MAX_SGE];
        unsigned long           s_queued;
};

struct rds_ib_recv_work {
        struct rds_ib_incoming  *r_ibinc;
        struct rds_page_frag    *r_frag;
        struct ib_recv_wr       r_wr;
        struct ib_sge           r_sge[2];
};

struct rds_ib_work_ring {
        u32             w_nr;
        u32             w_alloc_ptr;
        u32             w_alloc_ctr;
        u32             w_free_ptr;
        atomic_t        w_free_ctr;
};

struct rds_ib_device;

struct rds_ib_connection {

        struct list_head        ib_node;
        struct rds_ib_device    *rds_ibdev;
        struct rds_connection   *conn;

        /* alphabet soup, IBTA style */
        struct rdma_cm_id       *i_cm_id;
        struct ib_pd            *i_pd;
        struct ib_mr            *i_mr;
        struct ib_cq            *i_send_cq;
        struct ib_cq            *i_recv_cq;

        /* tx */
        struct rds_ib_work_ring i_send_ring;
        struct rm_data_op       *i_data_op;
        struct rds_header       *i_send_hdrs;
        u64                     i_send_hdrs_dma;
        struct rds_ib_send_work *i_sends;

        /* rx */
        struct tasklet_struct   i_recv_tasklet;
        struct mutex            i_recv_mutex;
        struct rds_ib_work_ring i_recv_ring;
        struct rds_ib_incoming  *i_ibinc;
        u32                     i_recv_data_rem;
        struct rds_header       *i_recv_hdrs;
        u64                     i_recv_hdrs_dma;
        struct rds_ib_recv_work *i_recvs;
        struct rds_page_frag    i_frag;
        u64                     i_ack_recv;     /* last ACK received */

        /* sending acks */
        unsigned long           i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
        atomic64_t              i_ack_next;     /* next ACK to send */
#else
        spinlock_t              i_ack_lock;     /* protect i_ack_next */
        u64                     i_ack_next;     /* next ACK to send */
#endif
        struct rds_header       *i_ack;
        struct ib_send_wr       i_ack_wr;
        struct ib_sge           i_ack_sge;
        u64                     i_ack_dma;
        unsigned long           i_ack_queued;

        /* Flow control related information
         *
         * Our algorithm uses a pair of variables that we need to access
         * atomically - one for the send credits, and one for the posted
         * recv credits we need to transfer to the remote.
         * Rather than protect them with a slow spinlock, we put both into
         * a single atomic_t and update it using cmpxchg.
         */
        atomic_t                i_credits;

        /* Protocol version specific information */
        unsigned int            i_flowctl:1;    /* enable/disable flow ctl */

        /* Batched completions */
        unsigned int            i_unsignaled_wrs;
};
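/*
 * Illustrative sketch only (not part of the original header): one way the
 * i_ack_next field above can be read consistently under either
 * configuration.  The helper name is hypothetical.
 */
#ifdef KERNEL_HAS_ATOMIC64
static inline u64 rds_ib_get_ack_example(struct rds_ib_connection *ic)
{
        /* atomic64_t gives a lock-free 64-bit read */
        return atomic64_read(&ic->i_ack_next);
}
#else
static inline u64 rds_ib_get_ack_example(struct rds_ib_connection *ic)
{
        unsigned long flags;
        u64 seq;

        /* the plain u64 needs i_ack_lock to be read atomically */
        spin_lock_irqsave(&ic->i_ack_lock, flags);
        seq = ic->i_ack_next;
        spin_unlock_irqrestore(&ic->i_ack_lock, flags);
        return seq;
}
#endif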

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)  ((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)  ((v) >> 16)
#define IB_SET_SEND_CREDITS(v)  ((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)  ((v) << 16)
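/*
 * Illustrative sketch only (not part of the original header): how the two
 * 16-bit counters packed into i_credits can be updated lock-free with
 * cmpxchg, using the macros above.  The helper name and the update policy
 * shown are hypothetical.
 */
static inline void rds_ib_set_send_credits_example(atomic_t *credits,
                                                   unsigned int new_send)
{
        int oldval, newval;

        do {
                oldval = atomic_read(credits);
                /* keep the posted-recv half, replace the send half */
                newval = IB_SET_POST_CREDITS(IB_GET_POST_CREDITS(oldval)) |
                         IB_SET_SEND_CREDITS(new_send);
        } while (atomic_cmpxchg(credits, oldval, newval) != oldval);
}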

struct rds_ib_ipaddr {
        struct list_head        list;
        __be32                  ipaddr;
};

struct rds_ib_device {
        struct list_head        list;
        struct list_head        ipaddr_list;
        struct list_head        conn_list;
        struct ib_device        *dev;
        struct ib_pd            *pd;
        struct ib_mr            *mr;
        struct rds_ib_mr_pool   *mr_pool;
        unsigned int            fmr_max_remaps;
        unsigned int            max_fmrs;
        int                     max_sge;
        unsigned int            max_wrs;
        unsigned int            max_initiator_depth;
        unsigned int            max_responder_resources;
        spinlock_t              spinlock;       /* protect the above */
};

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT        0
#define IB_ACK_REQUESTED        1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID        (~(u64) 0)

struct rds_ib_statistics {
        uint64_t        s_ib_connect_raced;
        uint64_t        s_ib_listen_closed_stale;
        uint64_t        s_ib_tx_cq_call;
        uint64_t        s_ib_tx_cq_event;
        uint64_t        s_ib_tx_ring_full;
        uint64_t        s_ib_tx_throttle;
        uint64_t        s_ib_tx_sg_mapping_failure;
        uint64_t        s_ib_tx_stalled;
        uint64_t        s_ib_tx_credit_updates;
        uint64_t        s_ib_rx_cq_call;
        uint64_t        s_ib_rx_cq_event;
        uint64_t        s_ib_rx_ring_empty;
        uint64_t        s_ib_rx_refill_from_cq;
        uint64_t        s_ib_rx_refill_from_thread;
        uint64_t        s_ib_rx_alloc_limit;
        uint64_t        s_ib_rx_credit_updates;
        uint64_t        s_ib_ack_sent;
        uint64_t        s_ib_ack_send_failure;
        uint64_t        s_ib_ack_send_delayed;
        uint64_t        s_ib_ack_send_piggybacked;
        uint64_t        s_ib_ack_received;
        uint64_t        s_ib_rdma_mr_alloc;
        uint64_t        s_ib_rdma_mr_free;
        uint64_t        s_ib_rdma_mr_used;
        uint64_t        s_ib_rdma_mr_pool_flush;
        uint64_t        s_ib_rdma_mr_pool_wait;
        uint64_t        s_ib_rdma_mr_pool_depleted;
        uint64_t        s_ib_atomic_cswp;
        uint64_t        s_ib_atomic_fadd;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define them.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
                struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
        unsigned int i;

        for (i = 0; i < sg_dma_len; ++i) {
                ib_dma_sync_single_for_cpu(dev,
                                ib_sg_dma_address(dev, &sg[i]),
                                ib_sg_dma_len(dev, &sg[i]),
                                direction);
        }
}
#define ib_dma_sync_sg_for_cpu  rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
                struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
        unsigned int i;

        for (i = 0; i < sg_dma_len; ++i) {
                ib_dma_sync_single_for_device(dev,
                                ib_sg_dma_address(dev, &sg[i]),
                                ib_sg_dma_len(dev, &sg[i]),
                                direction);
        }
}
#define ib_dma_sync_sg_for_device       rds_ib_dma_sync_sg_for_device
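/*
 * Illustrative usage only (not part of the original header): a receive-path
 * caller would typically sync a DMA-mapped scatterlist back to the CPU
 * before touching the data, e.g.
 *
 *      ib_dma_sync_sg_for_cpu(ic->i_cm_id->device, sg, sg_dma_len,
 *                             DMA_FROM_DEVICE);
 *
 * and hand it back with ib_dma_sync_sg_for_device() before reposting.
 * The variable names here are hypothetical.
 */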


/* ib.c */
extern struct rds_transport rds_ib_transport;
extern void rds_ib_add_one(struct ib_device *device);
extern void rds_ib_remove_one(struct ib_device *device);
extern struct ib_client rds_ib_client;

extern unsigned int fmr_pool_size;
extern unsigned int fmr_message_size;
extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_connect(struct rds_connection *conn);
void rds_ib_conn_shutdown(struct rds_connection *conn);
void rds_ib_state_change(struct sock *sk);
int __init rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
                             struct rdma_cm_event *event);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
                                struct rdma_cm_event *event);


#define rds_ib_conn_error(conn, fmt...) \
        __rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)
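/*
 * Illustrative usage only (not part of the original header): callers pass a
 * printk-style format, e.g.
 *
 *      rds_ib_conn_error(conn, "posting recv failed: %d\n", ret);
 *
 * which expands to __rds_ib_conn_error() with the KERN_WARNING level and
 * "RDS/IB: " prefix prepended.  The message text here is hypothetical.
 */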

/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock);
static inline void rds_ib_destroy_nodev_conns(void)
{
        __rds_ib_destroy_conns(&ib_nodev_conns, &ib_nodev_conns_lock);
}
static inline void rds_ib_destroy_conns(struct rds_ib_device *rds_ibdev)
{
        __rds_ib_destroy_conns(&rds_ibdev->conn_list, &rds_ibdev->spinlock);
}
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *);
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo);
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
                    struct rds_sock *rs, u32 *key_ret);
void rds_ib_sync_mr(void *trans_private, int dir);
void rds_ib_free_mr(void *trans_private, int invalidate);
void rds_ib_flush_mrs(void);

/* ib_recv.c */
int __init rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv(struct rds_connection *conn);
int rds_ib_recv_refill(struct rds_connection *conn, int prefill);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
                             size_t size);
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;
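/*
 * Illustrative usage only (not part of the original header): a send path
 * typically reserves ring slots before posting work requests and returns
 * them if the post cannot proceed, e.g.
 *
 *      work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, nr, &pos);
 *      if (work_alloc != nr) {
 *              rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
 *              rds_ib_stats_inc(s_ib_tx_ring_full);
 *      }
 *
 * The variable names here are hypothetical.
 */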

/* ib_send.c */
void rds_ib_xmit_complete(struct rds_connection *conn);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
                unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
                             u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);

/* ib_stats.c */
DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
                                    unsigned int avail);
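/*
 * Illustrative usage only (not part of the original header): counters are
 * bumped by member name, e.g. rds_ib_stats_inc(s_ib_ack_sent), which
 * rds_stats_inc_which() turns into a per-CPU increment of that field of
 * rds_ib_stats.
 */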

/* ib_sysctl.c */
int __init rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;
extern ctl_table rds_ib_sysctl_table[];

#endif