/*
 * linux/drivers/net/ehea/ehea_qmr.h
 *
 * eHEA ethernet device driver for IBM eServer System p
 *
 * (C) Copyright IBM Corp. 2006
 *
 * Authors:
 *	Christoph Raisch <raisch@de.ibm.com>
 *	Jan-Bernd Themann <themann@de.ibm.com>
 *	Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __EHEA_QMR_H__
#define __EHEA_QMR_H__

#include "ehea.h"
#include "ehea_hw.h"

/*
 * page size of ehea hardware queues
 */

#define EHEA_PAGESHIFT 12
#define EHEA_PAGESIZE (1UL << EHEA_PAGESHIFT)
#define EHEA_SECTSIZE (1UL << 24)
#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
#define EHEA_HUGEPAGESHIFT 34
#define EHEA_HUGEPAGE_SIZE (1UL << EHEA_HUGEPAGESHIFT)
#define EHEA_HUGEPAGE_PFN_MASK ((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)

#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
#endif

/* Some abbreviations used here:
 *
 * WQE  - Work Queue Entry
 * SWQE - Send Work Queue Entry
 * RWQE - Receive Work Queue Entry
 * CQE  - Completion Queue Entry
 * EQE  - Event Queue Entry
 * MR   - Memory Region
 */

/* Use of WR_ID field for EHEA */
#define EHEA_WR_ID_COUNT   EHEA_BMASK_IBM(0, 19)
#define EHEA_WR_ID_TYPE    EHEA_BMASK_IBM(20, 23)
#define EHEA_SWQE2_TYPE    0x1
#define EHEA_SWQE3_TYPE    0x2
#define EHEA_RWQE2_TYPE    0x3
#define EHEA_RWQE3_TYPE    0x4
#define EHEA_WR_ID_INDEX   EHEA_BMASK_IBM(24, 47)
#define EHEA_WR_ID_REFILL  EHEA_BMASK_IBM(48, 63)

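/*
 * A minimal sketch of how the masks above compose a 64-bit work request
 * ID, assuming the EHEA_BMASK_SET helper from ehea.h.  The function
 * itself is hypothetical and not part of the driver API.
 */
static inline u64 ehea_example_make_wr_id(u64 count, u64 type, u64 index)
{
	return EHEA_BMASK_SET(EHEA_WR_ID_COUNT, count)
	       | EHEA_BMASK_SET(EHEA_WR_ID_TYPE, type)
	       | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
}
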
struct ehea_vsgentry {
	u64 vaddr;
	u32 l_key;
	u32 len;
};

/* maximum number of sg entries allowed in a WQE */
#define EHEA_MAX_WQE_SG_ENTRIES 252
#define SWQE2_MAX_IMM (0xD0 - 0x30)
#define SWQE3_MAX_IMM 224

/* tx control flags for swqe */
#define EHEA_SWQE_CRC                   0x8000
#define EHEA_SWQE_IP_CHECKSUM           0x4000
#define EHEA_SWQE_TCP_CHECKSUM          0x2000
#define EHEA_SWQE_TSO                   0x1000
#define EHEA_SWQE_SIGNALLED_COMPLETION  0x0800
#define EHEA_SWQE_VLAN_INSERT           0x0400
#define EHEA_SWQE_IMM_DATA_PRESENT      0x0200
#define EHEA_SWQE_DESCRIPTORS_PRESENT   0x0100
#define EHEA_SWQE_WRAP_CTL_REC          0x0080
#define EHEA_SWQE_WRAP_CTL_FORCE        0x0040
#define EHEA_SWQE_BIND                  0x0020
#define EHEA_SWQE_PURGE                 0x0010

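/*
 * A minimal usage sketch: a transmit path ORs a subset of the flags above
 * into swqe->tx_control before posting, for example (illustrative only,
 * not the driver's exact hot path):
 *
 *	swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IP_CHECKSUM
 *			    | EHEA_SWQE_TCP_CHECKSUM
 *			    | EHEA_SWQE_IMM_DATA_PRESENT;
 */
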
/* sizeof(struct ehea_swqe) less the union */
#define SWQE_HEADER_SIZE 32

struct ehea_swqe {
	u64 wr_id;
	u16 tx_control;
	u16 vlan_tag;
	u8 reserved1;
	u8 ip_start;
	u8 ip_end;
	u8 immediate_data_length;
	u8 tcp_offset;
	u8 reserved2;
	u16 tcp_end;
	u8 wrap_tag;
	u8 descriptors;		/* number of valid descriptors in WQE */
	u16 reserved3;
	u16 reserved4;
	u16 mss;
	u32 reserved5;
	union {
		/* Send WQE Format 1 */
		struct {
			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
		} no_immediate_data;

		/* Send WQE Format 2 */
		struct {
			struct ehea_vsgentry sg_entry;
			/* 0x30 */
			u8 immediate_data[SWQE2_MAX_IMM];
			/* 0xd0 */
			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
		} immdata_desc __packed;

		/* Send WQE Format 3 */
		struct {
			u8 immediate_data[SWQE3_MAX_IMM];
		} immdata_nodesc;
	} u;
};

struct ehea_rwqe {
	u64 wr_id;		/* work request ID */
	u8 reserved1[5];
	u8 data_segments;
	u16 reserved2;
	u64 reserved3;
	u64 reserved4;
	struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
};

#define EHEA_CQE_VLAN_TAG_XTRACT   0x0400

#define EHEA_CQE_TYPE_RQ           0x60
#define EHEA_CQE_STAT_ERR_MASK     0x700F
#define EHEA_CQE_STAT_FAT_ERR_MASK 0xF
#define EHEA_CQE_BLIND_CKSUM       0x8000
#define EHEA_CQE_STAT_ERR_TCP      0x4000
#define EHEA_CQE_STAT_ERR_IP       0x2000
#define EHEA_CQE_STAT_ERR_CRC      0x1000

/* Defines which bad send cqe statuses lead to a port reset */
#define EHEA_CQE_STAT_RESET_MASK   0x0002

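/*
 * A minimal usage sketch: a completion handler would test the CQE status
 * word against the masks above (illustrative only):
 *
 *	if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
 *		if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
 *			...count a CRC error...
 *		...drop the frame or resubmit the buffer...
 *	}
 */
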
struct ehea_cqe {
	u64 wr_id;		/* work request ID from WQE */
	u8 type;
	u8 valid;
	u16 status;
	u16 reserved1;
	u16 num_bytes_transfered;
	u16 vlan_tag;
	u16 inet_checksum_value;
	u8 reserved2;
	u8 header_length;
	u16 reserved3;
	u16 page_offset;
	u16 wqe_count;
	u32 qp_token;
	u32 timestamp;
	u32 reserved4;
	u64 reserved5[3];
};

#define EHEA_EQE_VALID           EHEA_BMASK_IBM(0, 0)
#define EHEA_EQE_IS_CQE          EHEA_BMASK_IBM(1, 1)
#define EHEA_EQE_IDENTIFIER      EHEA_BMASK_IBM(2, 7)
#define EHEA_EQE_QP_CQ_NUMBER    EHEA_BMASK_IBM(8, 31)
#define EHEA_EQE_QP_TOKEN        EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_CQ_TOKEN        EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_KEY             EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_PORT_NUMBER     EHEA_BMASK_IBM(56, 63)
#define EHEA_EQE_EQ_NUMBER       EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_ID           EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_MECH_NUMBER  EHEA_BMASK_IBM(48, 55)
#define EHEA_EQE_SM_PORT_NUMBER  EHEA_BMASK_IBM(56, 63)

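/*
 * A minimal sketch of decoding the packed 64-bit EQE, assuming the
 * EHEA_BMASK_GET helper from ehea.h.  The function itself is hypothetical
 * and for illustration only.
 */
static inline u8 ehea_example_eqe_port(u64 eqe_entry)
{
	return EHEA_BMASK_GET(EHEA_EQE_PORT_NUMBER, eqe_entry);
}
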
#define EHEA_AER_RESTYPE_QP  0x8
#define EHEA_AER_RESTYPE_CQ  0x4
#define EHEA_AER_RESTYPE_EQ  0x3

/* Defines which affiliated errors lead to a port reset */
#define EHEA_AER_RESET_MASK   0xFFFFFFFFFEFFFFFFULL
#define EHEA_AERR_RESET_MASK  0xFFFFFFFFFFFFFFFFULL

struct ehea_eqe {
	u64 entry;
};

#define ERROR_DATA_LENGTH  EHEA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE    EHEA_BMASK_IBM(0, 7)

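/*
 * Queue iterator helpers: a hw_queue is a ring of EHEA_PAGESIZE pages of
 * fixed-size entries.  current_q_offset is a byte offset into the ring,
 * and toggle_state flips on every wrap-around so that the valid bit of an
 * entry (the top bit of its first byte) can be compared against it to
 * detect entries freshly written by hardware.
 */
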
static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
{
	struct ehea_page *current_page;

	if (q_offset >= queue->queue_length)
		q_offset -= queue->queue_length;
	current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
	return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
}

static inline void *hw_qeit_get(struct hw_queue *queue)
{
	return hw_qeit_calc(queue, queue->current_q_offset);
}

static inline void hw_qeit_inc(struct hw_queue *queue)
{
	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset >= queue->queue_length) {
		queue->current_q_offset = 0;
		/* toggle the valid flag */
		queue->toggle_state = (~queue->toggle_state) & 1;
	}
}

static inline void *hw_qeit_get_inc(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);
	hw_qeit_inc(queue);
	return retvalue;
}

static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
{
	struct ehea_cqe *retvalue = hw_qeit_get(queue);
	u8 valid = retvalue->valid;
	void *pref;

	if ((valid >> 7) == (queue->toggle_state & 1)) {
		/* this is a good one */
		hw_qeit_inc(queue);
		pref = hw_qeit_calc(queue, queue->current_q_offset);
		prefetch(pref);
		prefetch(pref + 128);
	} else
		retvalue = NULL;
	return retvalue;
}

static inline void *hw_qeit_get_valid(struct hw_queue *queue)
{
	struct ehea_cqe *retvalue = hw_qeit_get(queue);
	void *pref;
	u8 valid;

	pref = hw_qeit_calc(queue, queue->current_q_offset);
	prefetch(pref);
	prefetch(pref + 128);
	prefetch(pref + 256);
	valid = retvalue->valid;
	if (!((valid >> 7) == (queue->toggle_state & 1)))
		retvalue = NULL;
	return retvalue;
}

static inline void *hw_qeit_reset(struct hw_queue *queue)
{
	queue->current_q_offset = 0;
	return hw_qeit_get(queue);
}

static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
{
	u64 last_entry_in_q = queue->queue_length - queue->qe_size;
	void *retvalue;

	retvalue = hw_qeit_get(queue);
	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset > last_entry_in_q) {
		queue->current_q_offset = 0;
		queue->toggle_state = (~queue->toggle_state) & 1;
	}
	return retvalue;
}

static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);
	u32 qe = *(u8 *)retvalue;
	if ((qe >> 7) == (queue->toggle_state & 1))
		hw_qeit_eq_get_inc(queue);
	else
		retvalue = NULL;
	return retvalue;
}

static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
						   int rq_nr)
{
	struct hw_queue *queue;

	if (rq_nr == 1)
		queue = &qp->hw_rqueue1;
	else if (rq_nr == 2)
		queue = &qp->hw_rqueue2;
	else
		queue = &qp->hw_rqueue3;

	return hw_qeit_get_inc(queue);
}

static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
					      int *wqe_index)
{
	struct hw_queue *queue = &my_qp->hw_squeue;
	struct ehea_swqe *wqe_p;

	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
	wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue);

	return wqe_p;
}

static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
{
	/* make sure the WQE is fully written before notifying the hardware */
	iosync();
	ehea_update_sqa(my_qp, 1);
}

static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
{
	struct hw_queue *queue = &qp->hw_rqueue1;

	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
	return hw_qeit_get_valid(queue);
}

static inline void ehea_inc_cq(struct ehea_cq *cq)
{
	hw_qeit_inc(&cq->hw_queue);
}

static inline void ehea_inc_rq1(struct ehea_qp *qp)
{
	hw_qeit_inc(&qp->hw_rqueue1);
}

static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
{
	return hw_qeit_get_valid(&my_cq->hw_queue);
}

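/*
 * A minimal usage sketch of draining a completion queue with the helpers
 * above (illustrative only):
 *
 *	struct ehea_cqe *cqe;
 *
 *	while ((cqe = ehea_poll_cq(my_cq))) {
 *		...process cqe...
 *		ehea_inc_cq(my_cq);
 *	}
 */
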
#define EHEA_CQ_REGISTER_ORIG 0
#define EHEA_EQ_REGISTER_ORIG 0

enum ehea_eq_type {
	EHEA_EQ = 0,	/* event queue */
	EHEA_NEQ	/* notification event queue */
};

struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       enum ehea_eq_type type,
			       const u32 length, const u8 eqe_gen);

int ehea_destroy_eq(struct ehea_eq *eq);

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);

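/*
 * A minimal usage sketch of the event queue constructors above (the
 * queue length is an arbitrary example value, not a driver constant):
 *
 *	struct ehea_eq *neq = ehea_create_eq(adapter, EHEA_NEQ, 256, 1);
 *
 *	if (!neq)
 *		return -ENOMEM;
 *	...
 *	ehea_destroy_eq(neq);
 */
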
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
			       u64 eq_handle, u32 cq_token);

int ehea_destroy_cq(struct ehea_cq *cq);

struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
			       struct ehea_qp_init_attr *init_attr);

int ehea_destroy_qp(struct ehea_qp *qp);

int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr);

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
		 struct ehea_mr *shared_mr);

int ehea_rem_mr(struct ehea_mr *mr);

u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
		    u64 *aer, u64 *aerr);

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_create_busmap(void);
void ehea_destroy_busmap(void);
u64 ehea_map_vaddr(void *caddr);

#endif	/* __EHEA_QMR_H__ */