net-next-2.6.git / drivers/infiniband/hw/cxgb4/cm.c
(blame view, at commit "RDMA/cxgb4: debugfs files for dumping active stags")
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/list.h>
34#include <linux/workqueue.h>
35#include <linux/skbuff.h>
36#include <linux/timer.h>
37#include <linux/notifier.h>
38#include <linux/inetdevice.h>
39#include <linux/ip.h>
40#include <linux/tcp.h>
41
42#include <net/neighbour.h>
43#include <net/netevent.h>
44#include <net/route.h>
45
46#include "iw_cxgb4.h"
47
48static char *states[] = {
49 "idle",
50 "listen",
51 "connecting",
52 "mpa_wait_req",
53 "mpa_req_sent",
54 "mpa_req_rcvd",
55 "mpa_rep_sent",
56 "fpdu_mode",
57 "aborting",
58 "closing",
59 "moribund",
60 "dead",
61 NULL,
62};
63
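/*
 * Note: the states[] table above is indexed by enum c4iw_ep_state, so
 * its entries must stay in the same order as that enum; state_set()
 * and friends below use it for human-readable debug output.
 */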
64static int dack_mode;
65module_param(dack_mode, int, 0644);
66MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");
67
68int c4iw_max_read_depth = 8;
69module_param(c4iw_max_read_depth, int, 0644);
70MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
71
72static int enable_tcp_timestamps;
73module_param(enable_tcp_timestamps, int, 0644);
74MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
75
76static int enable_tcp_sack;
77module_param(enable_tcp_sack, int, 0644);
78MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");
79
80static int enable_tcp_window_scaling = 1;
81module_param(enable_tcp_window_scaling, int, 0644);
82MODULE_PARM_DESC(enable_tcp_window_scaling,
83 "Enable tcp window scaling (default=1)");
84
85int c4iw_debug;
86module_param(c4iw_debug, int, 0644);
87MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
88
89static int peer2peer;
90module_param(peer2peer, int, 0644);
91MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
92
93static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
94module_param(p2p_type, int, 0644);
95MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
96 "1=RDMA_READ 0=RDMA_WRITE (default 1)");
97
98static int ep_timeout_secs = 60;
99module_param(ep_timeout_secs, int, 0644);
100MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
101 "in seconds (default=60)");
102
103static int mpa_rev = 1;
104module_param(mpa_rev, int, 0644);
105MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
106 "1 is spec compliant. (default=1)");
107
108static int markers_enabled;
109module_param(markers_enabled, int, 0644);
110MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
111
112static int crc_enabled = 1;
113module_param(crc_enabled, int, 0644);
114MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
115
116static int rcv_win = 256 * 1024;
117module_param(rcv_win, int, 0644);
118MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
119
120static int snd_win = 32 * 1024;
121module_param(snd_win, int, 0644);
122MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
123
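/*
 * Usage note (a sketch, assuming the driver's usual iw_cxgb4 module
 * name): all of the knobs above are 0644 module parameters, so they
 * can be set at load time, e.g.
 *
 *	modprobe iw_cxgb4 crc_enabled=1 rcv_win=262144
 *
 * or adjusted afterwards through /sys/module/iw_cxgb4/parameters/.
 */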
 124static struct workqueue_struct *workq;
125
126static struct sk_buff_head rxq;
127
128static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
129static void ep_timeout(unsigned long arg);
130static void connect_reply_upcall(struct c4iw_ep *ep, int status);
131
132static LIST_HEAD(timeout_list);
133static spinlock_t timeout_lock;
134
135static void start_ep_timer(struct c4iw_ep *ep)
136{
137 PDBG("%s ep %p\n", __func__, ep);
138 if (timer_pending(&ep->timer)) {
139 PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
140 del_timer_sync(&ep->timer);
141 } else
142 c4iw_get_ep(&ep->com);
143 ep->timer.expires = jiffies + ep_timeout_secs * HZ;
144 ep->timer.data = (unsigned long)ep;
145 ep->timer.function = ep_timeout;
146 add_timer(&ep->timer);
147}
148
149static void stop_ep_timer(struct c4iw_ep *ep)
150{
151 PDBG("%s ep %p\n", __func__, ep);
152 if (!timer_pending(&ep->timer)) {
 153 printk(KERN_ERR "%s timer stopped when it's not running! "
 154 "ep %p state %u\n", __func__, ep, ep->com.state);
155 WARN_ON(1);
156 return;
157 }
158 del_timer_sync(&ep->timer);
159 c4iw_put_ep(&ep->com);
160}
161
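/*
 * Reference-counting sketch for start_ep_timer()/stop_ep_timer() above
 * (illustrative): every armed timer holds one reference on the
 * endpoint, so a typical caller pairs them like
 *
 *	start_ep_timer(ep);	<- takes a ref for the pending timer
 *	...wait for the peer or a reply...
 *	stop_ep_timer(ep);	<- puts that ref if the timer is still pending
 *
 * If the timer has already fired, the reference belongs to ep_timeout()
 * and stop_ep_timer() only warns.
 */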
162static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
163 struct l2t_entry *l2e)
164{
165 int error = 0;
166
167 if (c4iw_fatal_error(rdev)) {
168 kfree_skb(skb);
169 PDBG("%s - device in error state - dropping\n", __func__);
170 return -EIO;
171 }
172 error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
173 if (error < 0)
174 kfree_skb(skb);
 175 return error < 0 ? error : 0;
176}
177
178int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
179{
180 int error = 0;
181
182 if (c4iw_fatal_error(rdev)) {
183 kfree_skb(skb);
184 PDBG("%s - device in error state - dropping\n", __func__);
185 return -EIO;
186 }
187 error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
188 if (error < 0)
189 kfree_skb(skb);
 190 return error < 0 ? error : 0;
191}
192
193static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
194{
195 struct cpl_tid_release *req;
196
197 skb = get_skb(skb, sizeof *req, GFP_KERNEL);
198 if (!skb)
199 return;
200 req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
201 INIT_TP_WR(req, hwtid);
202 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
203 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
204 c4iw_ofld_send(rdev, skb);
205 return;
206}
207
208static void set_emss(struct c4iw_ep *ep, u16 opt)
209{
210 ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
211 ep->mss = ep->emss;
212 if (GET_TCPOPT_TSTAMP(opt))
213 ep->emss -= 12;
214 if (ep->emss < 128)
215 ep->emss = 128;
216 PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
217 ep->mss, ep->emss);
218}
219
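/*
 * Worked example for set_emss() above (illustrative): a 1500-byte MTU
 * table entry yields an effective MSS of 1500 - 40 = 1460 payload
 * bytes, or 1460 - 12 = 1448 if TCP timestamps were negotiated; the
 * 128-byte floor only matters for pathologically small MTU entries.
 */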
220static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
221{
222 unsigned long flags;
223 enum c4iw_ep_state state;
224
225 spin_lock_irqsave(&epc->lock, flags);
226 state = epc->state;
227 spin_unlock_irqrestore(&epc->lock, flags);
228 return state;
229}
230
231static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
232{
233 epc->state = new;
234}
235
236static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
237{
238 unsigned long flags;
239
240 spin_lock_irqsave(&epc->lock, flags);
241 PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
242 __state_set(epc, new);
243 spin_unlock_irqrestore(&epc->lock, flags);
244 return;
245}
246
247static void *alloc_ep(int size, gfp_t gfp)
248{
249 struct c4iw_ep_common *epc;
250
251 epc = kzalloc(size, gfp);
252 if (epc) {
253 kref_init(&epc->kref);
254 spin_lock_init(&epc->lock);
255 init_waitqueue_head(&epc->waitq);
256 }
257 PDBG("%s alloc ep %p\n", __func__, epc);
258 return epc;
259}
260
261void _c4iw_free_ep(struct kref *kref)
262{
263 struct c4iw_ep *ep;
264
265 ep = container_of(kref, struct c4iw_ep, com.kref);
266 PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
267 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
268 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
269 dst_release(ep->dst);
270 cxgb4_l2t_release(ep->l2t);
271 }
272 kfree(ep);
273}
274
275static void release_ep_resources(struct c4iw_ep *ep)
276{
277 set_bit(RELEASE_RESOURCES, &ep->com.flags);
278 c4iw_put_ep(&ep->com);
279}
280
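/*
 * Teardown contract (as implemented above): release_ep_resources()
 * only sets RELEASE_RESOURCES and drops one reference; the hardware
 * TID, the dst entry and the L2T entry are actually freed in
 * _c4iw_free_ep() once the final c4iw_put_ep() runs.
 */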
281static int status2errno(int status)
282{
283 switch (status) {
284 case CPL_ERR_NONE:
285 return 0;
286 case CPL_ERR_CONN_RESET:
287 return -ECONNRESET;
288 case CPL_ERR_ARP_MISS:
289 return -EHOSTUNREACH;
290 case CPL_ERR_CONN_TIMEDOUT:
291 return -ETIMEDOUT;
292 case CPL_ERR_TCAM_FULL:
293 return -ENOMEM;
294 case CPL_ERR_CONN_EXIST:
295 return -EADDRINUSE;
296 default:
297 return -EIO;
298 }
299}
300
301/*
302 * Try and reuse skbs already allocated...
303 */
304static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
305{
306 if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
307 skb_trim(skb, 0);
308 skb_get(skb);
309 skb_reset_transport_header(skb);
310 } else {
311 skb = alloc_skb(len, gfp);
312 }
313 return skb;
314}
315
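/*
 * Usage sketch for get_skb() (illustrative): callers either recycle an
 * incoming CPL skb or fall back to a fresh allocation:
 *
 *	skb = get_skb(skb, sizeof(*req), GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *
 * The skb_get() on the reuse path means the caller owns a reference in
 * both cases, and a NULL return always means alloc_skb() failed.
 */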
316static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
317 __be32 peer_ip, __be16 local_port,
318 __be16 peer_port, u8 tos)
319{
320 struct rtable *rt;
321 struct flowi fl = {
322 .oif = 0,
323 .nl_u = {
324 .ip4_u = {
325 .daddr = peer_ip,
326 .saddr = local_ip,
327 .tos = tos}
328 },
329 .proto = IPPROTO_TCP,
330 .uli_u = {
331 .ports = {
332 .sport = local_port,
333 .dport = peer_port}
334 }
335 };
336
337 if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
338 return NULL;
339 return rt;
340}
341
342static void arp_failure_discard(void *handle, struct sk_buff *skb)
343{
344 PDBG("%s c4iw_dev %p\n", __func__, handle);
345 kfree_skb(skb);
346}
347
348/*
349 * Handle an ARP failure for an active open.
350 */
351static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
352{
 353 printk(KERN_ERR MOD "ARP failure during connect\n");
354 kfree_skb(skb);
355}
356
357/*
358 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
359 * and send it along.
360 */
361static void abort_arp_failure(void *handle, struct sk_buff *skb)
362{
363 struct c4iw_rdev *rdev = handle;
364 struct cpl_abort_req *req = cplhdr(skb);
365
366 PDBG("%s rdev %p\n", __func__, rdev);
367 req->cmd = CPL_ABORT_NO_RST;
368 c4iw_ofld_send(rdev, skb);
369}
370
371static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
372{
373 unsigned int flowclen = 80;
374 struct fw_flowc_wr *flowc;
375 int i;
376
377 skb = get_skb(skb, flowclen, GFP_KERNEL);
378 flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
379
380 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
381 FW_FLOWC_WR_NPARAMS(8));
382 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
383 16)) | FW_WR_FLOWID(ep->hwtid));
384
385 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
386 flowc->mnemval[0].val = cpu_to_be32(0);
387 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
388 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
389 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
390 flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
391 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
392 flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
393 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
394 flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
395 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
396 flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
397 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
398 flowc->mnemval[6].val = cpu_to_be32(snd_win);
399 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
400 flowc->mnemval[7].val = cpu_to_be32(ep->emss);
401 /* Pad WR to 16 byte boundary */
402 flowc->mnemval[8].mnemonic = 0;
403 flowc->mnemval[8].val = 0;
404 for (i = 0; i < 9; i++) {
405 flowc->mnemval[i].r4[0] = 0;
406 flowc->mnemval[i].r4[1] = 0;
407 flowc->mnemval[i].r4[2] = 0;
408 }
409
410 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
411 c4iw_ofld_send(&ep->com.dev->rdev, skb);
412}
413
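/*
 * Size check for send_flowc() above (illustrative, assuming the usual
 * t4fw_api.h layout): the fw_flowc_wr header is 8 bytes and each
 * mnemval entry is 8 bytes, so 8 parameters plus the pad entry give
 * 8 + 9 * 8 = 80 bytes -- the hard-coded flowclen -- and FW_WR_LEN16
 * becomes DIV_ROUND_UP(80, 16) = 5.
 */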
414static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
415{
416 struct cpl_close_con_req *req;
417 struct sk_buff *skb;
418 int wrlen = roundup(sizeof *req, 16);
419
420 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
421 skb = get_skb(NULL, wrlen, gfp);
422 if (!skb) {
423 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
424 return -ENOMEM;
425 }
426 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
427 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
428 req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
429 memset(req, 0, wrlen);
430 INIT_TP_WR(req, ep->hwtid);
431 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
432 ep->hwtid));
433 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
434}
435
436static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
437{
438 struct cpl_abort_req *req;
439 int wrlen = roundup(sizeof *req, 16);
440
441 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
442 skb = get_skb(skb, wrlen, gfp);
443 if (!skb) {
444 printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
445 __func__);
446 return -ENOMEM;
447 }
448 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
449 t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
450 req = (struct cpl_abort_req *) skb_put(skb, wrlen);
451 memset(req, 0, wrlen);
452 INIT_TP_WR(req, ep->hwtid);
453 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
454 req->cmd = CPL_ABORT_SEND_RST;
455 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
456}
457
458static int send_connect(struct c4iw_ep *ep)
459{
460 struct cpl_act_open_req *req;
461 struct sk_buff *skb;
462 u64 opt0;
463 u32 opt2;
464 unsigned int mtu_idx;
465 int wscale;
466 int wrlen = roundup(sizeof *req, 16);
467
468 PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
469
470 skb = get_skb(NULL, wrlen, GFP_KERNEL);
471 if (!skb) {
472 printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
473 __func__);
474 return -ENOMEM;
475 }
 476 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
477
478 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
479 wscale = compute_wscale(rcv_win);
480 opt0 = KEEP_ALIVE(1) |
 481 DELACK(1) |
482 WND_SCALE(wscale) |
483 MSS_IDX(mtu_idx) |
484 L2T_IDX(ep->l2t->idx) |
485 TX_CHAN(ep->tx_chan) |
486 SMAC_SEL(ep->smac_idx) |
487 DSCP(ep->tos) |
488 RCV_BUFSIZ(rcv_win>>10);
489 opt2 = RX_CHANNEL(0) |
490 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
491 if (enable_tcp_timestamps)
492 opt2 |= TSTAMPS_EN(1);
493 if (enable_tcp_sack)
494 opt2 |= SACK_EN(1);
495 if (wscale && enable_tcp_window_scaling)
496 opt2 |= WND_SCALE_EN(1);
497 t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
498
499 req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
500 INIT_TP_WR(req, 0);
501 OPCODE_TID(req) = cpu_to_be32(
502 MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
503 req->local_port = ep->com.local_addr.sin_port;
504 req->peer_port = ep->com.remote_addr.sin_port;
505 req->local_ip = ep->com.local_addr.sin_addr.s_addr;
506 req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
507 req->opt0 = cpu_to_be64(opt0);
508 req->params = 0;
509 req->opt2 = cpu_to_be32(opt2);
510 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
511}
512
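/*
 * Worked example for send_connect() above (illustrative, assuming
 * compute_wscale() returns the smallest shift at which a 16-bit window
 * covers rcv_win): the default 256KB receive window needs wscale = 3,
 * since 65535 << 2 = 262140 still falls short of 262144, and
 * RCV_BUFSIZ is programmed in 1KB units (rcv_win >> 10 = 256).
 */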
513static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb)
514{
515 int mpalen, wrlen;
516 struct fw_ofld_tx_data_wr *req;
517 struct mpa_message *mpa;
518
519 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
520
521 BUG_ON(skb_cloned(skb));
522
523 mpalen = sizeof(*mpa) + ep->plen;
524 wrlen = roundup(mpalen + sizeof *req, 16);
525 skb = get_skb(skb, wrlen, GFP_KERNEL);
526 if (!skb) {
527 connect_reply_upcall(ep, -ENOMEM);
528 return;
529 }
530 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
531
532 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
533 memset(req, 0, wrlen);
534 req->op_to_immdlen = cpu_to_be32(
535 FW_WR_OP(FW_OFLD_TX_DATA_WR) |
536 FW_WR_COMPL(1) |
537 FW_WR_IMMDLEN(mpalen));
538 req->flowid_len16 = cpu_to_be32(
539 FW_WR_FLOWID(ep->hwtid) |
540 FW_WR_LEN16(wrlen >> 4));
541 req->plen = cpu_to_be32(mpalen);
542 req->tunnel_to_proxy = cpu_to_be32(
543 FW_OFLD_TX_DATA_WR_FLUSH(1) |
544 FW_OFLD_TX_DATA_WR_SHOVE(1));
545
546 mpa = (struct mpa_message *)(req + 1);
547 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
548 mpa->flags = (crc_enabled ? MPA_CRC : 0) |
549 (markers_enabled ? MPA_MARKERS : 0);
550 mpa->private_data_size = htons(ep->plen);
551 mpa->revision = mpa_rev;
552
553 if (ep->plen)
554 memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);
555
556 /*
557 * Reference the mpa skb. This ensures the data area
558 * will remain in memory until the hw acks the tx.
559 * Function fw4_ack() will deref it.
560 */
561 skb_get(skb);
562 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
563 BUG_ON(ep->mpa_skb);
564 ep->mpa_skb = skb;
565 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
566 start_ep_timer(ep);
567 state_set(&ep->com, MPA_REQ_SENT);
568 ep->mpa_attr.initiator = 1;
569 return;
570}
571
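/*
 * Wire-format note for send_mpa_req() above and the reject/reply
 * variants below (per the MPA spec, RFC 5044): struct mpa_message is a
 * 20-byte header -- 16-byte key, flags, revision and a 16-bit private
 * data length -- so a request carrying 8 bytes of private data has
 * mpalen = 28, and wrlen rounds the WR header plus payload up to a
 * 16-byte multiple.
 */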
572static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
573{
574 int mpalen, wrlen;
575 struct fw_ofld_tx_data_wr *req;
576 struct mpa_message *mpa;
577 struct sk_buff *skb;
578
579 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
580
581 mpalen = sizeof(*mpa) + plen;
582 wrlen = roundup(mpalen + sizeof *req, 16);
583
584 skb = get_skb(NULL, wrlen, GFP_KERNEL);
585 if (!skb) {
586 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
587 return -ENOMEM;
588 }
589 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
590
591 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
592 memset(req, 0, wrlen);
593 req->op_to_immdlen = cpu_to_be32(
594 FW_WR_OP(FW_OFLD_TX_DATA_WR) |
595 FW_WR_COMPL(1) |
596 FW_WR_IMMDLEN(mpalen));
597 req->flowid_len16 = cpu_to_be32(
598 FW_WR_FLOWID(ep->hwtid) |
599 FW_WR_LEN16(wrlen >> 4));
600 req->plen = cpu_to_be32(mpalen);
601 req->tunnel_to_proxy = cpu_to_be32(
602 FW_OFLD_TX_DATA_WR_FLUSH(1) |
603 FW_OFLD_TX_DATA_WR_SHOVE(1));
604
605 mpa = (struct mpa_message *)(req + 1);
606 memset(mpa, 0, sizeof(*mpa));
607 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
608 mpa->flags = MPA_REJECT;
609 mpa->revision = mpa_rev;
610 mpa->private_data_size = htons(plen);
611 if (plen)
612 memcpy(mpa->private_data, pdata, plen);
613
614 /*
615 * Reference the mpa skb again. This ensures the data area
616 * will remain in memory until the hw acks the tx.
617 * Function fw4_ack() will deref it.
618 */
619 skb_get(skb);
620 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
621 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
622 BUG_ON(ep->mpa_skb);
623 ep->mpa_skb = skb;
624 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
625}
626
627static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
628{
629 int mpalen, wrlen;
630 struct fw_ofld_tx_data_wr *req;
631 struct mpa_message *mpa;
632 struct sk_buff *skb;
633
634 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
635
636 mpalen = sizeof(*mpa) + plen;
637 wrlen = roundup(mpalen + sizeof *req, 16);
638
639 skb = get_skb(NULL, wrlen, GFP_KERNEL);
640 if (!skb) {
641 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
642 return -ENOMEM;
643 }
644 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
645
646 req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
647 memset(req, 0, wrlen);
648 req->op_to_immdlen = cpu_to_be32(
649 FW_WR_OP(FW_OFLD_TX_DATA_WR) |
650 FW_WR_COMPL(1) |
651 FW_WR_IMMDLEN(mpalen));
652 req->flowid_len16 = cpu_to_be32(
653 FW_WR_FLOWID(ep->hwtid) |
654 FW_WR_LEN16(wrlen >> 4));
655 req->plen = cpu_to_be32(mpalen);
656 req->tunnel_to_proxy = cpu_to_be32(
657 FW_OFLD_TX_DATA_WR_FLUSH(1) |
658 FW_OFLD_TX_DATA_WR_SHOVE(1));
659
660 mpa = (struct mpa_message *)(req + 1);
661 memset(mpa, 0, sizeof(*mpa));
662 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
663 mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
664 (markers_enabled ? MPA_MARKERS : 0);
665 mpa->revision = mpa_rev;
666 mpa->private_data_size = htons(plen);
667 if (plen)
668 memcpy(mpa->private_data, pdata, plen);
669
670 /*
671 * Reference the mpa skb. This ensures the data area
672 * will remain in memory until the hw acks the tx.
673 * Function fw4_ack() will deref it.
674 */
675 skb_get(skb);
676 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
677 ep->mpa_skb = skb;
678 state_set(&ep->com, MPA_REP_SENT);
679 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
680}
681
682static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
683{
684 struct c4iw_ep *ep;
685 struct cpl_act_establish *req = cplhdr(skb);
686 unsigned int tid = GET_TID(req);
687 unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
688 struct tid_info *t = dev->rdev.lldi.tids;
689
690 ep = lookup_atid(t, atid);
691
692 PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
693 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
694
695 dst_confirm(ep->dst);
696
697 /* setup the hwtid for this connection */
698 ep->hwtid = tid;
699 cxgb4_insert_tid(t, ep, tid);
700
701 ep->snd_seq = be32_to_cpu(req->snd_isn);
702 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
703
704 set_emss(ep, ntohs(req->tcp_opt));
705
706 /* dealloc the atid */
707 cxgb4_free_atid(t, atid);
708
709 /* start MPA negotiation */
710 send_flowc(ep, NULL);
711 send_mpa_req(ep, skb);
712
713 return 0;
714}
715
716static void close_complete_upcall(struct c4iw_ep *ep)
717{
718 struct iw_cm_event event;
719
720 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
721 memset(&event, 0, sizeof(event));
722 event.event = IW_CM_EVENT_CLOSE;
723 if (ep->com.cm_id) {
724 PDBG("close complete delivered ep %p cm_id %p tid %u\n",
725 ep, ep->com.cm_id, ep->hwtid);
726 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
727 ep->com.cm_id->rem_ref(ep->com.cm_id);
728 ep->com.cm_id = NULL;
729 ep->com.qp = NULL;
730 }
731}
732
733static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
734{
735 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
736 close_complete_upcall(ep);
737 state_set(&ep->com, ABORTING);
738 return send_abort(ep, skb, gfp);
739}
740
741static void peer_close_upcall(struct c4iw_ep *ep)
742{
743 struct iw_cm_event event;
744
745 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
746 memset(&event, 0, sizeof(event));
747 event.event = IW_CM_EVENT_DISCONNECT;
748 if (ep->com.cm_id) {
749 PDBG("peer close delivered ep %p cm_id %p tid %u\n",
750 ep, ep->com.cm_id, ep->hwtid);
751 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
752 }
753}
754
755static void peer_abort_upcall(struct c4iw_ep *ep)
756{
757 struct iw_cm_event event;
758
759 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
760 memset(&event, 0, sizeof(event));
761 event.event = IW_CM_EVENT_CLOSE;
762 event.status = -ECONNRESET;
763 if (ep->com.cm_id) {
764 PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
765 ep->com.cm_id, ep->hwtid);
766 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
767 ep->com.cm_id->rem_ref(ep->com.cm_id);
768 ep->com.cm_id = NULL;
769 ep->com.qp = NULL;
770 }
771}
772
773static void connect_reply_upcall(struct c4iw_ep *ep, int status)
774{
775 struct iw_cm_event event;
776
777 PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
778 memset(&event, 0, sizeof(event));
779 event.event = IW_CM_EVENT_CONNECT_REPLY;
780 event.status = status;
781 event.local_addr = ep->com.local_addr;
782 event.remote_addr = ep->com.remote_addr;
783
784 if ((status == 0) || (status == -ECONNREFUSED)) {
785 event.private_data_len = ep->plen;
786 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
787 }
788
789 PDBG("%s ep %p tid %u status %d\n", __func__, ep,
790 ep->hwtid, status);
791 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
792
793 if (status < 0) {
794 ep->com.cm_id->rem_ref(ep->com.cm_id);
795 ep->com.cm_id = NULL;
796 ep->com.qp = NULL;
797 }
798}
799
800static void connect_request_upcall(struct c4iw_ep *ep)
801{
802 struct iw_cm_event event;
803
804 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
805 memset(&event, 0, sizeof(event));
806 event.event = IW_CM_EVENT_CONNECT_REQUEST;
807 event.local_addr = ep->com.local_addr;
808 event.remote_addr = ep->com.remote_addr;
809 event.private_data_len = ep->plen;
810 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
811 event.provider_data = ep;
812 if (state_read(&ep->parent_ep->com) != DEAD) {
813 c4iw_get_ep(&ep->com);
814 ep->parent_ep->com.cm_id->event_handler(
815 ep->parent_ep->com.cm_id,
816 &event);
817 }
818 c4iw_put_ep(&ep->parent_ep->com);
819 ep->parent_ep = NULL;
820}
821
822static void established_upcall(struct c4iw_ep *ep)
823{
824 struct iw_cm_event event;
825
826 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
827 memset(&event, 0, sizeof(event));
828 event.event = IW_CM_EVENT_ESTABLISHED;
829 if (ep->com.cm_id) {
830 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
831 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
832 }
833}
834
835static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
836{
837 struct cpl_rx_data_ack *req;
838 struct sk_buff *skb;
839 int wrlen = roundup(sizeof *req, 16);
840
841 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
842 skb = get_skb(NULL, wrlen, GFP_KERNEL);
843 if (!skb) {
844 printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
845 return 0;
846 }
847
848 req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
849 memset(req, 0, wrlen);
850 INIT_TP_WR(req, ep->hwtid);
851 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
852 ep->hwtid));
853 req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
854 F_RX_DACK_CHANGE |
855 V_RX_DACK_MODE(dack_mode));
 856 set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
857 c4iw_ofld_send(&ep->com.dev->rdev, skb);
858 return credits;
859}
860
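/*
 * Flow-control note: the RX_DATA_ACK built above hands 'credits' bytes
 * of receive window back to the hardware; rx_data() below passes in the
 * length of each streaming chunk it consumes, so the window is
 * replenished as MPA negotiation data is absorbed.
 */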
861static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
862{
863 struct mpa_message *mpa;
864 u16 plen;
865 struct c4iw_qp_attributes attrs;
866 enum c4iw_qp_attr_mask mask;
867 int err;
868
869 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
870
871 /*
872 * Stop mpa timer. If it expired, then the state has
873 * changed and we bail since ep_timeout already aborted
874 * the connection.
875 */
876 stop_ep_timer(ep);
877 if (state_read(&ep->com) != MPA_REQ_SENT)
878 return;
879
880 /*
881 * If we get more than the supported amount of private data
882 * then we must fail this connection.
883 */
884 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
885 err = -EINVAL;
886 goto err;
887 }
888
889 /*
890 * copy the new data into our accumulation buffer.
891 */
892 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
893 skb->len);
894 ep->mpa_pkt_len += skb->len;
895
896 /*
897 * if we don't even have the mpa message, then bail.
898 */
899 if (ep->mpa_pkt_len < sizeof(*mpa))
900 return;
901 mpa = (struct mpa_message *) ep->mpa_pkt;
902
903 /* Validate MPA header. */
904 if (mpa->revision != mpa_rev) {
905 err = -EPROTO;
906 goto err;
907 }
908 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
909 err = -EPROTO;
910 goto err;
911 }
912
913 plen = ntohs(mpa->private_data_size);
914
915 /*
916 * Fail if there's too much private data.
917 */
918 if (plen > MPA_MAX_PRIVATE_DATA) {
919 err = -EPROTO;
920 goto err;
921 }
922
923 /*
924 * If plen does not account for pkt size
925 */
926 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
927 err = -EPROTO;
928 goto err;
929 }
930
931 ep->plen = (u8) plen;
932
933 /*
934 * If we don't have all the pdata yet, then bail.
 935 * We'll continue processing when more data arrives.
936 */
937 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
938 return;
939
940 if (mpa->flags & MPA_REJECT) {
941 err = -ECONNREFUSED;
942 goto err;
943 }
944
945 /*
946 * If we get here we have accumulated the entire mpa
947 * start reply message including private data. And
948 * the MPA header is valid.
949 */
950 state_set(&ep->com, FPDU_MODE);
951 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
952 ep->mpa_attr.recv_marker_enabled = markers_enabled;
953 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
954 ep->mpa_attr.version = mpa_rev;
955 ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
956 FW_RI_INIT_P2PTYPE_DISABLED;
957 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
958 "xmit_marker_enabled=%d, version=%d\n", __func__,
959 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
960 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
961
962 attrs.mpa_attr = ep->mpa_attr;
963 attrs.max_ird = ep->ird;
964 attrs.max_ord = ep->ord;
965 attrs.llp_stream_handle = ep;
966 attrs.next_state = C4IW_QP_STATE_RTS;
967
968 mask = C4IW_QP_ATTR_NEXT_STATE |
969 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
970 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
971
972 /* bind QP and TID with INIT_WR */
973 err = c4iw_modify_qp(ep->com.qp->rhp,
974 ep->com.qp, mask, &attrs, 1);
975 if (err)
976 goto err;
977 goto out;
978err:
979 state_set(&ep->com, ABORTING);
980 send_abort(ep, skb, GFP_KERNEL);
981out:
982 connect_reply_upcall(ep, err);
983 return;
984}
985
986static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
987{
988 struct mpa_message *mpa;
989 u16 plen;
990
991 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
992
993 if (state_read(&ep->com) != MPA_REQ_WAIT)
994 return;
995
996 /*
997 * If we get more than the supported amount of private data
998 * then we must fail this connection.
999 */
1000 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
1001 stop_ep_timer(ep);
1002 abort_connection(ep, skb, GFP_KERNEL);
1003 return;
1004 }
1005
1006 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
1007
1008 /*
1009 * Copy the new data into our accumulation buffer.
1010 */
1011 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1012 skb->len);
1013 ep->mpa_pkt_len += skb->len;
1014
1015 /*
1016 * If we don't even have the mpa message, then bail.
 1017 * We'll continue processing when more data arrives.
1018 */
1019 if (ep->mpa_pkt_len < sizeof(*mpa))
1020 return;
1021
1022 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
1023 stop_ep_timer(ep);
1024 mpa = (struct mpa_message *) ep->mpa_pkt;
1025
1026 /*
1027 * Validate MPA Header.
1028 */
1029 if (mpa->revision != mpa_rev) {
1030 abort_connection(ep, skb, GFP_KERNEL);
1031 return;
1032 }
1033
1034 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
1035 abort_connection(ep, skb, GFP_KERNEL);
1036 return;
1037 }
1038
1039 plen = ntohs(mpa->private_data_size);
1040
1041 /*
1042 * Fail if there's too much private data.
1043 */
1044 if (plen > MPA_MAX_PRIVATE_DATA) {
1045 abort_connection(ep, skb, GFP_KERNEL);
1046 return;
1047 }
1048
1049 /*
1050 * If plen does not account for pkt size
1051 */
1052 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1053 abort_connection(ep, skb, GFP_KERNEL);
1054 return;
1055 }
1056 ep->plen = (u8) plen;
1057
1058 /*
1059 * If we don't have all the pdata yet, then bail.
1060 */
1061 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1062 return;
1063
1064 /*
1065 * If we get here we have accumulated the entire mpa
 1066 * start request message including private data.
1067 */
1068 ep->mpa_attr.initiator = 0;
1069 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1070 ep->mpa_attr.recv_marker_enabled = markers_enabled;
1071 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1072 ep->mpa_attr.version = mpa_rev;
1073 ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
1074 FW_RI_INIT_P2PTYPE_DISABLED;
1075 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
1076 "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
1077 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
1078 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1079 ep->mpa_attr.p2p_type);
1080
1081 state_set(&ep->com, MPA_REQ_RCVD);
1082
1083 /* drive upcall */
1084 connect_request_upcall(ep);
1085 return;
1086}
1087
1088static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1089{
1090 struct c4iw_ep *ep;
1091 struct cpl_rx_data *hdr = cplhdr(skb);
1092 unsigned int dlen = ntohs(hdr->len);
1093 unsigned int tid = GET_TID(hdr);
1094 struct tid_info *t = dev->rdev.lldi.tids;
1095
1096 ep = lookup_tid(t, tid);
1097 PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
1098 skb_pull(skb, sizeof(*hdr));
1099 skb_trim(skb, dlen);
1100
1101 ep->rcv_seq += dlen;
1102 BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
1103
1104 /* update RX credits */
1105 update_rx_credits(ep, dlen);
1106
1107 switch (state_read(&ep->com)) {
1108 case MPA_REQ_SENT:
1109 process_mpa_reply(ep, skb);
1110 break;
1111 case MPA_REQ_WAIT:
1112 process_mpa_request(ep, skb);
1113 break;
1114 case MPA_REP_SENT:
1115 break;
1116 default:
1117 printk(KERN_ERR MOD "%s Unexpected streaming data."
1118 " ep %p state %d tid %u\n",
1119 __func__, ep, state_read(&ep->com), ep->hwtid);
1120
1121 /*
1122 * The ep will timeout and inform the ULP of the failure.
1123 * See ep_timeout().
1124 */
1125 break;
1126 }
1127 return 0;
1128}
1129
1130static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1131{
1132 struct c4iw_ep *ep;
1133 struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
1134 unsigned long flags;
1135 int release = 0;
1136 unsigned int tid = GET_TID(rpl);
1137 struct tid_info *t = dev->rdev.lldi.tids;
1138
1139 ep = lookup_tid(t, tid);
1140 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1141 BUG_ON(!ep);
1142 spin_lock_irqsave(&ep->com.lock, flags);
1143 switch (ep->com.state) {
1144 case ABORTING:
1145 __state_set(&ep->com, DEAD);
1146 release = 1;
1147 break;
1148 default:
1149 printk(KERN_ERR "%s ep %p state %d\n",
1150 __func__, ep, ep->com.state);
1151 break;
1152 }
1153 spin_unlock_irqrestore(&ep->com.lock, flags);
1154
1155 if (release)
1156 release_ep_resources(ep);
1157 return 0;
1158}
1159
1160/*
1161 * Return whether a failed active open has allocated a TID
1162 */
1163static inline int act_open_has_tid(int status)
1164{
1165 return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
1166 status != CPL_ERR_ARP_MISS;
1167}
1168
1169static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1170{
1171 struct c4iw_ep *ep;
1172 struct cpl_act_open_rpl *rpl = cplhdr(skb);
1173 unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
1174 ntohl(rpl->atid_status)));
1175 struct tid_info *t = dev->rdev.lldi.tids;
1176 int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
1177
1178 ep = lookup_atid(t, atid);
1179
1180 PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
1181 status, status2errno(status));
1182
1183 if (status == CPL_ERR_RTX_NEG_ADVICE) {
1184 printk(KERN_WARNING MOD "Connection problems for atid %u\n",
1185 atid);
1186 return 0;
1187 }
1188
1189 connect_reply_upcall(ep, status2errno(status));
1190 state_set(&ep->com, DEAD);
1191
1192 if (status && act_open_has_tid(status))
1193 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
1194
1195 cxgb4_free_atid(t, atid);
1196 dst_release(ep->dst);
1197 cxgb4_l2t_release(ep->l2t);
1198 c4iw_put_ep(&ep->com);
1199
1200 return 0;
1201}
1202
1203static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1204{
1205 struct cpl_pass_open_rpl *rpl = cplhdr(skb);
1206 struct tid_info *t = dev->rdev.lldi.tids;
1207 unsigned int stid = GET_TID(rpl);
1208 struct c4iw_listen_ep *ep = lookup_stid(t, stid);
1209
1210 if (!ep) {
1211 printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
1212 return 0;
1213 }
1214 PDBG("%s ep %p status %d error %d\n", __func__, ep,
1215 rpl->status, status2errno(rpl->status));
1216 ep->com.rpl_err = status2errno(rpl->status);
1217 ep->com.rpl_done = 1;
1218 wake_up(&ep->com.waitq);
1219
1220 return 0;
1221}
1222
1223static int listen_stop(struct c4iw_listen_ep *ep)
1224{
1225 struct sk_buff *skb;
1226 struct cpl_close_listsvr_req *req;
1227
1228 PDBG("%s ep %p\n", __func__, ep);
1229 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1230 if (!skb) {
1231 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
1232 return -ENOMEM;
1233 }
1234 req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
1235 INIT_TP_WR(req, 0);
1236 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
1237 ep->stid));
1238 req->reply_ctrl = cpu_to_be16(
1239 QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
1240 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
1241 return c4iw_ofld_send(&ep->com.dev->rdev, skb);
1242}
1243
1244static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1245{
1246 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
1247 struct tid_info *t = dev->rdev.lldi.tids;
1248 unsigned int stid = GET_TID(rpl);
1249 struct c4iw_listen_ep *ep = lookup_stid(t, stid);
1250
1251 PDBG("%s ep %p\n", __func__, ep);
1252 ep->com.rpl_err = status2errno(rpl->status);
1253 ep->com.rpl_done = 1;
1254 wake_up(&ep->com.waitq);
1255 return 0;
1256}
1257
1258static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
1259 struct cpl_pass_accept_req *req)
1260{
1261 struct cpl_pass_accept_rpl *rpl;
1262 unsigned int mtu_idx;
1263 u64 opt0;
1264 u32 opt2;
1265 int wscale;
1266
1267 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1268 BUG_ON(skb_cloned(skb));
1269 skb_trim(skb, sizeof(*rpl));
1270 skb_get(skb);
1271 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
1272 wscale = compute_wscale(rcv_win);
1273 opt0 = KEEP_ALIVE(1) |
 1274 DELACK(1) |
1275 WND_SCALE(wscale) |
1276 MSS_IDX(mtu_idx) |
1277 L2T_IDX(ep->l2t->idx) |
1278 TX_CHAN(ep->tx_chan) |
1279 SMAC_SEL(ep->smac_idx) |
1280 DSCP(ep->tos) |
1281 RCV_BUFSIZ(rcv_win>>10);
1282 opt2 = RX_CHANNEL(0) |
1283 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
1284
1285 if (enable_tcp_timestamps && req->tcpopt.tstamp)
1286 opt2 |= TSTAMPS_EN(1);
1287 if (enable_tcp_sack && req->tcpopt.sack)
1288 opt2 |= SACK_EN(1);
1289 if (wscale && enable_tcp_window_scaling)
1290 opt2 |= WND_SCALE_EN(1);
1291
1292 rpl = cplhdr(skb);
1293 INIT_TP_WR(rpl, ep->hwtid);
1294 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
1295 ep->hwtid));
1296 rpl->opt0 = cpu_to_be64(opt0);
1297 rpl->opt2 = cpu_to_be32(opt2);
 1298 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
1299 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1300
1301 return;
1302}
1303
1304static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
1305 struct sk_buff *skb)
1306{
1307 PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
1308 peer_ip);
1309 BUG_ON(skb_cloned(skb));
1310 skb_trim(skb, sizeof(struct cpl_tid_release));
1311 skb_get(skb);
1312 release_tid(&dev->rdev, hwtid, skb);
1313 return;
1314}
1315
1316static void get_4tuple(struct cpl_pass_accept_req *req,
1317 __be32 *local_ip, __be32 *peer_ip,
1318 __be16 *local_port, __be16 *peer_port)
1319{
1320 int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
1321 int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
1322 struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
1323 struct tcphdr *tcp = (struct tcphdr *)
1324 ((u8 *)(req + 1) + eth_len + ip_len);
1325
1326 PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
1327 ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
1328 ntohs(tcp->dest));
1329
1330 *peer_ip = ip->saddr;
1331 *local_ip = ip->daddr;
1332 *peer_port = tcp->source;
1333 *local_port = tcp->dest;
1334
1335 return;
1336}
1337
1338static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
1339{
1340 struct c4iw_ep *child_ep, *parent_ep;
1341 struct cpl_pass_accept_req *req = cplhdr(skb);
1342 unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
1343 struct tid_info *t = dev->rdev.lldi.tids;
1344 unsigned int hwtid = GET_TID(req);
1345 struct dst_entry *dst;
1346 struct l2t_entry *l2t;
1347 struct rtable *rt;
1348 __be32 local_ip, peer_ip;
1349 __be16 local_port, peer_port;
1350 struct net_device *pdev;
1351 u32 tx_chan, smac_idx;
1352 u16 rss_qid;
1353 u32 mtu;
1354 int step;
 1355 int txq_idx, ctrlq_idx;
1356
1357 parent_ep = lookup_stid(t, stid);
1358 PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
1359
1360 get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);
1361
1362 if (state_read(&parent_ep->com) != LISTEN) {
1363 printk(KERN_ERR "%s - listening ep not in LISTEN\n",
1364 __func__);
1365 goto reject;
1366 }
1367
1368 /* Find output route */
1369 rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
1370 GET_POPEN_TOS(ntohl(req->tos_stid)));
1371 if (!rt) {
1372 printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
1373 __func__);
1374 goto reject;
1375 }
 1376 dst = &rt->dst;
1377 if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
1378 pdev = ip_dev_find(&init_net, peer_ip);
1379 BUG_ON(!pdev);
1380 l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
1381 pdev, 0);
1382 mtu = pdev->mtu;
1383 tx_chan = cxgb4_port_chan(pdev);
 1384 smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
1385 step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
1386 txq_idx = cxgb4_port_idx(pdev) * step;
 1387 ctrlq_idx = cxgb4_port_idx(pdev);
1388 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
1389 rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
1390 dev_put(pdev);
1391 } else {
1392 l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
1393 dst->neighbour->dev, 0);
1394 mtu = dst_mtu(dst);
1395 tx_chan = cxgb4_port_chan(dst->neighbour->dev);
 1396 smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1;
1397 step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
1398 txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
 1399 ctrlq_idx = cxgb4_port_idx(dst->neighbour->dev);
1400 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
1401 rss_qid = dev->rdev.lldi.rxq_ids[
1402 cxgb4_port_idx(dst->neighbour->dev) * step];
1403 }
1404 if (!l2t) {
1405 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
1406 __func__);
1407 dst_release(dst);
1408 goto reject;
1409 }
1410
1411 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
1412 if (!child_ep) {
1413 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
1414 __func__);
1415 cxgb4_l2t_release(l2t);
1416 dst_release(dst);
1417 goto reject;
1418 }
1419 state_set(&child_ep->com, CONNECTING);
1420 child_ep->com.dev = dev;
1421 child_ep->com.cm_id = NULL;
1422 child_ep->com.local_addr.sin_family = PF_INET;
1423 child_ep->com.local_addr.sin_port = local_port;
1424 child_ep->com.local_addr.sin_addr.s_addr = local_ip;
1425 child_ep->com.remote_addr.sin_family = PF_INET;
1426 child_ep->com.remote_addr.sin_port = peer_port;
1427 child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
1428 c4iw_get_ep(&parent_ep->com);
1429 child_ep->parent_ep = parent_ep;
1430 child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
1431 child_ep->l2t = l2t;
1432 child_ep->dst = dst;
1433 child_ep->hwtid = hwtid;
1434 child_ep->tx_chan = tx_chan;
1435 child_ep->smac_idx = smac_idx;
1436 child_ep->rss_qid = rss_qid;
1437 child_ep->mtu = mtu;
1438 child_ep->txq_idx = txq_idx;
 1439 child_ep->ctrlq_idx = ctrlq_idx;
1440
1441 PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
1442 tx_chan, smac_idx, rss_qid);
1443
1444 init_timer(&child_ep->timer);
1445 cxgb4_insert_tid(t, child_ep, hwtid);
1446 accept_cr(child_ep, peer_ip, skb, req);
1447 goto out;
1448reject:
1449 reject_cr(dev, hwtid, peer_ip, skb);
1450out:
1451 return 0;
1452}
1453
1454static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
1455{
1456 struct c4iw_ep *ep;
1457 struct cpl_pass_establish *req = cplhdr(skb);
1458 struct tid_info *t = dev->rdev.lldi.tids;
1459 unsigned int tid = GET_TID(req);
1460
1461 ep = lookup_tid(t, tid);
1462 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1463 ep->snd_seq = be32_to_cpu(req->snd_isn);
1464 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
1465
1466 set_emss(ep, ntohs(req->tcp_opt));
1467
1468 dst_confirm(ep->dst);
1469 state_set(&ep->com, MPA_REQ_WAIT);
1470 start_ep_timer(ep);
1471 send_flowc(ep, skb);
1472
1473 return 0;
1474}
1475
1476static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
1477{
1478 struct cpl_peer_close *hdr = cplhdr(skb);
1479 struct c4iw_ep *ep;
1480 struct c4iw_qp_attributes attrs;
1481 unsigned long flags;
1482 int disconnect = 1;
1483 int release = 0;
1484 int closing = 0;
1485 struct tid_info *t = dev->rdev.lldi.tids;
1486 unsigned int tid = GET_TID(hdr);
1487
1488 ep = lookup_tid(t, tid);
1489 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1490 dst_confirm(ep->dst);
1491
1492 spin_lock_irqsave(&ep->com.lock, flags);
1493 switch (ep->com.state) {
1494 case MPA_REQ_WAIT:
1495 __state_set(&ep->com, CLOSING);
1496 break;
1497 case MPA_REQ_SENT:
1498 __state_set(&ep->com, CLOSING);
1499 connect_reply_upcall(ep, -ECONNRESET);
1500 break;
1501 case MPA_REQ_RCVD:
1502
1503 /*
1504 * We're gonna mark this puppy DEAD, but keep
1505 * the reference on it until the ULP accepts or
1506 * rejects the CR. Also wake up anyone waiting
1507 * in rdma connection migration (see c4iw_accept_cr()).
1508 */
1509 __state_set(&ep->com, CLOSING);
1510 ep->com.rpl_done = 1;
1511 ep->com.rpl_err = -ECONNRESET;
1512 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1513 wake_up(&ep->com.waitq);
1514 break;
1515 case MPA_REP_SENT:
1516 __state_set(&ep->com, CLOSING);
1517 ep->com.rpl_done = 1;
1518 ep->com.rpl_err = -ECONNRESET;
1519 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1520 wake_up(&ep->com.waitq);
1521 break;
1522 case FPDU_MODE:
 1523 start_ep_timer(ep);
1524 __state_set(&ep->com, CLOSING);
1525 closing = 1;
1526 peer_close_upcall(ep);
1527 break;
1528 case ABORTING:
1529 disconnect = 0;
1530 break;
1531 case CLOSING:
1532 __state_set(&ep->com, MORIBUND);
1533 disconnect = 0;
1534 break;
1535 case MORIBUND:
 1536 stop_ep_timer(ep);
1537 if (ep->com.cm_id && ep->com.qp) {
1538 attrs.next_state = C4IW_QP_STATE_IDLE;
1539 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1540 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1541 }
1542 close_complete_upcall(ep);
1543 __state_set(&ep->com, DEAD);
1544 release = 1;
1545 disconnect = 0;
1546 break;
1547 case DEAD:
1548 disconnect = 0;
1549 break;
1550 default:
1551 BUG_ON(1);
1552 }
1553 spin_unlock_irqrestore(&ep->com.lock, flags);
1554 if (closing) {
1555 attrs.next_state = C4IW_QP_STATE_CLOSING;
1556 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1557 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1558 }
1559 if (disconnect)
1560 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
1561 if (release)
1562 release_ep_resources(ep);
1563 return 0;
1564}
1565
1566/*
1567 * Returns whether an ABORT_REQ_RSS message is a negative advice.
1568 */
1569static int is_neg_adv_abort(unsigned int status)
1570{
1571 return status == CPL_ERR_RTX_NEG_ADVICE ||
1572 status == CPL_ERR_PERSIST_NEG_ADVICE;
1573}
1574
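/*
 * "Negative advice" means the hardware is reporting retransmission or
 * persist-timer trouble rather than a peer reset; peer_abort() below
 * ignores such aborts and lets the connection either recover or time
 * out on its own.
 */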
1575static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
1576{
1577 struct cpl_abort_req_rss *req = cplhdr(skb);
1578 struct c4iw_ep *ep;
1579 struct cpl_abort_rpl *rpl;
1580 struct sk_buff *rpl_skb;
1581 struct c4iw_qp_attributes attrs;
1582 int ret;
1583 int release = 0;
1584 unsigned long flags;
1585 struct tid_info *t = dev->rdev.lldi.tids;
1586 unsigned int tid = GET_TID(req);
1587
1588 ep = lookup_tid(t, tid);
1589 if (is_neg_adv_abort(req->status)) {
1590 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
1591 ep->hwtid);
1592 return 0;
1593 }
1594 spin_lock_irqsave(&ep->com.lock, flags);
1595 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
1596 ep->com.state);
1597 switch (ep->com.state) {
1598 case CONNECTING:
1599 break;
1600 case MPA_REQ_WAIT:
 1601 stop_ep_timer(ep);
1602 break;
1603 case MPA_REQ_SENT:
 1604 stop_ep_timer(ep);
1605 connect_reply_upcall(ep, -ECONNRESET);
1606 break;
1607 case MPA_REP_SENT:
1608 ep->com.rpl_done = 1;
1609 ep->com.rpl_err = -ECONNRESET;
1610 PDBG("waking up ep %p\n", ep);
1611 wake_up(&ep->com.waitq);
1612 break;
1613 case MPA_REQ_RCVD:
1614
1615 /*
1616 * We're gonna mark this puppy DEAD, but keep
1617 * the reference on it until the ULP accepts or
1618 * rejects the CR. Also wake up anyone waiting
1619 * in rdma connection migration (see c4iw_accept_cr()).
1620 */
1621 ep->com.rpl_done = 1;
1622 ep->com.rpl_err = -ECONNRESET;
1623 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1624 wake_up(&ep->com.waitq);
1625 break;
1626 case MORIBUND:
1627 case CLOSING:
 1628 stop_ep_timer(ep);
1629 /*FALLTHROUGH*/
1630 case FPDU_MODE:
1631 if (ep->com.cm_id && ep->com.qp) {
1632 attrs.next_state = C4IW_QP_STATE_ERROR;
1633 ret = c4iw_modify_qp(ep->com.qp->rhp,
1634 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
1635 &attrs, 1);
1636 if (ret)
1637 printk(KERN_ERR MOD
1638 "%s - qp <- error failed!\n",
1639 __func__);
1640 }
1641 peer_abort_upcall(ep);
1642 break;
1643 case ABORTING:
1644 break;
1645 case DEAD:
1646 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
1647 spin_unlock_irqrestore(&ep->com.lock, flags);
1648 return 0;
1649 default:
1650 BUG_ON(1);
1651 break;
1652 }
1653 dst_confirm(ep->dst);
1654 if (ep->com.state != ABORTING) {
1655 __state_set(&ep->com, DEAD);
1656 release = 1;
1657 }
1658 spin_unlock_irqrestore(&ep->com.lock, flags);
1659
1660 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
1661 if (!rpl_skb) {
1662 printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
1663 __func__);
1664 release = 1;
1665 goto out;
1666 }
1667 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1668 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
1669 INIT_TP_WR(rpl, ep->hwtid);
1670 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
1671 rpl->cmd = CPL_ABORT_NO_RST;
1672 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
1673out:
1674 if (release)
1675 release_ep_resources(ep);
1676 return 0;
1677}
1678
1679static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1680{
1681 struct c4iw_ep *ep;
1682 struct c4iw_qp_attributes attrs;
1683 struct cpl_close_con_rpl *rpl = cplhdr(skb);
1684 unsigned long flags;
1685 int release = 0;
1686 struct tid_info *t = dev->rdev.lldi.tids;
1687 unsigned int tid = GET_TID(rpl);
1688
1689 ep = lookup_tid(t, tid);
1690
1691 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1692 BUG_ON(!ep);
1693
1694 /* The cm_id may be null if we failed to connect */
1695 spin_lock_irqsave(&ep->com.lock, flags);
1696 switch (ep->com.state) {
1697 case CLOSING:
1698 __state_set(&ep->com, MORIBUND);
1699 break;
1700 case MORIBUND:
 1701 stop_ep_timer(ep);
1702 if ((ep->com.cm_id) && (ep->com.qp)) {
1703 attrs.next_state = C4IW_QP_STATE_IDLE;
1704 c4iw_modify_qp(ep->com.qp->rhp,
1705 ep->com.qp,
1706 C4IW_QP_ATTR_NEXT_STATE,
1707 &attrs, 1);
1708 }
1709 close_complete_upcall(ep);
1710 __state_set(&ep->com, DEAD);
1711 release = 1;
1712 break;
1713 case ABORTING:
1714 case DEAD:
1715 break;
1716 default:
1717 BUG_ON(1);
1718 break;
1719 }
1720 spin_unlock_irqrestore(&ep->com.lock, flags);
1721 if (release)
1722 release_ep_resources(ep);
1723 return 0;
1724}
1725
1726static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
1727{
 1728 struct cpl_rdma_terminate *rpl = cplhdr(skb);
 1729 struct tid_info *t = dev->rdev.lldi.tids;
1730 unsigned int tid = GET_TID(rpl);
1731 struct c4iw_ep *ep;
1732 struct c4iw_qp_attributes attrs;
cfdda9d7
SW
1733
1734 ep = lookup_tid(t, tid);
 1735 BUG_ON(!ep);
 1736
1737 if (ep->com.qp) {
1738 printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
1739 ep->com.qp->wq.sq.qid);
1740 attrs.next_state = C4IW_QP_STATE_TERMINATE;
1741 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1742 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1743 } else
1744 printk(KERN_WARNING MOD "TERM received tid %u no qp\n", tid);
 1745
1746 return 0;
1747}
1748
1749/*
1750 * Upcall from the adapter indicating data has been transmitted.
 1751 * For us it's just the single MPA request or reply. We can now free
1752 * the skb holding the mpa message.
1753 */
1754static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
1755{
1756 struct c4iw_ep *ep;
1757 struct cpl_fw4_ack *hdr = cplhdr(skb);
1758 u8 credits = hdr->credits;
1759 unsigned int tid = GET_TID(hdr);
1760 struct tid_info *t = dev->rdev.lldi.tids;
1761
1762
1763 ep = lookup_tid(t, tid);
1764 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
1765 if (credits == 0) {
1766 PDBG(KERN_ERR "%s 0 credit ack ep %p tid %u state %u\n",
1767 __func__, ep, ep->hwtid, state_read(&ep->com));
1768 return 0;
1769 }
1770
1771 dst_confirm(ep->dst);
1772 if (ep->mpa_skb) {
1773 PDBG("%s last streaming msg ack ep %p tid %u state %u "
1774 "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
1775 state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
1776 kfree_skb(ep->mpa_skb);
1777 ep->mpa_skb = NULL;
1778 }
1779 return 0;
1780}
1781
1782int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
1783{
1784 int err;
1785 struct c4iw_ep *ep = to_ep(cm_id);
1786 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1787
1788 if (state_read(&ep->com) == DEAD) {
1789 c4iw_put_ep(&ep->com);
1790 return -ECONNRESET;
1791 }
1792 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1793 if (mpa_rev == 0)
1794 abort_connection(ep, NULL, GFP_KERNEL);
1795 else {
1796 err = send_mpa_reject(ep, pdata, pdata_len);
1797 err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
1798 }
1799 c4iw_put_ep(&ep->com);
1800 return 0;
1801}
1802
1803int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1804{
1805 int err;
1806 struct c4iw_qp_attributes attrs;
1807 enum c4iw_qp_attr_mask mask;
1808 struct c4iw_ep *ep = to_ep(cm_id);
1809 struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
1810 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
1811
1812 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1813 if (state_read(&ep->com) == DEAD) {
1814 err = -ECONNRESET;
1815 goto err;
1816 }
1817
1818 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1819 BUG_ON(!qp);
1820
1821 if ((conn_param->ord > c4iw_max_read_depth) ||
1822 (conn_param->ird > c4iw_max_read_depth)) {
1823 abort_connection(ep, NULL, GFP_KERNEL);
1824 err = -EINVAL;
1825 goto err;
1826 }
1827
1828 cm_id->add_ref(cm_id);
1829 ep->com.cm_id = cm_id;
1830 ep->com.qp = qp;
1831
1832 ep->ird = conn_param->ird;
1833 ep->ord = conn_param->ord;
1834
1835 if (peer2peer && ep->ird == 0)
1836 ep->ird = 1;
1837
1838 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
1839
1840 /* bind QP to EP and move to RTS */
1841 attrs.mpa_attr = ep->mpa_attr;
1842 attrs.max_ird = ep->ird;
1843 attrs.max_ord = ep->ord;
1844 attrs.llp_stream_handle = ep;
1845 attrs.next_state = C4IW_QP_STATE_RTS;
1846
1847 /* bind QP and TID with INIT_WR */
1848 mask = C4IW_QP_ATTR_NEXT_STATE |
1849 C4IW_QP_ATTR_LLP_STREAM_HANDLE |
1850 C4IW_QP_ATTR_MPA_ATTR |
1851 C4IW_QP_ATTR_MAX_IRD |
1852 C4IW_QP_ATTR_MAX_ORD;
1853
1854 err = c4iw_modify_qp(ep->com.qp->rhp,
1855 ep->com.qp, mask, &attrs, 1);
1856 if (err)
1857 goto err1;
1858 err = send_mpa_reply(ep, conn_param->private_data,
1859 conn_param->private_data_len);
1860 if (err)
1861 goto err1;
1862
1863 state_set(&ep->com, FPDU_MODE);
1864 established_upcall(ep);
1865 c4iw_put_ep(&ep->com);
1866 return 0;
1867err1:
1868 ep->com.cm_id = NULL;
1869 ep->com.qp = NULL;
1870 cm_id->rem_ref(cm_id);
1871err:
1872 c4iw_put_ep(&ep->com);
1873 return err;
1874}
1875
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_ep *ep;
	struct rtable *rt;
	struct net_device *pdev;
	int step;

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		err = -EINVAL;
		goto out;
	}
	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	cm_id->add_ref(cm_id);
	ep->com.dev = dev;
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(dev, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
	     ntohl(cm_id->local_addr.sin_addr.s_addr),
	     ntohs(cm_id->local_addr.sin_port),
	     ntohl(cm_id->remote_addr.sin_addr.s_addr),
	     ntohs(cm_id->remote_addr.sin_port));

	/* find a route */
	rt = find_route(dev,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, 0);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->dst;

	/* get a l2t entry */
	if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) {
		PDBG("%s LOOPBACK\n", __func__);
		pdev = ip_dev_find(&init_net,
				   cm_id->remote_addr.sin_addr.s_addr);
		if (!pdev) {
			/* ip_dev_find() can fail; don't dereference NULL */
			err = -ENODEV;
			goto fail4;
		}
		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
					ep->dst->neighbour,
					pdev, 0);
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
		step = ep->com.dev->rdev.lldi.ntxq /
		       ep->com.dev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = ep->com.dev->rdev.lldi.nrxq /
		       ep->com.dev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(pdev) * step];
		dev_put(pdev);
	} else {
		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
					ep->dst->neighbour,
					ep->dst->neighbour->dev, 0);
		ep->mtu = dst_mtu(ep->dst);
		ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev);
		ep->smac_idx = (cxgb4_port_viid(ep->dst->neighbour->dev) &
				0x7F) << 1;
		step = ep->com.dev->rdev.lldi.ntxq /
		       ep->com.dev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(ep->dst->neighbour->dev);
		step = ep->com.dev->rdev.lldi.nrxq /
		       ep->com.dev->rdev.lldi.nchan;
		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(ep->dst->neighbour->dev) * step];
	}
	if (!ep->l2t) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		err = -ENOMEM;
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = 0;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
out:
	return err;
}

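/*
 * Set up a passive (listening) endpoint: allocate a server TID,
 * install the server with the hardware, and wait for the
 * PASS_OPEN_RPL before declaring the listen established.
 */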
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.dev = dev;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
				  ep->com.local_addr.sin_addr.s_addr,
				  ep->com.local_addr.sin_port,
				  ep->com.dev->rdev.lldi.rxq_ids[0]);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
	if (ep->com.rpl_done)
		err = ep->com.rpl_err;
	else {
		printk(KERN_ERR MOD "Device %s not responding!\n",
		       pci_name(ep->com.dev->rdev.lldi.pdev));
		ep->com.dev->rdev.flags = T4_FATAL_ERROR;
		err = -EIO;
	}
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
fail1:
out:
	return err;
}

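/*
 * Tear down a listening endpoint: stop the hardware listener, wait
 * for the CLOSE_LISTSRV_RPL, then release the server TID and the
 * endpoint references.
 */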
int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	err = listen_stop(ep);
	if (err)
		goto done;
	wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
	if (ep->com.rpl_done)
		err = ep->com.rpl_err;
	else {
		printk(KERN_ERR MOD "Device %s not responding!\n",
		       pci_name(ep->com.dev->rdev.lldi.pdev));
		ep->com.dev->rdev.flags = T4_FATAL_ERROR;
		err = -EIO;
	}
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
done:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
	return err;
}

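/*
 * Start an orderly (half-close) or abortive disconnect, driven by the
 * current endpoint state.  On a fatal adapter error the endpoint is
 * moved straight to DEAD and its resources are released.
 */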
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	unsigned long flags;
	int close = 0;
	int fatal = 0;
	struct c4iw_rdev *rdev;

	spin_lock_irqsave(&ep->com.lock, flags);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	rdev = &ep->com.dev->rdev;
	if (c4iw_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (close) {
		if (abrupt)
			ret = abort_connection(ep, NULL, gfp);
		else
			ret = send_halfclose(ep, gfp);
		if (ret)
			fatal = 1;
	}
	if (fatal)
		release_ep_resources(ep);
	return ret;
}

/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack
};

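/*
 * Handle one timed-out endpoint: abort the connection in the states
 * where a timeout is expected, and warn on any other state.
 */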
static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int abort = 1;

	spin_lock_irq(&ep->com.lock);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected state ep %p tid %u state %u\n",
		       __func__, ep, ep->hwtid, ep->com.state);
		WARN_ON(1);
		abort = 0;
	}
	spin_unlock_irq(&ep->com.lock);
	if (abort)
		abort_connection(ep, NULL, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
}

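/*
 * Drain the global timeout_list, dropping timeout_lock around each
 * process_timeout() call since that handler can sleep.
 */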
static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock_irq(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;

		tmp = timeout_list.next;
		list_del(tmp);
		spin_unlock_irq(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock_irq(&timeout_lock);
	}
	spin_unlock_irq(&timeout_lock);
}

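/*
 * Work function: pull queued CPL messages off rxq, dispatch each to
 * the matching work_handlers entry, then reap any timed-out endpoints.
 */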
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	struct cpl_act_establish *rpl;
	unsigned int opcode;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		if (!ret)
			kfree_skb(skb);
	}
	process_timedout_eps();
}

static DECLARE_WORK(skb_work, process_work);

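/*
 * Timer callback: runs in (soft)irq context, so just queue the
 * endpoint on timeout_list and kick the worker thread.
 */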
static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;

	spin_lock(&timeout_lock);
	list_add_tail(&ep->entry, &timeout_list);
	spin_unlock(&timeout_lock);
	queue_work(workq, &skb_work);
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{
	/*
	 * Save dev in the skb->cb area.
	 */
	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}

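/*
 * SET_TCB_RPL needs no deferred work; just log an unexpected status.
 */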
static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	return 0;
}

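/*
 * FW6_MSG carries firmware notifications: type 1 completes a pending
 * work request (waking its c4iw_wr_wait), type 2 delivers a CQE to
 * the event dispatcher.
 */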
static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	PDBG("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case 1:
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long)
			   rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp) {
			wr_waitp->ret = ret;
			wr_waitp->done = 1;
			wake_up(&wr_waitp->wait);
		}
		break;
	case 2:
		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n",
		       __func__, rpl->type);
		break;
	}
	return 0;
}

/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = sched,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_FW4_ACK] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
	[CPL_FW6_MSG] = fw6_msg
};

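/*
 * Module init for the CM: set up the timeout lock, the receive queue,
 * and the single-threaded workqueue that runs all CM handlers.
 */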
int __init c4iw_cm_init(void)
{
	spin_lock_init(&timeout_lock);
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb4");
	if (!workq)
		return -ENOMEM;

	return 0;
}

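/*
 * Module teardown: by now every endpoint timer should have been
 * reaped, so warn if timeout_list is non-empty, then flush and
 * destroy the workqueue.
 */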
void __exit c4iw_cm_term(void)
{
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(workq);
	destroy_workqueue(workq);
}