/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>

#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);


static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs;
static int use_lro;
static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
static int num_tx_qps = EHEA_NUM_TX_QP;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
module_param(use_lro, int, 0);
module_param(lro_max_aggr, int, 0);
module_param(num_tx_qps, int, 0);

MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
                 "port to stack. 1:yes, 0:no. Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 ");

MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = "
                 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
                 "Default = 0");

static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
struct work_struct ehea_rereg_mr_task;
static DEFINE_MUTEX(dlpar_mem_lock);
struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;


static int __devinit ehea_probe_adapter(struct platform_device *dev,
                                        const struct of_device_id *id);

static int __devexit ehea_remove(struct platform_device *dev);

static struct of_device_id ehea_device_table[] = {
        {
                .name = "lhea",
                .compatible = "IBM,lhea",
        },
        {},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct of_platform_driver ehea_driver = {
        .driver = {
                .name = "ehea",
                .owner = THIS_MODULE,
                .of_match_table = ehea_device_table,
        },
        .probe = ehea_probe_adapter,
        .remove = ehea_remove,
};

void ehea_dump(void *adr, int len, char *msg)
{
        int x;
        unsigned char *deb = adr;
        for (x = 0; x < len; x += 16) {
                printk(DRV_NAME " %s adr=%p ofs=%04x %016llx %016llx\n", msg,
                       deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
                deb += 16;
        }
}

void ehea_schedule_port_reset(struct ehea_port *port)
{
        if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
                schedule_work(&port->reset_task);
}

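/*
 * Snapshot all firmware handles (QPs, CQs, EQs, MRs, NEQ) of the
 * registered adapters and active ports into one flat array. Judging by
 * the <asm/kexec.h> include above, this array exists so the crash/kexec
 * path can release firmware resources without walking driver structures.
 */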
static void ehea_update_firmware_handles(void)
{
        struct ehea_fw_handle_entry *arr = NULL;
        struct ehea_adapter *adapter;
        int num_adapters = 0;
        int num_ports = 0;
        int num_portres = 0;
        int i = 0;
        int num_fw_handles, k, l;

        /* Determine number of handles */
        mutex_lock(&ehea_fw_handles.lock);

        list_for_each_entry(adapter, &adapter_list, list) {
                num_adapters++;

                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP))
                                continue;

                        num_ports++;
                        num_portres += port->num_def_qps + port->num_add_tx_qps;
                }
        }

        num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
                         num_ports * EHEA_NUM_PORT_FW_HANDLES +
                         num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

        if (num_fw_handles) {
                arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
                if (!arr)
                        goto out;  /* Keep the existing array */
        } else
                goto out_update;

        list_for_each_entry(adapter, &adapter_list, list) {
                if (num_adapters == 0)
                        break;

                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP) ||
                            (num_ports == 0))
                                continue;

                        for (l = 0;
                             l < port->num_def_qps + port->num_add_tx_qps;
                             l++) {
                                struct ehea_port_res *pr = &port->port_res[l];

                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->qp->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->send_cq->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->recv_cq->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->eq->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->send_mr.handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->recv_mr.handle;
                        }
                        arr[i].adh = adapter->handle;
                        arr[i++].fwh = port->qp_eq->fw_handle;
                        num_ports--;
                }

                arr[i].adh = adapter->handle;
                arr[i++].fwh = adapter->neq->fw_handle;

                if (adapter->mr.handle) {
                        arr[i].adh = adapter->handle;
                        arr[i++].fwh = adapter->mr.handle;
                }
                num_adapters--;
        }

out_update:
        kfree(ehea_fw_handles.arr);
        ehea_fw_handles.arr = arr;
        ehea_fw_handles.num_entries = i;
out:
        mutex_unlock(&ehea_fw_handles.lock);
}

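/*
 * Rebuild the flat array mirroring all broadcast/multicast registrations
 * of the active ports. Runs under ehea_bcmc_regs.lock with interrupts
 * disabled, hence the GFP_ATOMIC allocation below.
 */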
static void ehea_update_bcmc_registrations(void)
{
        unsigned long flags;
        struct ehea_bcmc_reg_entry *arr = NULL;
        struct ehea_adapter *adapter;
        struct ehea_mc_list *mc_entry;
        int num_registrations = 0;
        int i = 0;
        int k;

        spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

        /* Determine number of registrations */
        list_for_each_entry(adapter, &adapter_list, list)
                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP))
                                continue;

                        num_registrations += 2; /* Broadcast registrations */

                        list_for_each_entry(mc_entry, &port->mc_list->list, list)
                                num_registrations += 2;
                }

        if (num_registrations) {
                arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
                if (!arr)
                        goto out;  /* Keep the existing array */
        } else
                goto out_update;

        list_for_each_entry(adapter, &adapter_list, list) {
                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP))
                                continue;

                        if (num_registrations == 0)
                                goto out_update;

                        arr[i].adh = adapter->handle;
                        arr[i].port_id = port->logical_port_id;
                        arr[i].reg_type = EHEA_BCMC_BROADCAST |
                                          EHEA_BCMC_UNTAGGED;
                        arr[i++].macaddr = port->mac_addr;

                        arr[i].adh = adapter->handle;
                        arr[i].port_id = port->logical_port_id;
                        arr[i].reg_type = EHEA_BCMC_BROADCAST |
                                          EHEA_BCMC_VLANID_ALL;
                        arr[i++].macaddr = port->mac_addr;
                        num_registrations -= 2;

                        list_for_each_entry(mc_entry,
                                            &port->mc_list->list, list) {
                                if (num_registrations == 0)
                                        goto out_update;

                                arr[i].adh = adapter->handle;
                                arr[i].port_id = port->logical_port_id;
                                arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
                                                  EHEA_BCMC_MULTICAST |
                                                  EHEA_BCMC_UNTAGGED;
                                arr[i++].macaddr = mc_entry->macaddr;

                                arr[i].adh = adapter->handle;
                                arr[i].port_id = port->logical_port_id;
                                arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
                                                  EHEA_BCMC_MULTICAST |
                                                  EHEA_BCMC_VLANID_ALL;
                                arr[i++].macaddr = mc_entry->macaddr;
                                num_registrations -= 2;
                        }
                }
        }

out_update:
        kfree(ehea_bcmc_regs.arr);
        ehea_bcmc_regs.arr = arr;
        ehea_bcmc_regs.num_entries = i;
out:
        spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}

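/*
 * Gather interface statistics: packet and byte counters are summed over
 * the per-queue port resources, while multicast and error counters come
 * from the hypervisor via an H_PORT_CB2 query.
 */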
static struct net_device_stats *ehea_get_stats(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        struct net_device_stats *stats = &port->stats;
        struct hcp_ehea_port_cb2 *cb2;
        u64 hret, rx_packets, tx_packets, rx_bytes = 0, tx_bytes = 0;
        int i;

        memset(stats, 0, sizeof(*stats));

        cb2 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb2) {
                ehea_error("no mem for cb2");
                goto out;
        }

        hret = ehea_h_query_ehea_port(port->adapter->handle,
                                      port->logical_port_id,
                                      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
        if (hret != H_SUCCESS) {
                ehea_error("query_ehea_port failed");
                goto out_herr;
        }

        if (netif_msg_hw(port))
                ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

        rx_packets = 0;
        for (i = 0; i < port->num_def_qps; i++) {
                rx_packets += port->port_res[i].rx_packets;
                rx_bytes   += port->port_res[i].rx_bytes;
        }

        tx_packets = 0;
        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
                tx_packets += port->port_res[i].tx_packets;
                tx_bytes   += port->port_res[i].tx_bytes;
        }

        stats->tx_packets = tx_packets;
        stats->multicast = cb2->rxmcp;
        stats->rx_errors = cb2->rxuerr;
        stats->rx_bytes = rx_bytes;
        stats->tx_bytes = tx_bytes;
        stats->rx_packets = rx_packets;

out_herr:
        free_page((unsigned long)cb2);
out:
        return stats;
}

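/*
 * Replenish receive queue 1. WQEs that cannot be filled (allocation
 * failure, or a stopped transfer while memory is being re-registered)
 * are accounted in os_skbs and retried on the next refill; the doorbell
 * is rung only for the "adder" WQEs actually handed to the hardware.
 */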
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct net_device *dev = pr->port->netdev;
        int max_index_mask = pr->rq1_skba.len - 1;
        int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
        int adder = 0;
        int i;

        pr->rq1_skba.os_skbs = 0;

        if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
                if (nr_of_wqes > 0)
                        pr->rq1_skba.index = index;
                pr->rq1_skba.os_skbs = fill_wqes;
                return;
        }

        for (i = 0; i < fill_wqes; i++) {
                if (!skb_arr_rq1[index]) {
                        skb_arr_rq1[index] = netdev_alloc_skb(dev,
                                                              EHEA_L_PKT_SIZE);
                        if (!skb_arr_rq1[index]) {
                                pr->rq1_skba.os_skbs = fill_wqes - i;
                                break;
                        }
                }
                index--;
                index &= max_index_mask;
                adder++;
        }

        if (adder == 0)
                return;

        /* Ring doorbell */
        ehea_update_rq1a(pr->qp, adder);
}

static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct net_device *dev = pr->port->netdev;
        int i;

        for (i = 0; i < pr->rq1_skba.len; i++) {
                skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
                if (!skb_arr_rq1[i])
                        break;
        }
        /* Ring doorbell */
        ehea_update_rq1a(pr->qp, nr_rq1a);
}

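/*
 * Common refill path for receive queues 2 and 3: allocate skbs, map
 * their data areas and post receive WQEs. Returns -ENOMEM only when the
 * queue has run dry.
 */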
static int ehea_refill_rq_def(struct ehea_port_res *pr,
                              struct ehea_q_skb_arr *q_skba, int rq_nr,
                              int num_wqes, int wqe_type, int packet_size)
{
        struct net_device *dev = pr->port->netdev;
        struct ehea_qp *qp = pr->qp;
        struct sk_buff **skb_arr = q_skba->arr;
        struct ehea_rwqe *rwqe;
        int i, index, max_index_mask, fill_wqes;
        int adder = 0;
        int ret = 0;

        fill_wqes = q_skba->os_skbs + num_wqes;
        q_skba->os_skbs = 0;

        if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
                q_skba->os_skbs = fill_wqes;
                return ret;
        }

        index = q_skba->index;
        max_index_mask = q_skba->len - 1;
        for (i = 0; i < fill_wqes; i++) {
                u64 tmp_addr;
                struct sk_buff *skb;

                skb = netdev_alloc_skb_ip_align(dev, packet_size);
                if (!skb) {
                        q_skba->os_skbs = fill_wqes - i;
                        if (q_skba->os_skbs == q_skba->len - 2) {
                                ehea_info("%s: rq%i ran dry - no mem for skb",
                                          pr->port->netdev->name, rq_nr);
                                ret = -ENOMEM;
                        }
                        break;
                }

                skb_arr[index] = skb;
                tmp_addr = ehea_map_vaddr(skb->data);
                if (tmp_addr == -1) {
                        dev_kfree_skb(skb);
                        q_skba->os_skbs = fill_wqes - i;
                        ret = 0;
                        break;
                }

                rwqe = ehea_get_next_rwqe(qp, rq_nr);
                rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
                            | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
                rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
                rwqe->sg_list[0].vaddr = tmp_addr;
                rwqe->sg_list[0].len = packet_size;
                rwqe->data_segments = 1;

                index++;
                index &= max_index_mask;
                adder++;
        }

        q_skba->index = index;
        if (adder == 0)
                goto out;

        /* Ring doorbell */
        iosync();
        if (rq_nr == 2)
                ehea_update_rq2a(pr->qp, adder);
        else
                ehea_update_rq3a(pr->qp, adder);
out:
        return ret;
}


static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
        return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
                                  nr_of_wqes, EHEA_RWQE2_TYPE,
                                  EHEA_RQ2_PKT_SIZE);
}


static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
        return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
                                  nr_of_wqes, EHEA_RWQE3_TYPE,
                                  EHEA_MAX_PACKET_SIZE);
}

static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
        *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
        if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
                return 0;
        if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
            (cqe->header_length == 0))
                return 0;
        return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
                                 struct sk_buff *skb, struct ehea_cqe *cqe)
{
        int length = cqe->num_bytes_transfered - 4;     /* remove CRC */

        skb_put(skb, length);
        skb->protocol = eth_type_trans(skb, dev);

        /* The packet was not an IPV4 packet so a complemented checksum was
           calculated. The value is found in the Internet Checksum field. */
        if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold(~cqe->inet_checksum_value);
        } else
                skb->ip_summed = CHECKSUM_UNNECESSARY;
}

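/*
 * Look up the skb posted for a completed WQE, prefetching the cache
 * lines of the following array slot, which is likely to be needed next.
 */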
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
                                               int arr_len,
                                               struct ehea_cqe *cqe)
{
        int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
        struct sk_buff *skb;
        void *pref;
        int x;

        x = skb_index + 1;
        x &= (arr_len - 1);

        pref = skb_array[x];
        if (pref) {
                prefetchw(pref);
                prefetchw(pref + EHEA_CACHE_LINE);

                pref = (skb_array[x]->data);
                prefetch(pref);
                prefetch(pref + EHEA_CACHE_LINE);
                prefetch(pref + EHEA_CACHE_LINE * 2);
                prefetch(pref + EHEA_CACHE_LINE * 3);
        }

        skb = skb_array[skb_index];
        skb_array[skb_index] = NULL;
        return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
                                                  int arr_len, int wqe_index)
{
        struct sk_buff *skb;
        void *pref;
        int x;

        x = wqe_index + 1;
        x &= (arr_len - 1);

        pref = skb_array[x];
        if (pref) {
                prefetchw(pref);
                prefetchw(pref + EHEA_CACHE_LINE);

                pref = (skb_array[x]->data);
                prefetchw(pref);
                prefetchw(pref + EHEA_CACHE_LINE);
        }

        skb = skb_array[wqe_index];
        skb_array[wqe_index] = NULL;
        return skb;
}

static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
                                 struct ehea_cqe *cqe, int *processed_rq2,
                                 int *processed_rq3)
{
        struct sk_buff *skb;

        if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
                pr->p_stats.err_tcp_cksum++;
        if (cqe->status & EHEA_CQE_STAT_ERR_IP)
                pr->p_stats.err_ip_cksum++;
        if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
                pr->p_stats.err_frame_crc++;

        if (rq == 2) {
                *processed_rq2 += 1;
                skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
                dev_kfree_skb(skb);
        } else if (rq == 3) {
                *processed_rq3 += 1;
                skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
                dev_kfree_skb(skb);
        }

        if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
                if (netif_msg_rx_err(pr->port)) {
                        ehea_error("Critical receive error for QP %d. "
                                   "Resetting port.", pr->qp->init_attr.qp_nr);
                        ehea_dump(cqe, sizeof(*cqe), "CQE");
                }
                ehea_schedule_port_reset(pr->port);
                return 1;
        }

        return 0;
}

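/*
 * LRO callback: locate the IP and TCP headers of a received frame.
 * Returning -1 tells the LRO layer that the packet cannot be aggregated
 * (non-TCP or incomplete headers).
 */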
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
                       void **tcph, u64 *hdr_flags, void *priv)
{
        struct ehea_cqe *cqe = priv;
        unsigned int ip_len;
        struct iphdr *iph;

        /* non tcp/udp packets */
        if (!cqe->header_length)
                return -1;

        /* non tcp packet */
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
        if (iph->protocol != IPPROTO_TCP)
                return -1;

        ip_len = ip_hdrlen(skb);
        skb_set_transport_header(skb, ip_len);
        *tcph = tcp_hdr(skb);

        /* check if ip header and tcp header are complete */
        if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
                return -1;

        *hdr_flags = LRO_IPV4 | LRO_TCP;
        *iphdr = iph;

        return 0;
}

static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
                          struct sk_buff *skb)
{
        int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
                              pr->port->vgrp);

        if (use_lro) {
                if (vlan_extracted)
                        lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
                                                     pr->port->vgrp,
                                                     cqe->vlan_tag,
                                                     cqe);
                else
                        lro_receive_skb(&pr->lro_mgr, skb, cqe);
        } else {
                if (vlan_extracted)
                        vlan_hwaccel_receive_skb(skb, pr->port->vgrp,
                                                 cqe->vlan_tag);
                else
                        netif_receive_skb(skb);
        }
}

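/*
 * NAPI receive processing: drain up to "budget" completions from RQ1
 * (low latency; data is copied straight out of the CQE), RQ2 and RQ3,
 * pass the skbs to the stack (through LRO if enabled) and refill the
 * receive queues with the number of WQEs consumed per queue.
 */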
static int ehea_proc_rwqes(struct net_device *dev,
                           struct ehea_port_res *pr,
                           int budget)
{
        struct ehea_port *port = pr->port;
        struct ehea_qp *qp = pr->qp;
        struct ehea_cqe *cqe;
        struct sk_buff *skb;
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
        struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
        int skb_arr_rq1_len = pr->rq1_skba.len;
        int skb_arr_rq2_len = pr->rq2_skba.len;
        int skb_arr_rq3_len = pr->rq3_skba.len;
        int processed, processed_rq1, processed_rq2, processed_rq3;
        u64 processed_bytes = 0;
        int wqe_index, last_wqe_index, rq, port_reset;

        processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
        last_wqe_index = 0;

        cqe = ehea_poll_rq1(qp, &wqe_index);
        while ((processed < budget) && cqe) {
                ehea_inc_rq1(qp);
                processed_rq1++;
                processed++;
                if (netif_msg_rx_status(port))
                        ehea_dump(cqe, sizeof(*cqe), "CQE");

                last_wqe_index = wqe_index;
                rmb();
                if (!ehea_check_cqe(cqe, &rq)) {
                        if (rq == 1) {
                                /* LL RQ1 */
                                skb = get_skb_by_index_ll(skb_arr_rq1,
                                                          skb_arr_rq1_len,
                                                          wqe_index);
                                if (unlikely(!skb)) {
                                        if (netif_msg_rx_err(port))
                                                ehea_error("LL rq1: skb=NULL");

                                        skb = netdev_alloc_skb(dev,
                                                               EHEA_L_PKT_SIZE);
                                        if (!skb)
                                                break;
                                }
                                skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
                                                 cqe->num_bytes_transfered - 4);
                                ehea_fill_skb(dev, skb, cqe);
                        } else if (rq == 2) {
                                /* RQ2 */
                                skb = get_skb_by_index(skb_arr_rq2,
                                                       skb_arr_rq2_len, cqe);
                                if (unlikely(!skb)) {
                                        if (netif_msg_rx_err(port))
                                                ehea_error("rq2: skb=NULL");
                                        break;
                                }
                                ehea_fill_skb(dev, skb, cqe);
                                processed_rq2++;
                        } else {
                                /* RQ3 */
                                skb = get_skb_by_index(skb_arr_rq3,
                                                       skb_arr_rq3_len, cqe);
                                if (unlikely(!skb)) {
                                        if (netif_msg_rx_err(port))
                                                ehea_error("rq3: skb=NULL");
                                        break;
                                }
                                ehea_fill_skb(dev, skb, cqe);
                                processed_rq3++;
                        }

                        processed_bytes += skb->len;
                        ehea_proc_skb(pr, cqe, skb);
                } else {
                        pr->p_stats.poll_receive_errors++;
                        port_reset = ehea_treat_poll_error(pr, rq, cqe,
                                                           &processed_rq2,
                                                           &processed_rq3);
                        if (port_reset)
                                break;
                }
                cqe = ehea_poll_rq1(qp, &wqe_index);
        }
        if (use_lro)
                lro_flush_all(&pr->lro_mgr);

        pr->rx_packets += processed;
        pr->rx_bytes += processed_bytes;

        ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
        ehea_refill_rq2(pr, processed_rq2);
        ehea_refill_rq3(pr, processed_rq3);

        return processed;
}

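/*
 * To detect hardware and software send queues that have fallen out of
 * sync, check_sqs() posts a marker WQE (wr_id SWQE_RESTART_CHECK, tagged
 * EHEA_SWQE_PURGE) on every send queue. Its completion sets
 * sq_restart_flag in ehea_proc_cqes(); if that does not happen within
 * 100ms, the port is reset.
 */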
#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
        int i;

        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];
                pr->sq_restart_flag = 0;
        }
        wake_up(&port->restart_wq);
}

static void check_sqs(struct ehea_port *port)
{
        struct ehea_swqe *swqe;
        int swqe_index;
        int i, k;

        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];
                int ret;
                k = 0;
                swqe = ehea_get_swqe(pr->qp, &swqe_index);
                memset(swqe, 0, SWQE_HEADER_SIZE);
                atomic_dec(&pr->swqe_avail);

                swqe->tx_control |= EHEA_SWQE_PURGE;
                swqe->wr_id = SWQE_RESTART_CHECK;
                swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
                swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
                swqe->immediate_data_length = 80;

                ehea_post_swqe(pr->qp, swqe);

                ret = wait_event_timeout(port->restart_wq,
                                         pr->sq_restart_flag == 0,
                                         msecs_to_jiffies(100));

                if (!ret) {
                        ehea_error("HW/SW queues out of sync");
                        ehea_schedule_port_reset(pr->port);
                        return;
                }
        }
}

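/*
 * Process send completions: free transmitted skbs, return the WQE
 * credits to swqe_avail and wake the netif queue once enough send WQEs
 * are available again.
 */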
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
        struct sk_buff *skb;
        struct ehea_cq *send_cq = pr->send_cq;
        struct ehea_cqe *cqe;
        int quota = my_quota;
        int cqe_counter = 0;
        int swqe_av = 0;
        int index;
        unsigned long flags;

        cqe = ehea_poll_cq(send_cq);
        while (cqe && (quota > 0)) {
                ehea_inc_cq(send_cq);

                cqe_counter++;
                rmb();

                if (cqe->wr_id == SWQE_RESTART_CHECK) {
                        pr->sq_restart_flag = 1;
                        swqe_av++;
                        break;
                }

                if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
                        ehea_error("Bad send completion status=0x%04X",
                                   cqe->status);

                        if (netif_msg_tx_err(pr->port))
                                ehea_dump(cqe, sizeof(*cqe), "Send CQE");

                        if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
                                ehea_error("Resetting port");
                                ehea_schedule_port_reset(pr->port);
                                break;
                        }
                }

                if (netif_msg_tx_done(pr->port))
                        ehea_dump(cqe, sizeof(*cqe), "CQE");

                if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
                           == EHEA_SWQE2_TYPE)) {

                        index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
                        skb = pr->sq_skba.arr[index];
                        dev_kfree_skb(skb);
                        pr->sq_skba.arr[index] = NULL;
                }

                swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
                quota--;

                cqe = ehea_poll_cq(send_cq);
        }

        ehea_update_feca(send_cq, cqe_counter);
        atomic_add(swqe_av, &pr->swqe_avail);

        spin_lock_irqsave(&pr->netif_queue, flags);

        if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
                                  >= pr->swqe_refill_th)) {
                netif_wake_queue(pr->port->netdev);
                pr->queue_stopped = 0;
        }
        spin_unlock_irqrestore(&pr->netif_queue, flags);
        wake_up(&pr->port->swqe_avail_wq);

        return cqe;
}

#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
#define EHEA_POLL_MAX_CQES 65535

static int ehea_poll(struct napi_struct *napi, int budget)
{
        struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
                                                napi);
        struct net_device *dev = pr->port->netdev;
        struct ehea_cqe *cqe;
        struct ehea_cqe *cqe_skb = NULL;
        int force_irq, wqe_index;
        int rx = 0;

        force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
        cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);

        if (!force_irq)
                rx += ehea_proc_rwqes(dev, pr, budget - rx);

        while ((rx != budget) || force_irq) {
                pr->poll_counter = 0;
                force_irq = 0;
                napi_complete(napi);
                ehea_reset_cq_ep(pr->recv_cq);
                ehea_reset_cq_ep(pr->send_cq);
                ehea_reset_cq_n1(pr->recv_cq);
                ehea_reset_cq_n1(pr->send_cq);
                rmb();
                cqe = ehea_poll_rq1(pr->qp, &wqe_index);
                cqe_skb = ehea_poll_cq(pr->send_cq);

                if (!cqe && !cqe_skb)
                        return rx;

                if (!napi_reschedule(napi))
                        return rx;

                cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
                rx += ehea_proc_rwqes(dev, pr, budget - rx);
        }

        pr->poll_counter++;
        return rx;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        int i;

        for (i = 0; i < port->num_def_qps; i++)
                napi_schedule(&port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
        struct ehea_port_res *pr = param;

        napi_schedule(&pr->napi);

        return IRQ_HANDLED;
}

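/*
 * Handler for QP affiliated error interrupts: query the error data of
 * the affected QP and schedule a port reset if the error demands it
 * (CQ/EQ errors always reset the port).
 */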
static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
        struct ehea_port *port = param;
        struct ehea_eqe *eqe;
        struct ehea_qp *qp;
        u32 qp_token;
        u64 resource_type, aer, aerr;
        int reset_port = 0;

        eqe = ehea_poll_eq(port->qp_eq);

        while (eqe) {
                qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
                ehea_error("QP aff_err: entry=0x%llx, token=0x%x",
                           eqe->entry, qp_token);

                qp = port->port_res[qp_token].qp;

                resource_type = ehea_error_data(port->adapter, qp->fw_handle,
                                                &aer, &aerr);

                if (resource_type == EHEA_AER_RESTYPE_QP) {
                        if ((aer & EHEA_AER_RESET_MASK) ||
                            (aerr & EHEA_AERR_RESET_MASK))
                                reset_port = 1;
                } else
                        reset_port = 1; /* Reset in case of CQ or EQ error */

                eqe = ehea_poll_eq(port->qp_eq);
        }

        if (reset_port) {
                ehea_error("Resetting port");
                ehea_schedule_port_reset(port);
        }

        return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
                                       int logical_port)
{
        int i;

        for (i = 0; i < EHEA_MAX_PORTS; i++)
                if (adapter->port[i])
                        if (adapter->port[i]->logical_port_id == logical_port)
                                return adapter->port[i];
        return NULL;
}

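/*
 * Query MAC address, link speed, duplex mode and the queue configuration
 * of a logical port from the hypervisor (H_PORT_CB0).
 */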
int ehea_sense_port_attr(struct ehea_port *port)
{
        int ret;
        u64 hret;
        struct hcp_ehea_port_cb0 *cb0;

        /* may be called via ehea_neq_tasklet() */
        cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
        if (!cb0) {
                ehea_error("no mem for cb0");
                ret = -ENOMEM;
                goto out;
        }

        hret = ehea_h_query_ehea_port(port->adapter->handle,
                                      port->logical_port_id, H_PORT_CB0,
                                      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
                                      cb0);
        if (hret != H_SUCCESS) {
                ret = -EIO;
                goto out_free;
        }

        /* MAC address */
        port->mac_addr = cb0->port_mac_addr << 16;

        if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
                ret = -EADDRNOTAVAIL;
                goto out_free;
        }

        /* Port speed */
        switch (cb0->port_speed) {
        case H_SPEED_10M_H:
                port->port_speed = EHEA_SPEED_10M;
                port->full_duplex = 0;
                break;
        case H_SPEED_10M_F:
                port->port_speed = EHEA_SPEED_10M;
                port->full_duplex = 1;
                break;
        case H_SPEED_100M_H:
                port->port_speed = EHEA_SPEED_100M;
                port->full_duplex = 0;
                break;
        case H_SPEED_100M_F:
                port->port_speed = EHEA_SPEED_100M;
                port->full_duplex = 1;
                break;
        case H_SPEED_1G_F:
                port->port_speed = EHEA_SPEED_1G;
                port->full_duplex = 1;
                break;
        case H_SPEED_10G_F:
                port->port_speed = EHEA_SPEED_10G;
                port->full_duplex = 1;
                break;
        default:
                port->port_speed = 0;
                port->full_duplex = 0;
                break;
        }

        port->autoneg = 1;
        port->num_mcs = cb0->num_default_qps;

        /* Number of default QPs */
        if (use_mcs)
                port->num_def_qps = cb0->num_default_qps;
        else
                port->num_def_qps = 1;

        if (!port->num_def_qps) {
                ret = -EINVAL;
                goto out_free;
        }

        port->num_tx_qps = num_tx_qps;

        if (port->num_def_qps >= port->num_tx_qps)
                port->num_add_tx_qps = 0;
        else
                port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;

        ret = 0;
out_free:
        if (ret || netif_msg_probe(port))
                ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
        free_page((unsigned long)cb0);
out:
        return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
        struct hcp_ehea_port_cb4 *cb4;
        u64 hret;
        int ret = 0;

        cb4 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb4) {
                ehea_error("no mem for cb4");
                ret = -ENOMEM;
                goto out;
        }

        cb4->port_speed = port_speed;

        netif_carrier_off(port->netdev);

        hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                       port->logical_port_id,
                                       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
        if (hret == H_SUCCESS) {
                port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

                hret = ehea_h_query_ehea_port(port->adapter->handle,
                                              port->logical_port_id,
                                              H_PORT_CB4, H_PORT_CB4_SPEED,
                                              cb4);
                if (hret == H_SUCCESS) {
                        switch (cb4->port_speed) {
                        case H_SPEED_10M_H:
                                port->port_speed = EHEA_SPEED_10M;
                                port->full_duplex = 0;
                                break;
                        case H_SPEED_10M_F:
                                port->port_speed = EHEA_SPEED_10M;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_100M_H:
                                port->port_speed = EHEA_SPEED_100M;
                                port->full_duplex = 0;
                                break;
                        case H_SPEED_100M_F:
                                port->port_speed = EHEA_SPEED_100M;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_1G_F:
                                port->port_speed = EHEA_SPEED_1G;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_10G_F:
                                port->port_speed = EHEA_SPEED_10G;
                                port->full_duplex = 1;
                                break;
                        default:
                                port->port_speed = 0;
                                port->full_duplex = 0;
                                break;
                        }
                } else {
                        ehea_error("Failed sensing port speed");
                        ret = -EIO;
                }
        } else {
                if (hret == H_AUTHORITY) {
                        ehea_info("Hypervisor denied setting port speed");
                        ret = -EPERM;
                } else {
                        ret = -EIO;
                        ehea_error("Failed setting port speed");
                }
        }
        if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
                netif_carrier_on(port->netdev);

        free_page((unsigned long)cb4);
out:
        return ret;
}

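/*
 * Decode an asynchronous event queue entry: port state changes update
 * carrier and queue state (optionally propagating the physical port
 * state), adapter and port malfunctions are reported.
 */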
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
        int ret;
        u8 ec;
        u8 portnum;
        struct ehea_port *port;

        ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
        portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
        port = ehea_get_port(adapter, portnum);

        switch (ec) {
        case EHEA_EC_PORTSTATE_CHG:     /* port state change */

                if (!port) {
                        ehea_error("unknown portnum %x", portnum);
                        break;
                }

                if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
                        if (!netif_carrier_ok(port->netdev)) {
                                ret = ehea_sense_port_attr(port);
                                if (ret) {
                                        ehea_error("failed resensing port "
                                                   "attributes");
                                        break;
                                }

                                if (netif_msg_link(port))
                                        ehea_info("%s: Logical port up: %dMbps "
                                                  "%s Duplex",
                                                  port->netdev->name,
                                                  port->port_speed,
                                                  port->full_duplex ==
                                                  1 ? "Full" : "Half");

                                netif_carrier_on(port->netdev);
                                netif_wake_queue(port->netdev);
                        }
                } else
                        if (netif_carrier_ok(port->netdev)) {
                                if (netif_msg_link(port))
                                        ehea_info("%s: Logical port down",
                                                  port->netdev->name);
                                netif_carrier_off(port->netdev);
                                netif_stop_queue(port->netdev);
                        }

                if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
                        port->phy_link = EHEA_PHY_LINK_UP;
                        if (netif_msg_link(port))
                                ehea_info("%s: Physical port up",
                                          port->netdev->name);
                        if (prop_carrier_state)
                                netif_carrier_on(port->netdev);
                } else {
                        port->phy_link = EHEA_PHY_LINK_DOWN;
                        if (netif_msg_link(port))
                                ehea_info("%s: Physical port down",
                                          port->netdev->name);
                        if (prop_carrier_state)
                                netif_carrier_off(port->netdev);
                }

                if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
                        ehea_info("External switch port is primary port");
                else
                        ehea_info("External switch port is backup port");

                break;
        case EHEA_EC_ADAPTER_MALFUNC:
                ehea_error("Adapter malfunction");
                break;
        case EHEA_EC_PORT_MALFUNC:
                ehea_info("Port malfunction: Device: %s", port->netdev->name);
                netif_carrier_off(port->netdev);
                netif_stop_queue(port->netdev);
                break;
        default:
                ehea_error("unknown event code %x, eqe=0x%llX", ec, eqe);
                break;
        }
}

static void ehea_neq_tasklet(unsigned long data)
{
        struct ehea_adapter *adapter = (struct ehea_adapter *)data;
        struct ehea_eqe *eqe;
        u64 event_mask;

        eqe = ehea_poll_eq(adapter->neq);
        ehea_debug("eqe=%p", eqe);

        while (eqe) {
                ehea_debug("*eqe=%lx", eqe->entry);
                ehea_parse_eqe(adapter, eqe->entry);
                eqe = ehea_poll_eq(adapter->neq);
                ehea_debug("next eqe=%p", eqe);
        }

        event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
                   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
                   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

        ehea_h_reset_events(adapter->handle,
                            adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
        struct ehea_adapter *adapter = param;
        tasklet_hi_schedule(&adapter->neq_tasklet);
        return IRQ_HANDLED;
}


static int ehea_fill_port_res(struct ehea_port_res *pr)
{
        int ret;
        struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

        ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
                               - init_attr->act_nr_rwqes_rq2
                               - init_attr->act_nr_rwqes_rq3 - 1);

        ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

        ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

        return ret;
}

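/*
 * Register the port's affiliated event interrupt and one receive
 * interrupt per queue pair; on failure, all previously requested IRQs
 * are released again.
 */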
static int ehea_reg_interrupts(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_port_res *pr;
        int i, ret;


        snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
                 dev->name);

        ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
                                  ehea_qp_aff_irq_handler,
                                  IRQF_DISABLED, port->int_aff_name, port);
        if (ret) {
                ehea_error("failed registering irq for qp_aff_irq_handler:"
                           "ist=%X", port->qp_eq->attr.ist1);
                goto out_free_qpeq;
        }

        if (netif_msg_ifup(port))
                ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
                          "registered", port->qp_eq->attr.ist1);


        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
                pr = &port->port_res[i];
                snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
                         "%s-queue%d", dev->name, i);
                ret = ibmebus_request_irq(pr->eq->attr.ist1,
                                          ehea_recv_irq_handler,
                                          IRQF_DISABLED, pr->int_send_name,
                                          pr);
                if (ret) {
                        ehea_error("failed registering irq for ehea_queue "
                                   "port_res_nr:%d, ist=%X", i,
                                   pr->eq->attr.ist1);
                        goto out_free_req;
                }
                if (netif_msg_ifup(port))
                        ehea_info("irq_handle 0x%X for function ehea_queue_int "
                                  "%d registered", pr->eq->attr.ist1, i);
        }
out:
        return ret;


out_free_req:
        while (--i >= 0) {
                u32 ist = port->port_res[i].eq->attr.ist1;
                ibmebus_free_irq(ist, &port->port_res[i]);
        }

out_free_qpeq:
        ibmebus_free_irq(port->qp_eq->attr.ist1, port);
        i = port->num_def_qps;

        goto out;

}

static void ehea_free_interrupts(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_port_res *pr;
        int i;

        /* send */

        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
                pr = &port->port_res[i];
                ibmebus_free_irq(pr->eq->attr.ist1, pr);
                if (netif_msg_intr(port))
                        ehea_info("free send irq for res %d with handle 0x%X",
                                  i, pr->eq->attr.ist1);
        }

        /* associated events */
        ibmebus_free_irq(port->qp_eq->attr.ist1, port);
        if (netif_msg_intr(port))
                ehea_info("associated event interrupt for handle 0x%X freed",
                          port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
        int ret, i;
        u64 hret, mask;
        struct hcp_ehea_port_cb0 *cb0;

        ret = -ENOMEM;
        cb0 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb0)
                goto out;

        cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
                     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
                     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
                     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
                     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
                                      PXLY_RC_VLAN_FILTER)
                     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

        for (i = 0; i < port->num_mcs; i++)
                if (use_mcs)
                        cb0->default_qpn_arr[i] =
                                port->port_res[i].qp->init_attr.qp_nr;
                else
                        cb0->default_qpn_arr[i] =
                                port->port_res[0].qp->init_attr.qp_nr;

        if (netif_msg_ifup(port))
                ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

        mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
             | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

        hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                       port->logical_port_id,
                                       H_PORT_CB0, mask, cb0);
        ret = -EIO;
        if (hret != H_SUCCESS)
                goto out_free;

        ret = 0;

out_free:
        free_page((unsigned long)cb0);
out:
        return ret;
}

int ehea_gen_smrs(struct ehea_port_res *pr)
{
        int ret;
        struct ehea_adapter *adapter = pr->port->adapter;

        ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
        if (ret)
                goto out;

        ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
        if (ret)
                goto out_free;

        return 0;

out_free:
        ehea_rem_mr(&pr->send_mr);
out:
        ehea_error("Generating SMRS failed\n");
        return -EIO;
}

int ehea_rem_smrs(struct ehea_port_res *pr)
{
        if ((ehea_rem_mr(&pr->send_mr)) ||
            (ehea_rem_mr(&pr->recv_mr)))
                return -EIO;
        else
                return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
        int arr_size = sizeof(void *) * max_q_entries;

        q_skba->arr = vmalloc(arr_size);
        if (!q_skba->arr)
                return -ENOMEM;

        memset(q_skba->arr, 0, arr_size);

        q_skba->len = max_q_entries;
        q_skba->index = 0;
        q_skba->os_skbs = 0;

        return 0;
}

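/*
 * Set up one port resource (queue set): event queue, send and receive
 * completion queues, the queue pair, skb tracking arrays, memory
 * regions, and the per-queue NAPI and LRO context. Packet/byte counters
 * are preserved across the reset of the structure.
 */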
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
                              struct port_res_cfg *pr_cfg, int queue_token)
{
        struct ehea_adapter *adapter = port->adapter;
        enum ehea_eq_type eq_type = EHEA_EQ;
        struct ehea_qp_init_attr *init_attr = NULL;
        int ret = -EIO;
        u64 tx_bytes, rx_bytes, tx_packets, rx_packets;

        tx_bytes = pr->tx_bytes;
        tx_packets = pr->tx_packets;
        rx_bytes = pr->rx_bytes;
        rx_packets = pr->rx_packets;

        memset(pr, 0, sizeof(struct ehea_port_res));

        pr->tx_bytes = tx_bytes;
        pr->tx_packets = tx_packets;
        pr->rx_bytes = rx_bytes;
        pr->rx_packets = rx_packets;

        pr->port = port;
        spin_lock_init(&pr->xmit_lock);
        spin_lock_init(&pr->netif_queue);

        pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
        if (!pr->eq) {
                ehea_error("create_eq failed (eq)");
                goto out_free;
        }

        pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
                                     pr->eq->fw_handle,
                                     port->logical_port_id);
        if (!pr->recv_cq) {
                ehea_error("create_cq failed (cq_recv)");
                goto out_free;
        }

        pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
                                     pr->eq->fw_handle,
                                     port->logical_port_id);
        if (!pr->send_cq) {
                ehea_error("create_cq failed (cq_send)");
                goto out_free;
        }

        if (netif_msg_ifup(port))
                ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
                          pr->send_cq->attr.act_nr_of_cqes,
                          pr->recv_cq->attr.act_nr_of_cqes);

        init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
        if (!init_attr) {
                ret = -ENOMEM;
                ehea_error("no mem for ehea_qp_init_attr");
                goto out_free;
        }

        init_attr->low_lat_rq1 = 1;
        init_attr->signalingtype = 1;   /* generate CQE if specified in WQE */
        init_attr->rq_count = 3;
        init_attr->qp_token = queue_token;
        init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
        init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
        init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
        init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
        init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
        init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
        init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
        init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
        init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
        init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
        init_attr->port_nr = port->logical_port_id;
        init_attr->send_cq_handle = pr->send_cq->fw_handle;
        init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
        init_attr->aff_eq_handle = port->qp_eq->fw_handle;

        pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
        if (!pr->qp) {
                ehea_error("create_qp failed");
                ret = -EIO;
                goto out_free;
        }

        if (netif_msg_ifup(port))
                ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
                          "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
                          init_attr->act_nr_send_wqes,
                          init_attr->act_nr_rwqes_rq1,
                          init_attr->act_nr_rwqes_rq2,
                          init_attr->act_nr_rwqes_rq3);

        pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

        ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
        ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
        ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
        ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
        if (ret)
                goto out_free;

        pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
        if (ehea_gen_smrs(pr) != 0) {
                ret = -EIO;
                goto out_free;
        }

        atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

        kfree(init_attr);

        netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

        pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
        pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
        pr->lro_mgr.lro_arr = pr->lro_desc;
        pr->lro_mgr.get_skb_header = get_skb_hdr;
        pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
        pr->lro_mgr.dev = port->netdev;
        pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
        pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

        ret = 0;
        goto out;

out_free:
        kfree(init_attr);
        vfree(pr->sq_skba.arr);
        vfree(pr->rq1_skba.arr);
        vfree(pr->rq2_skba.arr);
        vfree(pr->rq3_skba.arr);
        ehea_destroy_qp(pr->qp);
        ehea_destroy_cq(pr->send_cq);
        ehea_destroy_cq(pr->recv_cq);
        ehea_destroy_eq(pr->eq);
out:
        return ret;
}

static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
        int ret, i;

        if (pr->qp)
                netif_napi_del(&pr->napi);

        ret = ehea_destroy_qp(pr->qp);

        if (!ret) {
                ehea_destroy_cq(pr->send_cq);
                ehea_destroy_cq(pr->recv_cq);
                ehea_destroy_eq(pr->eq);

                for (i = 0; i < pr->rq1_skba.len; i++)
                        if (pr->rq1_skba.arr[i])
                                dev_kfree_skb(pr->rq1_skba.arr[i]);

                for (i = 0; i < pr->rq2_skba.len; i++)
                        if (pr->rq2_skba.arr[i])
                                dev_kfree_skb(pr->rq2_skba.arr[i]);

                for (i = 0; i < pr->rq3_skba.len; i++)
                        if (pr->rq3_skba.arr[i])
                                dev_kfree_skb(pr->rq3_skba.arr[i]);

                for (i = 0; i < pr->sq_skba.len; i++)
                        if (pr->sq_skba.arr[i])
                                dev_kfree_skb(pr->sq_skba.arr[i]);

                vfree(pr->rq1_skba.arr);
                vfree(pr->rq2_skba.arr);
                vfree(pr->rq3_skba.arr);
                vfree(pr->sq_skba.arr);
                ret = ehea_rem_smrs(pr);
        }
        return ret;
}

/*
 * The write_* functions store information in swqe which is used by
 * the hardware to calculate the ip/tcp/udp checksum
 */

static inline void write_ip_start_end(struct ehea_swqe *swqe,
                                      const struct sk_buff *skb)
{
        swqe->ip_start = skb_network_offset(skb);
        swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
}

static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
                                        const struct sk_buff *skb)
{
        swqe->tcp_offset =
                (u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));

        swqe->tcp_end = (u16)skb->len - 1;
}

static inline void write_udp_offset_end(struct ehea_swqe *swqe,
                                        const struct sk_buff *skb)
{
        swqe->tcp_offset =
                (u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));

        swqe->tcp_end = (u16)skb->len - 1;
}


static void write_swqe2_TSO(struct sk_buff *skb,
                            struct ehea_swqe *swqe, u32 lkey)
{
        struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
        u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
        int skb_data_size = skb_headlen(skb);
        int headersize;

        /* Packet is TCP with TSO enabled */
        swqe->tx_control |= EHEA_SWQE_TSO;
        swqe->mss = skb_shinfo(skb)->gso_size;
        /* copy only eth/ip/tcp headers to immediate data and
         * the rest of skb->data to sg1entry
         */
        headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);

        skb_data_size = skb_headlen(skb);

        if (skb_data_size >= headersize) {
                /* copy immediate data */
                skb_copy_from_linear_data(skb, imm_data, headersize);
                swqe->immediate_data_length = headersize;

                if (skb_data_size > headersize) {
                        /* set sg1entry data */
                        sg1entry->l_key = lkey;
                        sg1entry->len = skb_data_size - headersize;
                        sg1entry->vaddr =
                                ehea_map_vaddr(skb->data + headersize);
                        swqe->descriptors++;
                }
        } else
                ehea_error("cannot handle fragmented headers");
}

static void write_swqe2_nonTSO(struct sk_buff *skb,
                               struct ehea_swqe *swqe, u32 lkey)
{
        int skb_data_size = skb_headlen(skb);
        u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
        struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;

        /* Packet is any nonTSO type
         *
         * Copy as much as possible skb->data to immediate data and
         * the rest to sg1entry
         */
        if (skb_data_size >= SWQE2_MAX_IMM) {
                /* copy immediate data */
                skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);

                swqe->immediate_data_length = SWQE2_MAX_IMM;

                if (skb_data_size > SWQE2_MAX_IMM) {
                        /* copy sg1entry data */
                        sg1entry->l_key = lkey;
                        sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
                        sg1entry->vaddr =
                                ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
                        swqe->descriptors++;
                }
        } else {
                skb_copy_from_linear_data(skb, imm_data, skb_data_size);
                swqe->immediate_data_length = skb_data_size;
        }
}

static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
                                    struct ehea_swqe *swqe, u32 lkey)
{
        struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
        skb_frag_t *frag;
        int nfrags, sg1entry_contains_frag_data, i;

        nfrags = skb_shinfo(skb)->nr_frags;
        sg1entry = &swqe->u.immdata_desc.sg_entry;
        sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
        swqe->descriptors = 0;
        sg1entry_contains_frag_data = 0;

        if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
                write_swqe2_TSO(skb, swqe, lkey);
        else
                write_swqe2_nonTSO(skb, swqe, lkey);

        /* write descriptors */
        if (nfrags > 0) {
                if (swqe->descriptors == 0) {
                        /* sg1entry not yet used */
                        frag = &skb_shinfo(skb)->frags[0];

                        /* copy sg1entry data */
                        sg1entry->l_key = lkey;
                        sg1entry->len = frag->size;
                        sg1entry->vaddr =
                                ehea_map_vaddr(page_address(frag->page)
                                               + frag->page_offset);
                        swqe->descriptors++;
                        sg1entry_contains_frag_data = 1;
                }

                for (i = sg1entry_contains_frag_data; i < nfrags; i++) {

                        frag = &skb_shinfo(skb)->frags[i];
                        sgentry = &sg_list[i - sg1entry_contains_frag_data];

                        sgentry->l_key = lkey;
                        sgentry->len = frag->size;
                        sgentry->vaddr =
                                ehea_map_vaddr(page_address(frag->page)
                                               + frag->page_offset);
                        swqe->descriptors++;
                }
        }
}

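/*
 * Register or deregister (hcallid H_REG_BCMC/H_DEREG_BCMC) the port's
 * broadcast address with the hypervisor, once for untagged frames and
 * once for all VLANs.
 */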
1838static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
1839{
1840 int ret = 0;
1841 u64 hret;
1842 u8 reg_type;
1843
1844 /* De/Register untagged packets */
1845 reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
1846 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1847 port->logical_port_id,
1848 reg_type, port->mac_addr, 0, hcallid);
1849 if (hret != H_SUCCESS) {
f9e29228 1850 ehea_error("%sregistering bc address failed (tagged)",
508d2b5d 1851 hcallid == H_REG_BCMC ? "" : "de");
7a291083
JBT
1852 ret = -EIO;
1853 goto out_herr;
1854 }
1855
1856 /* De/Register VLAN packets */
1857 reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
1858 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1859 port->logical_port_id,
1860 reg_type, port->mac_addr, 0, hcallid);
1861 if (hret != H_SUCCESS) {
f9e29228
TK
1862 ehea_error("%sregistering bc address failed (vlan)",
1863 hcallid == H_REG_BCMC ? "" : "de");
7a291083
JBT
1864 ret = -EIO;
1865 }
1866out_herr:
1867 return ret;
1868}
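/*
 * Editor's note: each port thus keeps two broadcast registrations per
 * MAC, one for untagged frames and one covering all VLAN IDs; the same
 * helper adds them with H_REG_BCMC and removes them with H_DEREG_BCMC.
 */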
1869
1870static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1871{
1872 struct ehea_port *port = netdev_priv(dev);
1873 struct sockaddr *mac_addr = sa;
1874 struct hcp_ehea_port_cb0 *cb0;
1875 int ret;
1876 u64 hret;
1877
1878 if (!is_valid_ether_addr(mac_addr->sa_data)) {
1879 ret = -EADDRNOTAVAIL;
1880 goto out;
1881 }
1882
3faf2693 1883 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
7a291083
JBT
1884 if (!cb0) {
1885 ehea_error("no mem for cb0");
1886 ret = -ENOMEM;
1887 goto out;
1888 }
1889
1890 memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);
1891
1892 cb0->port_mac_addr = cb0->port_mac_addr >> 16;
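	/*
	 * Editor's note (worked example): on this big-endian platform the
	 * memcpy above lands the six MAC bytes in the most significant
	 * bytes of the u64, e.g. 02:16:ca:fe:ba:be gives
	 * 0x0216cafebabe0000; the shift right by 16 right-aligns it to
	 * 0x00000216cafebabe, the form passed to the hcall below and
	 * shifted back when cached in port->mac_addr.
	 */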
1893
1894 hret = ehea_h_modify_ehea_port(port->adapter->handle,
1895 port->logical_port_id, H_PORT_CB0,
1896 EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
1897 if (hret != H_SUCCESS) {
1898 ret = -EIO;
1899 goto out_free;
1900 }
1901
1902 memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
1903
1904 /* Deregister old MAC in pHYP */
00aaea2f
JBT
1905 if (port->state == EHEA_PORT_UP) {
1906 ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
1907 if (ret)
1908 goto out_upregs;
1909 }
7a291083
JBT
1910
1911 port->mac_addr = cb0->port_mac_addr << 16;
1912
1913 /* Register new MAC in pHYP */
00aaea2f
JBT
1914 if (port->state == EHEA_PORT_UP) {
1915 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
1916 if (ret)
1917 goto out_upregs;
1918 }
7a291083
JBT
1919
1920 ret = 0;
21eee2dd
TK
1921
1922out_upregs:
1923 ehea_update_bcmc_registrations();
7a291083 1924out_free:
3faf2693 1925 free_page((unsigned long)cb0);
7a291083
JBT
1926out:
1927 return ret;
1928}
1929
1930static void ehea_promiscuous_error(u64 hret, int enable)
1931{
7674a588
TK
1932 if (hret == H_AUTHORITY)
1933 ehea_info("Hypervisor denied %sabling promiscuous mode",
1934 enable == 1 ? "en" : "dis");
1935 else
1936 ehea_error("failed %sabling promiscuous mode",
1937 enable == 1 ? "en" : "dis");
7a291083
JBT
1938}
1939
1940static void ehea_promiscuous(struct net_device *dev, int enable)
1941{
1942 struct ehea_port *port = netdev_priv(dev);
1943 struct hcp_ehea_port_cb7 *cb7;
1944 u64 hret;
1945
aa3bc6c6 1946 if (enable == port->promisc)
7a291083
JBT
1947 return;
1948
3faf2693 1949 cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
7a291083
JBT
1950 if (!cb7) {
1951 ehea_error("no mem for cb7");
1952 goto out;
1953 }
1954
1955 /* Modify Pxs_DUCQPN in CB7 */
1956 cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;
1957
1958 hret = ehea_h_modify_ehea_port(port->adapter->handle,
1959 port->logical_port_id,
1960 H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
1961 if (hret) {
1962 ehea_promiscuous_error(hret, enable);
1963 goto out;
1964 }
1965
1966 port->promisc = enable;
1967out:
3faf2693 1968 free_page((unsigned long)cb7);
7a291083
JBT
1969}
1970
1971static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
1972 u32 hcallid)
1973{
1974 u64 hret;
1975 u8 reg_type;
1976
1977 reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
1978 | EHEA_BCMC_UNTAGGED;
1979
1980 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1981 port->logical_port_id,
1982 reg_type, mc_mac_addr, 0, hcallid);
1983 if (hret)
1984 goto out;
1985
1986 reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
1987 | EHEA_BCMC_VLANID_ALL;
1988
1989 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1990 port->logical_port_id,
1991 reg_type, mc_mac_addr, 0, hcallid);
1992out:
1993 return hret;
1994}
1995
1996static int ehea_drop_multicast_list(struct net_device *dev)
1997{
1998 struct ehea_port *port = netdev_priv(dev);
1999 struct ehea_mc_list *mc_entry = port->mc_list;
2000 struct list_head *pos;
2001 struct list_head *temp;
2002 int ret = 0;
2003 u64 hret;
2004
2005 list_for_each_safe(pos, temp, &(port->mc_list->list)) {
2006 mc_entry = list_entry(pos, struct ehea_mc_list, list);
2007
2008 hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
2009 H_DEREG_BCMC);
2010 if (hret) {
2011 ehea_error("failed deregistering mcast MAC");
2012 ret = -EIO;
2013 }
2014
2015 list_del(pos);
2016 kfree(mc_entry);
2017 }
2018 return ret;
2019}
2020
2021static void ehea_allmulti(struct net_device *dev, int enable)
2022{
2023 struct ehea_port *port = netdev_priv(dev);
2024 u64 hret;
2025
2026 if (!port->allmulti) {
2027 if (enable) {
2028 /* Enable ALLMULTI */
2029 ehea_drop_multicast_list(dev);
2030 hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
2031 if (!hret)
2032 port->allmulti = 1;
2033 else
2034 ehea_error("failed enabling IFF_ALLMULTI");
2035 }
2036 } else
2037 if (!enable) {
2038 /* Disable ALLMULTI */
2039 hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
2040 if (!hret)
2041 port->allmulti = 0;
2042 else
2043 ehea_error("failed disabling IFF_ALLMULTI");
2044 }
2045}
2046
508d2b5d 2047static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
7a291083
JBT
2048{
2049 struct ehea_mc_list *ehea_mcl_entry;
2050 u64 hret;
2051
1e1675cc 2052 ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
7a291083
JBT
2053 if (!ehea_mcl_entry) {
2054 ehea_error("no mem for mcl_entry");
2055 return;
2056 }
2057
2058 INIT_LIST_HEAD(&ehea_mcl_entry->list);
2059
2060 memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);
2061
2062 hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
2063 H_REG_BCMC);
2064 if (!hret)
2065 list_add(&ehea_mcl_entry->list, &port->mc_list->list);
2066 else {
2067 ehea_error("failed registering mcast MAC");
2068 kfree(ehea_mcl_entry);
2069 }
2070}
2071
2072static void ehea_set_multicast_list(struct net_device *dev)
2073{
2074 struct ehea_port *port = netdev_priv(dev);
22bedad3 2075 struct netdev_hw_addr *ha;
48e2f183 2076 int ret;
7a291083
JBT
2077
2078 if (dev->flags & IFF_PROMISC) {
2079 ehea_promiscuous(dev, 1);
2080 return;
2081 }
2082 ehea_promiscuous(dev, 0);
2083
2084 if (dev->flags & IFF_ALLMULTI) {
2085 ehea_allmulti(dev, 1);
21eee2dd 2086 goto out;
7a291083
JBT
2087 }
2088 ehea_allmulti(dev, 0);
2089
4cd24eaf 2090 if (!netdev_mc_empty(dev)) {
7a291083
JBT
2091 ret = ehea_drop_multicast_list(dev);
2092 if (ret) {
2093 /* Dropping the current multicast list failed.
2094 * Enabling ALL_MULTI is the best we can do.
2095 */
2096 ehea_allmulti(dev, 1);
2097 }
2098
4cd24eaf 2099 if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
a1c5a893 2100 ehea_info("Mcast registration limit reached (0x%llx). "
7a291083
JBT
2101 "Use ALLMULTI!",
2102 port->adapter->max_mc_mac);
2103 goto out;
2104 }
2105
22bedad3
JP
2106 netdev_for_each_mc_addr(ha, dev)
2107 ehea_add_multicast_entry(port, ha->addr);
508d2b5d 2108
7a291083
JBT
2109 }
2110out:
21eee2dd 2111 ehea_update_bcmc_registrations();
7a291083
JBT
2112}
2113
2114static int ehea_change_mtu(struct net_device *dev, int new_mtu)
2115{
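	/*
	 * Editor's note: RFC 791 requires every link to carry datagrams of
	 * at least 68 bytes (a maximal 60-byte IP header plus an 8-byte
	 * fragment), hence the lower bound; EHEA_MAX_PACKET_SIZE is the
	 * adapter's jumbo frame limit.
	 */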
2116 if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
2117 return -EINVAL;
2118 dev->mtu = new_mtu;
2119 return 0;
2120}
2121
2122static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
2123 struct ehea_swqe *swqe, u32 lkey)
2124{
2125 if (skb->protocol == htons(ETH_P_IP)) {
eddc9ec5 2126 const struct iphdr *iph = ip_hdr(skb);
d1d25aab 2127
7a291083
JBT
2128 /* IPv4 */
2129 swqe->tx_control |= EHEA_SWQE_CRC
2130 | EHEA_SWQE_IP_CHECKSUM
2131 | EHEA_SWQE_TCP_CHECKSUM
2132 | EHEA_SWQE_IMM_DATA_PRESENT
2133 | EHEA_SWQE_DESCRIPTORS_PRESENT;
2134
2135 write_ip_start_end(swqe, skb);
2136
eddc9ec5 2137 if (iph->protocol == IPPROTO_UDP) {
8e95a202
JP
2138 if ((iph->frag_off & IP_MF) ||
2139 (iph->frag_off & IP_OFFSET))
7a291083
JBT
2140 /* IP fragment, so don't change cs */
2141 swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
2142 else
2143 write_udp_offset_end(swqe, skb);
eddc9ec5 2144 } else if (iph->protocol == IPPROTO_TCP) {
7a291083
JBT
2145 write_tcp_offset_end(swqe, skb);
2146 }
2147
2148 /* ICMP (big data) and IP segmentation packets (i.e. all other
2149 IP packets) do not require any special handling */
2150
2151 } else {
2152 /* Other Ethernet Protocol */
2153 swqe->tx_control |= EHEA_SWQE_CRC
2154 | EHEA_SWQE_IMM_DATA_PRESENT
2155 | EHEA_SWQE_DESCRIPTORS_PRESENT;
2156 }
2157
2158 write_swqe2_data(skb, dev, swqe, lkey);
2159}
2160
2161static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
2162 struct ehea_swqe *swqe)
2163{
2164 int nfrags = skb_shinfo(skb)->nr_frags;
2165 u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
2166 skb_frag_t *frag;
2167 int i;
2168
2169 if (skb->protocol == htons(ETH_P_IP)) {
eddc9ec5 2170 const struct iphdr *iph = ip_hdr(skb);
d1d25aab 2171
7a291083
JBT
2172 /* IPv4 */
2173 write_ip_start_end(swqe, skb);
2174
eddc9ec5 2175 if (iph->protocol == IPPROTO_TCP) {
7a291083
JBT
2176 swqe->tx_control |= EHEA_SWQE_CRC
2177 | EHEA_SWQE_IP_CHECKSUM
2178 | EHEA_SWQE_TCP_CHECKSUM
2179 | EHEA_SWQE_IMM_DATA_PRESENT;
2180
2181 write_tcp_offset_end(swqe, skb);
2182
eddc9ec5 2183 } else if (iph->protocol == IPPROTO_UDP) {
8e95a202
JP
2184 if ((iph->frag_off & IP_MF) ||
2185 (iph->frag_off & IP_OFFSET))
7a291083
JBT
2186 /* IP fragment, so don't change cs */
2187 swqe->tx_control |= EHEA_SWQE_CRC
2188 | EHEA_SWQE_IMM_DATA_PRESENT;
2189 else {
2190 swqe->tx_control |= EHEA_SWQE_CRC
2191 | EHEA_SWQE_IP_CHECKSUM
2192 | EHEA_SWQE_TCP_CHECKSUM
2193 | EHEA_SWQE_IMM_DATA_PRESENT;
2194
2195 write_udp_offset_end(swqe, skb);
2196 }
2197 } else {
2198 /* ICMP (big data) and
2199 IP segmentation packets (i.e. all other IP packets) */
2200 swqe->tx_control |= EHEA_SWQE_CRC
2201 | EHEA_SWQE_IP_CHECKSUM
2202 | EHEA_SWQE_IMM_DATA_PRESENT;
2203 }
2204 } else {
2205 /* Other Ethernet Protocol */
2206 swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
2207 }
2208 /* copy (immediate) data */
2209 if (nfrags == 0) {
2210 /* data is in a single piece */
d626f62b 2211 skb_copy_from_linear_data(skb, imm_data, skb->len);
7a291083
JBT
2212 } else {
2213 /* first copy data from the skb->data buffer ... */
d626f62b 2214 skb_copy_from_linear_data(skb, imm_data,
e743d313
ED
2215 skb_headlen(skb));
2216 imm_data += skb_headlen(skb);
7a291083
JBT
2217
2218 /* ... then copy data from the fragments */
2219 for (i = 0; i < nfrags; i++) {
2220 frag = &skb_shinfo(skb)->frags[i];
2221 memcpy(imm_data,
2222 page_address(frag->page) + frag->page_offset,
2223 frag->size);
2224 imm_data += frag->size;
2225 }
2226 }
2227 swqe->immediate_data_length = skb->len;
2228 dev_kfree_skb(skb);
2229}
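/*
 * Editor's note: in this SWQE3 path the whole frame (at most
 * SWQE3_MAX_IMM bytes, see the caller) is copied inline into the work
 * queue entry, so no DMA-able buffer or memory region key is needed and
 * the skb can be freed immediately after the copy, as done above.
 */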
2230
18604c54
JBT
2231static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
2232{
2233 struct tcphdr *tcp;
2234 u32 tmp;
2235
2236 if ((skb->protocol == htons(ETH_P_IP)) &&
88ca2d07 2237 (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
508d2b5d
DM
2238 tcp = (struct tcphdr *)(skb_network_header(skb) +
2239 (ip_hdr(skb)->ihl * 4));
18604c54 2240 tmp = (tcp->source + (tcp->dest << 16)) % 31;
88ca2d07 2241 tmp += ip_hdr(skb)->daddr % 31;
18604c54 2242 return tmp % num_qps;
508d2b5d 2243 } else
18604c54
JBT
2244 return 0;
2245}
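/*
 * Editor's sketch (not driver code): the flow hash above, modelled in
 * isolation. tcp->source/tcp->dest are used in network byte order,
 * which only affects how flows spread, not correctness; the helper
 * name below is hypothetical.
 *
 * // Map a TCP flow to one of nqps send queues, mirroring the
 * // arithmetic above: fold the ports mod 31, add daddr mod 31, then
 * // reduce mod nqps.
 * static unsigned int model_hash(unsigned short sport,
 * 				  unsigned short dport,
 * 				  unsigned int daddr, int nqps)
 * {
 * 	unsigned int tmp = (sport + ((unsigned int)dport << 16)) % 31;
 *
 * 	tmp += daddr % 31;
 * 	return tmp % nqps;
 * }
 */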
2246
7a291083
JBT
2247static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2248{
2249 struct ehea_port *port = netdev_priv(dev);
2250 struct ehea_swqe *swqe;
2251 unsigned long flags;
2252 u32 lkey;
2253 int swqe_index;
18604c54
JBT
2254 struct ehea_port_res *pr;
2255
2256 pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];
2257
18604c54
JBT
2258 if (!spin_trylock(&pr->xmit_lock))
2259 return NETDEV_TX_BUSY;
2260
2261 if (pr->queue_stopped) {
2262 spin_unlock(&pr->xmit_lock);
2263 return NETDEV_TX_BUSY;
2264 }
7a291083
JBT
2265
2266 swqe = ehea_get_swqe(pr->qp, &swqe_index);
2267 memset(swqe, 0, SWQE_HEADER_SIZE);
2268 atomic_dec(&pr->swqe_avail);
2269
e5ccd961
ED
2270 if (vlan_tx_tag_present(skb)) {
2271 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
2272 swqe->vlan_tag = vlan_tx_tag_get(skb);
2273 }
2274
ce45b873
BL
2275 pr->tx_packets++;
2276 pr->tx_bytes += skb->len;
2277
7a291083
JBT
2278 if (skb->len <= SWQE3_MAX_IMM) {
2279 u32 sig_iv = port->sig_comp_iv;
2280 u32 swqe_num = pr->swqe_id_counter;
2281 ehea_xmit3(skb, dev, swqe);
2282 swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
2283 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
2284 if (pr->swqe_ll_count >= (sig_iv - 1)) {
2285 swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
2286 sig_iv);
2287 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2288 pr->swqe_ll_count = 0;
2289 } else
2290 pr->swqe_ll_count += 1;
2291 } else {
2292 swqe->wr_id =
2293 EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
2294 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
acbddb59 2295 | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
7a291083
JBT
2296 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
2297 pr->sq_skba.arr[pr->sq_skba.index] = skb;
2298
2299 pr->sq_skba.index++;
2300 pr->sq_skba.index &= (pr->sq_skba.len - 1);
2301
2302 lkey = pr->send_mr.lkey;
2303 ehea_xmit2(skb, dev, swqe, lkey);
acbddb59 2304 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
7a291083
JBT
2305 }
2306 pr->swqe_id_counter += 1;
2307
7a291083
JBT
2308 if (netif_msg_tx_queued(port)) {
2309 ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
bff0a55f 2310 ehea_dump(swqe, 512, "swqe");
7a291083
JBT
2311 }
2312
2c69448b
JBT
2313 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
2314 netif_stop_queue(dev);
2315 swqe->tx_control |= EHEA_SWQE_PURGE;
2316 }
44c82152 2317
7a291083 2318 ehea_post_swqe(pr->qp, swqe);
7a291083
JBT
2319
2320 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
2321 spin_lock_irqsave(&pr->netif_queue, flags);
2322 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
acbddb59 2323 pr->p_stats.queue_stopped++;
7a291083
JBT
2324 netif_stop_queue(dev);
2325 pr->queue_stopped = 1;
2326 }
2327 spin_unlock_irqrestore(&pr->netif_queue, flags);
2328 }
1ae5dc34 2329 dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
7a291083 2330 spin_unlock(&pr->xmit_lock);
2c69448b 2331
7a291083
JBT
2332 return NETDEV_TX_OK;
2333}
2334
2335static void ehea_vlan_rx_register(struct net_device *dev,
2336 struct vlan_group *grp)
2337{
2338 struct ehea_port *port = netdev_priv(dev);
2339 struct ehea_adapter *adapter = port->adapter;
2340 struct hcp_ehea_port_cb1 *cb1;
2341 u64 hret;
2342
2343 port->vgrp = grp;
2344
3faf2693 2345 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
7a291083
JBT
2346 if (!cb1) {
2347 ehea_error("no mem for cb1");
2348 goto out;
2349 }
2350
7a291083
JBT
2351 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2352 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2353 if (hret != H_SUCCESS)
2354 ehea_error("modify_ehea_port failed");
2355
3faf2693 2356 free_page((unsigned long)cb1);
7a291083
JBT
2357out:
2358 return;
2359}
2360
2361static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
2362{
2363 struct ehea_port *port = netdev_priv(dev);
2364 struct ehea_adapter *adapter = port->adapter;
2365 struct hcp_ehea_port_cb1 *cb1;
2366 int index;
2367 u64 hret;
2368
3faf2693 2369 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
7a291083
JBT
2370 if (!cb1) {
2371 ehea_error("no mem for cb1");
2372 goto out;
2373 }
2374
2375 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2376 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2377 if (hret != H_SUCCESS) {
2378 ehea_error("query_ehea_port failed");
2379 goto out;
2380 }
2381
2382 index = (vid / 64);
dec590c1 2383 cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));
7a291083
JBT
2384
2385 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2386 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2387 if (hret != H_SUCCESS)
2388 ehea_error("modify_ehea_port failed");
2389out:
3faf2693 2390 free_page((unsigned long)cb1);
7a291083
JBT
2391 return;
2392}
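/*
 * Editor's note (worked example): the VLAN filter is a 4096-bit bitmap
 * stored in u64 words with the most significant bit first. For vid = 70:
 * index = 70 / 64 = 1 and 70 & 0x3F = 6, so the code sets bit
 * (0x8000000000000000 >> 6) in vlan_filter[1].
 */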
2393
2394static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2395{
2396 struct ehea_port *port = netdev_priv(dev);
2397 struct ehea_adapter *adapter = port->adapter;
2398 struct hcp_ehea_port_cb1 *cb1;
2399 int index;
2400 u64 hret;
2401
5c15bdec 2402 vlan_group_set_device(port->vgrp, vid, NULL);
7a291083 2403
3faf2693 2404 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
7a291083
JBT
2405 if (!cb1) {
2406 ehea_error("no mem for cb1");
2407 goto out;
2408 }
2409
2410 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2411 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2412 if (hret != H_SUCCESS) {
2413 ehea_error("query_ehea_port failed");
2414 goto out;
2415 }
2416
2417 index = (vid / 64);
dec590c1 2418 cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));
7a291083
JBT
2419
2420 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2421 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2422 if (hret != H_SUCCESS)
2423 ehea_error("modify_ehea_port failed");
2424out:
3faf2693 2425 free_page((unsigned long)cb1);
7a291083
JBT
2426}
2427
2428int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2429{
2430 int ret = -EIO;
2431 u64 hret;
2432 u16 dummy16 = 0;
2433 u64 dummy64 = 0;
508d2b5d 2434 struct hcp_modify_qp_cb0 *cb0;
7a291083 2435
3faf2693 2436 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
7a291083
JBT
2437 if (!cb0) {
2438 ret = -ENOMEM;
2439 goto out;
2440 }
2441
2442 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2443 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2444 if (hret != H_SUCCESS) {
2445 ehea_error("query_ehea_qp failed (1)");
2446 goto out;
2447 }
2448
2449 cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
2450 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2451 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2452 &dummy64, &dummy64, &dummy16, &dummy16);
2453 if (hret != H_SUCCESS) {
2454 ehea_error("modify_ehea_qp failed (1)");
2455 goto out;
2456 }
2457
2458 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2459 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2460 if (hret != H_SUCCESS) {
2461 ehea_error("query_ehea_qp failed (2)");
2462 goto out;
2463 }
2464
2465 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
2466 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2467 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2468 &dummy64, &dummy64, &dummy16, &dummy16);
2469 if (hret != H_SUCCESS) {
2470 ehea_error("modify_ehea_qp failed (2)");
2471 goto out;
2472 }
2473
2474 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2475 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2476 if (hret != H_SUCCESS) {
2477 ehea_error("query_ehea_qp failed (3)");
2478 goto out;
2479 }
2480
2481 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
2482 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2483 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2484 &dummy64, &dummy64, &dummy16, &dummy16);
2485 if (hret != H_SUCCESS) {
2486 ehea_error("modify_ehea_qp failed (3)");
2487 goto out;
2488 }
2489
2490 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2491 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2492 if (hret != H_SUCCESS) {
2493 ehea_error("query_ehea_qp failed (4)");
2494 goto out;
2495 }
2496
2497 ret = 0;
2498out:
3faf2693 2499 free_page((unsigned long)cb0);
7a291083
JBT
2500 return ret;
2501}
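/*
 * Editor's note: the sequence above walks the QP through its state
 * ladder, INITIALIZED, then ENABLED | INITIALIZED, then
 * ENABLED | RDY2SND, re-querying the control block before each modify
 * so that the copy handed back to the hypervisor is current.
 */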
2502
2503static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
2504 int add_tx_qps)
2505{
2506 int ret, i;
2507 struct port_res_cfg pr_cfg, pr_cfg_small_rx;
2508 enum ehea_eq_type eq_type = EHEA_EQ;
2509
2510 port->qp_eq = ehea_create_eq(port->adapter, eq_type,
2511 EHEA_MAX_ENTRIES_EQ, 1);
2512 if (!port->qp_eq) {
2513 ret = -EINVAL;
2514 ehea_error("ehea_create_eq failed (qp_eq)");
2515 goto out_kill_eq;
2516 }
2517
2518 pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
18604c54 2519 pr_cfg.max_entries_scq = sq_entries * 2;
7a291083
JBT
2520 pr_cfg.max_entries_sq = sq_entries;
2521 pr_cfg.max_entries_rq1 = rq1_entries;
2522 pr_cfg.max_entries_rq2 = rq2_entries;
2523 pr_cfg.max_entries_rq3 = rq3_entries;
2524
2525 pr_cfg_small_rx.max_entries_rcq = 1;
2526 pr_cfg_small_rx.max_entries_scq = sq_entries;
2527 pr_cfg_small_rx.max_entries_sq = sq_entries;
2528 pr_cfg_small_rx.max_entries_rq1 = 1;
2529 pr_cfg_small_rx.max_entries_rq2 = 1;
2530 pr_cfg_small_rx.max_entries_rq3 = 1;
2531
2532 for (i = 0; i < def_qps; i++) {
2533 ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
2534 if (ret)
2535 goto out_clean_pr;
2536 }
2537 for (i = def_qps; i < def_qps + add_tx_qps; i++) {
2538 ret = ehea_init_port_res(port, &port->port_res[i],
2539 &pr_cfg_small_rx, i);
2540 if (ret)
2541 goto out_clean_pr;
2542 }
2543
2544 return 0;
2545
2546out_clean_pr:
2547 while (--i >= 0)
2548 ehea_clean_portres(port, &port->port_res[i]);
2549
2550out_kill_eq:
2551 ehea_destroy_eq(port->qp_eq);
2552 return ret;
2553}
2554
2555static int ehea_clean_all_portres(struct ehea_port *port)
2556{
2557 int ret = 0;
2558 int i;
2559
508d2b5d 2560 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
7a291083
JBT
2561 ret |= ehea_clean_portres(port, &port->port_res[i]);
2562
2563 ret |= ehea_destroy_eq(port->qp_eq);
2564
2565 return ret;
2566}
2567
35cf2e2e 2568static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
1211bb6d 2569{
35cf2e2e
TK
2570 if (adapter->active_ports)
2571 return;
1211bb6d
TK
2572
2573 ehea_rem_mr(&adapter->mr);
2574}
2575
35cf2e2e 2576static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
1211bb6d 2577{
35cf2e2e
TK
2578 if (adapter->active_ports)
2579 return 0;
1211bb6d
TK
2580
2581 return ehea_reg_kernel_mr(adapter, &adapter->mr);
2582}
2583
7a291083
JBT
2584static int ehea_up(struct net_device *dev)
2585{
2586 int ret, i;
2587 struct ehea_port *port = netdev_priv(dev);
7a291083
JBT
2588
2589 if (port->state == EHEA_PORT_UP)
2590 return 0;
2591
2592 ret = ehea_port_res_setup(port, port->num_def_qps,
2593 port->num_add_tx_qps);
2594 if (ret) {
2595 ehea_error("port_res_failed");
2596 goto out;
2597 }
2598
2599 /* Set default QP for this port */
2600 ret = ehea_configure_port(port);
2601 if (ret) {
2602 ehea_error("ehea_configure_port failed. ret:%d", ret);
2603 goto out_clean_pr;
2604 }
2605
7a291083
JBT
2606 ret = ehea_reg_interrupts(dev);
2607 if (ret) {
f9e29228
TK
2608 ehea_error("reg_interrupts failed. ret:%d", ret);
2609 goto out_clean_pr;
7a291083
JBT
2610 }
2611
508d2b5d 2612 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
7a291083
JBT
2613 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
2614 if (ret) {
2615 ehea_error("activate_qp failed");
2616 goto out_free_irqs;
2617 }
2618 }
2619
508d2b5d 2620 for (i = 0; i < port->num_def_qps; i++) {
7a291083
JBT
2621 ret = ehea_fill_port_res(&port->port_res[i]);
2622 if (ret) {
2623 ehea_error("out_free_irqs");
2624 goto out_free_irqs;
2625 }
2626 }
2627
21eee2dd
TK
2628 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
2629 if (ret) {
2630 ret = -EIO;
2631 goto out_free_irqs;
2632 }
2633
7a291083 2634 port->state = EHEA_PORT_UP;
21eee2dd
TK
2635
2636 ret = 0;
7a291083
JBT
2637 goto out;
2638
2639out_free_irqs:
2640 ehea_free_interrupts(dev);
2641
7a291083
JBT
2642out_clean_pr:
2643 ehea_clean_all_portres(port);
2644out:
44c82152
TK
2645 if (ret)
2646 ehea_info("Failed starting %s. ret=%i", dev->name, ret);
2647
21eee2dd 2648 ehea_update_bcmc_registrations();
21eee2dd 2649 ehea_update_firmware_handles();
21eee2dd 2650
7a291083
JBT
2651 return ret;
2652}
2653
bea3348e
SH
2654static void port_napi_disable(struct ehea_port *port)
2655{
2656 int i;
2657
0173b793 2658 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
bea3348e
SH
2659 napi_disable(&port->port_res[i].napi);
2660}
2661
2662static void port_napi_enable(struct ehea_port *port)
2663{
2664 int i;
2665
0173b793 2666 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
bea3348e
SH
2667 napi_enable(&port->port_res[i].napi);
2668}
2669
7a291083
JBT
2670static int ehea_open(struct net_device *dev)
2671{
2672 int ret;
2673 struct ehea_port *port = netdev_priv(dev);
2674
a5af6ad3 2675 mutex_lock(&port->port_lock);
7a291083
JBT
2676
2677 if (netif_msg_ifup(port))
2678 ehea_info("enabling port %s", dev->name);
2679
2680 ret = ehea_up(dev);
bea3348e
SH
2681 if (!ret) {
2682 port_napi_enable(port);
7a291083 2683 netif_start_queue(dev);
bea3348e 2684 }
7a291083 2685
5b27d427 2686 init_waitqueue_head(&port->swqe_avail_wq);
a8bb69f7 2687 init_waitqueue_head(&port->restart_wq);
5b27d427 2688
a5af6ad3 2689 mutex_unlock(&port->port_lock);
7a291083
JBT
2690
2691 return ret;
2692}
2693
2694static int ehea_down(struct net_device *dev)
2695{
bea3348e 2696 int ret;
7a291083
JBT
2697 struct ehea_port *port = netdev_priv(dev);
2698
2699 if (port->state == EHEA_PORT_DOWN)
2700 return 0;
2701
2702 ehea_drop_multicast_list(dev);
21eee2dd
TK
2703 ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2704
7a291083
JBT
2705 ehea_free_interrupts(dev);
2706
7a291083 2707 port->state = EHEA_PORT_DOWN;
44c82152 2708
21eee2dd 2709 ehea_update_bcmc_registrations();
21eee2dd 2710
44c82152
TK
2711 ret = ehea_clean_all_portres(port);
2712 if (ret)
2713 ehea_info("Failed freeing resources for %s. ret=%i",
2714 dev->name, ret);
2715
21eee2dd 2716 ehea_update_firmware_handles();
21eee2dd 2717
7a291083
JBT
2718 return ret;
2719}
2720
2721static int ehea_stop(struct net_device *dev)
2722{
2723 int ret;
2724 struct ehea_port *port = netdev_priv(dev);
2725
2726 if (netif_msg_ifdown(port))
2727 ehea_info("disabling port %s", dev->name);
2728
2f69ae01 2729 set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
4bb073c0 2730 cancel_work_sync(&port->reset_task);
a5af6ad3 2731 mutex_lock(&port->port_lock);
7a291083 2732 netif_stop_queue(dev);
0173b793 2733 port_napi_disable(port);
7a291083 2734 ret = ehea_down(dev);
a5af6ad3 2735 mutex_unlock(&port->port_lock);
2f69ae01 2736 clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
7a291083
JBT
2737 return ret;
2738}
2739
22559c5d 2740static void ehea_purge_sq(struct ehea_qp *orig_qp)
2c69448b
JBT
2741{
2742 struct ehea_qp qp = *orig_qp;
2743 struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2744 struct ehea_swqe *swqe;
2745 int wqe_index;
2746 int i;
2747
2748 for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
2749 swqe = ehea_get_swqe(&qp, &wqe_index);
2750 swqe->tx_control |= EHEA_SWQE_PURGE;
2751 }
2752}
2753
22559c5d 2754static void ehea_flush_sq(struct ehea_port *port)
44fb3126
TK
2755{
2756 int i;
2757
2758 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
2759 struct ehea_port_res *pr = &port->port_res[i];
2760 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
5b27d427
BL
2761 int ret;
2762
2763 ret = wait_event_timeout(port->swqe_avail_wq,
2764 atomic_read(&pr->swqe_avail) >= swqe_max,
2765 msecs_to_jiffies(100));
2766
2767 if (!ret) {
2768 ehea_error("WARNING: sq not flushed completely");
2769 break;
44fb3126
TK
2770 }
2771 }
2772}
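/*
 * Editor's note: swqe_max above is the level swqe_avail must reach for
 * the queue to count as drained, i.e. the queue size less two (which
 * appears to act as a guard band) less the WQEs posted without a
 * signalled completion; each port resource is waited on for at most
 * 100 ms.
 */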
2773
2c69448b
JBT
2774int ehea_stop_qps(struct net_device *dev)
2775{
2776 struct ehea_port *port = netdev_priv(dev);
2777 struct ehea_adapter *adapter = port->adapter;
508d2b5d 2778 struct hcp_modify_qp_cb0 *cb0;
2c69448b
JBT
2779 int ret = -EIO;
2780 int dret;
2781 int i;
2782 u64 hret;
2783 u64 dummy64 = 0;
2784 u16 dummy16 = 0;
2785
3faf2693 2786 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2c69448b
JBT
2787 if (!cb0) {
2788 ret = -ENOMEM;
2789 goto out;
2790 }
2791
2792 for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
2793 struct ehea_port_res *pr = &port->port_res[i];
2794 struct ehea_qp *qp = pr->qp;
2795
2796 /* Purge send queue */
2797 ehea_purge_sq(qp);
2798
2799 /* Disable queue pair */
2800 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2801 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2802 cb0);
2803 if (hret != H_SUCCESS) {
2804 ehea_error("query_ehea_qp failed (1)");
2805 goto out;
2806 }
2807
2808 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2809 cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;
2810
2811 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2812 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2813 1), cb0, &dummy64,
2814 &dummy64, &dummy16, &dummy16);
2815 if (hret != H_SUCCESS) {
2816 ehea_error("modify_ehea_qp failed (1)");
2817 goto out;
2818 }
2819
2820 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2821 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2822 cb0);
2823 if (hret != H_SUCCESS) {
2824 ehea_error("query_ehea_qp failed (2)");
2825 goto out;
2826 }
2827
2828 /* deregister shared memory regions */
2829 dret = ehea_rem_smrs(pr);
2830 if (dret) {
2831 ehea_error("unreg shared memory region failed");
2832 goto out;
2833 }
2834 }
2835
2836 ret = 0;
2837out:
3faf2693 2838 free_page((unsigned long)cb0);
2c69448b
JBT
2839
2840 return ret;
2841}
2842
508d2b5d 2843void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
2c69448b
JBT
2844{
2845 struct ehea_qp qp = *orig_qp;
2846 struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2847 struct ehea_rwqe *rwqe;
2848 struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
2849 struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
2850 struct sk_buff *skb;
2851 u32 lkey = pr->recv_mr.lkey;
2852
2853
2854 int i;
2855 int index;
2856
2857 for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
2858 rwqe = ehea_get_next_rwqe(&qp, 2);
2859 rwqe->sg_list[0].l_key = lkey;
2860 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2861 skb = skba_rq2[index];
2862 if (skb)
2863 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2864 }
2865
2866 for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
2867 rwqe = ehea_get_next_rwqe(&qp, 3);
2868 rwqe->sg_list[0].l_key = lkey;
2869 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2870 skb = skba_rq3[index];
2871 if (skb)
2872 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2873 }
2874}
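/*
 * Editor's note: after the memory regions are re-registered the receive
 * skbs are still in place but their lkey/vaddr mappings are stale, so
 * the loops above rewrite every RQ2/RQ3 work queue entry with the new
 * lkey and the remapped buffer addresses before the QP is re-enabled.
 */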
2875
2876int ehea_restart_qps(struct net_device *dev)
2877{
2878 struct ehea_port *port = netdev_priv(dev);
2879 struct ehea_adapter *adapter = port->adapter;
2880 int ret = 0;
2881 int i;
2882
508d2b5d 2883 struct hcp_modify_qp_cb0 *cb0;
2c69448b
JBT
2884 u64 hret;
2885 u64 dummy64 = 0;
2886 u16 dummy16 = 0;
2887
3faf2693 2888 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2c69448b
JBT
2889 if (!cb0) {
2890 ret = -ENOMEM;
2891 goto out;
2892 }
2893
2894 for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
2895 struct ehea_port_res *pr = &port->port_res[i];
2896 struct ehea_qp *qp = pr->qp;
2897
2898 ret = ehea_gen_smrs(pr);
2899 if (ret) {
2900 ehea_error("creation of shared memory regions failed");
2901 goto out;
2902 }
2903
2904 ehea_update_rqs(qp, pr);
2905
2906 /* Enable queue pair */
2907 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2908 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2909 cb0);
2910 if (hret != H_SUCCESS) {
2911 ehea_error("query_ehea_qp failed (1)");
2912 goto out;
2913 }
2914
2915 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2916 cb0->qp_ctl_reg |= H_QP_CR_ENABLED;
2917
2918 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2919 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2920 1), cb0, &dummy64,
2921 &dummy64, &dummy16, &dummy16);
2922 if (hret != H_SUCCESS) {
2923 ehea_error("modify_ehea_qp failed (1)");
2924 goto out;
2925 }
2926
2927 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2928 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2929 cb0);
2930 if (hret != H_SUCCESS) {
2931 ehea_error("query_ehea_qp failed (2)");
2932 goto out;
2933 }
2934
2935 /* refill entire queue */
2936 ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
2937 ehea_refill_rq2(pr, 0);
2938 ehea_refill_rq3(pr, 0);
2939 }
2940out:
3faf2693 2941 free_page((unsigned long)cb0);
2c69448b
JBT
2942
2943 return ret;
2944}
2945
c4028958 2946static void ehea_reset_port(struct work_struct *work)
7a291083
JBT
2947{
2948 int ret;
c4028958
DH
2949 struct ehea_port *port =
2950 container_of(work, struct ehea_port, reset_task);
2951 struct net_device *dev = port->netdev;
7a291083 2952
099473c1 2953 mutex_lock(&dlpar_mem_lock);
7a291083 2954 port->resets++;
a5af6ad3 2955 mutex_lock(&port->port_lock);
7a291083 2956 netif_stop_queue(dev);
bea3348e
SH
2957
2958 port_napi_disable(port);
7a291083 2959
44c82152 2960 ehea_down(dev);
7a291083
JBT
2961
2962 ret = ehea_up(dev);
44c82152 2963 if (ret)
7a291083 2964 goto out;
7a291083 2965
2c69448b
JBT
2966 ehea_set_multicast_list(dev);
2967
7a291083
JBT
2968 if (netif_msg_timer(port))
2969 ehea_info("Device %s resetted successfully", dev->name);
2970
bea3348e
SH
2971 port_napi_enable(port);
2972
7a291083
JBT
2973 netif_wake_queue(dev);
2974out:
a5af6ad3 2975 mutex_unlock(&port->port_lock);
099473c1 2976 mutex_unlock(&dlpar_mem_lock);
7a291083
JBT
2977}
2978
44c82152
TK
2979static void ehea_rereg_mrs(struct work_struct *work)
2980{
2981 int ret, i;
2982 struct ehea_adapter *adapter;
2983
d4f12daf 2984 ehea_info("LPAR memory changed - re-initializing driver");
44c82152
TK
2985
2986 list_for_each_entry(adapter, &adapter_list, list)
2987 if (adapter->active_ports) {
2988 /* Shutdown all ports */
2989 for (i = 0; i < EHEA_MAX_PORTS; i++) {
2990 struct ehea_port *port = adapter->port[i];
a5af6ad3 2991 struct net_device *dev;
44c82152 2992
a5af6ad3
DW
2993 if (!port)
2994 continue;
44c82152 2995
a5af6ad3
DW
2996 dev = port->netdev;
2997
2998 if (dev->flags & IFF_UP) {
2999 mutex_lock(&port->port_lock);
3000 netif_stop_queue(dev);
df39e8ba 3001 ehea_flush_sq(port);
a5af6ad3
DW
3002 ret = ehea_stop_qps(dev);
3003 if (ret) {
3004 mutex_unlock(&port->port_lock);
3005 goto out;
44c82152 3006 }
a5af6ad3
DW
3007 port_napi_disable(port);
3008 mutex_unlock(&port->port_lock);
44c82152 3009 }
2928db4c 3010 reset_sq_restart_flag(port);
44c82152
TK
3011 }
3012
3013 /* Unregister old memory region */
3014 ret = ehea_rem_mr(&adapter->mr);
3015 if (ret) {
3016 ehea_error("unregister MR failed - driver"
3017 " inoperable!");
3018 goto out;
3019 }
3020 }
3021
44c82152
TK
3022 clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3023
3024 list_for_each_entry(adapter, &adapter_list, list)
3025 if (adapter->active_ports) {
3026 /* Register new memory region */
3027 ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
3028 if (ret) {
3029 ehea_error("register MR failed - driver"
3030 " inoperable!");
3031 goto out;
3032 }
3033
3034 /* Restart all ports */
3035 for (i = 0; i < EHEA_MAX_PORTS; i++) {
3036 struct ehea_port *port = adapter->port[i];
3037
3038 if (port) {
3039 struct net_device *dev = port->netdev;
3040
3041 if (dev->flags & IFF_UP) {
a5af6ad3 3042 mutex_lock(&port->port_lock);
2c69448b
JBT
3043 port_napi_enable(port);
3044 ret = ehea_restart_qps(dev);
2928db4c 3045 check_sqs(port);
2c69448b 3046 if (!ret)
44c82152 3047 netif_wake_queue(dev);
a5af6ad3 3048 mutex_unlock(&port->port_lock);
44c82152
TK
3049 }
3050 }
3051 }
3052 }
68905eb4 3053 ehea_info("re-initializing driver complete");
44c82152
TK
3054out:
3055 return;
3056}
3057
7a291083
JBT
3058static void ehea_tx_watchdog(struct net_device *dev)
3059{
3060 struct ehea_port *port = netdev_priv(dev);
3061
2c69448b
JBT
3062 if (netif_carrier_ok(dev) &&
3063 !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
2f69ae01 3064 ehea_schedule_port_reset(port);
7a291083
JBT
3065}
3066
3067int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
3068{
3069 struct hcp_query_ehea *cb;
3070 u64 hret;
3071 int ret;
3072
3faf2693 3073 cb = (void *)get_zeroed_page(GFP_KERNEL);
7a291083
JBT
3074 if (!cb) {
3075 ret = -ENOMEM;
3076 goto out;
3077 }
3078
3079 hret = ehea_h_query_ehea(adapter->handle, cb);
3080
3081 if (hret != H_SUCCESS) {
3082 ret = -EIO;
3083 goto out_herr;
3084 }
3085
7a291083
JBT
3086 adapter->max_mc_mac = cb->max_mc_mac - 1;
3087 ret = 0;
3088
3089out_herr:
3faf2693 3090 free_page((unsigned long)cb);
7a291083
JBT
3091out:
3092 return ret;
3093}
3094
1acf2318 3095int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
7a291083 3096{
7a291083 3097 struct hcp_ehea_port_cb4 *cb4;
1acf2318
JBT
3098 u64 hret;
3099 int ret = 0;
7a291083 3100
1acf2318 3101 *jumbo = 0;
7a291083 3102
1acf2318 3103 /* (Try to) enable jumbo frames */
3faf2693 3104 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
7a291083
JBT
3105 if (!cb4) {
3106 ehea_error("no mem for cb4");
1acf2318
JBT
3107 ret = -ENOMEM;
3108 goto out;
7a291083 3109 } else {
1acf2318 3110 hret = ehea_h_query_ehea_port(port->adapter->handle,
9c750b7d
TK
3111 port->logical_port_id,
3112 H_PORT_CB4,
3113 H_PORT_CB4_JUMBO, cb4);
9c750b7d
TK
3114 if (hret == H_SUCCESS) {
3115 if (cb4->jumbo_frame)
1acf2318 3116 *jumbo = 1;
9c750b7d
TK
3117 else {
3118 cb4->jumbo_frame = 1;
1acf2318
JBT
3119 hret = ehea_h_modify_ehea_port(port->adapter->handle,
3120 port->logical_port_id,
3121 H_PORT_CB4,
3122 H_PORT_CB4_JUMBO,
3123 cb4);
3126 if (hret == H_SUCCESS)
1acf2318 3127 *jumbo = 1;
9c750b7d 3128 }
1acf2318
JBT
3129 } else
3130 ret = -EINVAL;
3131
3faf2693 3132 free_page((unsigned long)cb4);
7a291083 3133 }
1acf2318
JBT
3134out:
3135 return ret;
3136}
3137
3138static ssize_t ehea_show_port_id(struct device *dev,
3139 struct device_attribute *attr, char *buf)
3140{
3141 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
a8e34fda 3142 return sprintf(buf, "%d", port->logical_port_id);
1acf2318
JBT
3143}
3144
3145static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
3146 NULL);
3147
3148static void __devinit logical_port_release(struct device *dev)
3149{
3150 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
61c7a080 3151 of_node_put(port->ofdev.dev.of_node);
1acf2318
JBT
3152}
3153
3154static struct device *ehea_register_port(struct ehea_port *port,
3155 struct device_node *dn)
3156{
3157 int ret;
3158
61c7a080 3159 port->ofdev.dev.of_node = of_node_get(dn);
6b08f3ae 3160 port->ofdev.dev.parent = &port->adapter->ofdev->dev;
d1dea38d 3161 port->ofdev.dev.bus = &ibmebus_bus_type;
1acf2318 3162
db1d7bf7 3163 dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
1acf2318
JBT
3164 port->ofdev.dev.release = logical_port_release;
3165
3166 ret = of_device_register(&port->ofdev);
3167 if (ret) {
3168 ehea_error("failed to register device. ret=%d", ret);
3169 goto out;
3170 }
3171
3172 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
d1d25aab 3173 if (ret) {
1acf2318
JBT
3174 ehea_error("failed to register attributes, ret=%d", ret);
3175 goto out_unreg_of_dev;
3176 }
e542aa6b 3177
1acf2318
JBT
3178 return &port->ofdev.dev;
3179
3180out_unreg_of_dev:
3181 of_device_unregister(&port->ofdev);
3182out:
3183 return NULL;
3184}
3185
3186static void ehea_unregister_port(struct ehea_port *port)
3187{
3188 device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
3189 of_device_unregister(&port->ofdev);
3190}
3191
086c1b2c
TK
3192static const struct net_device_ops ehea_netdev_ops = {
3193 .ndo_open = ehea_open,
3194 .ndo_stop = ehea_stop,
3195 .ndo_start_xmit = ehea_start_xmit,
3196#ifdef CONFIG_NET_POLL_CONTROLLER
3197 .ndo_poll_controller = ehea_netpoll,
3198#endif
3199 .ndo_get_stats = ehea_get_stats,
3200 .ndo_set_mac_address = ehea_set_mac_addr,
240c102d 3201 .ndo_validate_addr = eth_validate_addr,
086c1b2c
TK
3202 .ndo_set_multicast_list = ehea_set_multicast_list,
3203 .ndo_change_mtu = ehea_change_mtu,
3204 .ndo_vlan_rx_register = ehea_vlan_rx_register,
3205 .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid,
32e8f9a8
AB
3206 .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid,
3207 .ndo_tx_timeout = ehea_tx_watchdog,
086c1b2c
TK
3208};
3209
1acf2318
JBT
3210struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3211 u32 logical_port_id,
3212 struct device_node *dn)
3213{
3214 int ret;
3215 struct net_device *dev;
3216 struct ehea_port *port;
3217 struct device *port_dev;
3218 int jumbo;
3219
3220 /* allocate memory for the port structures */
3221 dev = alloc_etherdev(sizeof(struct ehea_port));
3222
3223 if (!dev) {
3224 ehea_error("no mem for net_device");
3225 ret = -ENOMEM;
3226 goto out_err;
3227 }
3228
3229 port = netdev_priv(dev);
3230
a5af6ad3 3231 mutex_init(&port->port_lock);
1acf2318
JBT
3232 port->state = EHEA_PORT_DOWN;
3233 port->sig_comp_iv = sq_entries / 10;
3234
3235 port->adapter = adapter;
3236 port->netdev = dev;
3237 port->logical_port_id = logical_port_id;
3238
3239 port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
3240
3241 port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
3242 if (!port->mc_list) {
3243 ret = -ENOMEM;
3244 goto out_free_ethdev;
3245 }
3246
3247 INIT_LIST_HEAD(&port->mc_list->list);
3248
3249 ret = ehea_sense_port_attr(port);
3250 if (ret)
3251 goto out_free_mc_list;
3252
3253 port_dev = ehea_register_port(port, dn);
3254 if (!port_dev)
3255 goto out_free_mc_list;
3256
3257 SET_NETDEV_DEV(dev, port_dev);
7a291083
JBT
3258
3259 /* initialize net_device structure */
7a291083
JBT
3260 memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
3261
086c1b2c
TK
3262 dev->netdev_ops = &ehea_netdev_ops;
3263 ehea_set_ethtool_ops(dev);
3264
7a291083 3265 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
dc01c447 3266 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
7a291083
JBT
3267 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
3268 | NETIF_F_LLTX;
7a291083
JBT
3269 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
3270
c4028958 3271 INIT_WORK(&port->reset_task, ehea_reset_port);
7a291083
JBT
3272
3273 ret = register_netdev(dev);
3274 if (ret) {
3275 ehea_error("register_netdev failed. ret=%d", ret);
21eee2dd 3276 goto out_unreg_port;
7a291083
JBT
3277 }
3278
d4dc4ec9
JBT
3279 port->lro_max_aggr = lro_max_aggr;
3280
1acf2318 3281 ret = ehea_get_jumboframe_status(port, &jumbo);
e542aa6b 3282 if (ret)
1acf2318
JBT
3283 ehea_error("failed determining jumbo frame status for %s",
3284 port->netdev->name);
3285
9c750b7d
TK
3286 ehea_info("%s: Jumbo frames are %sabled", dev->name,
3287 jumbo == 1 ? "en" : "dis");
3288
44c82152
TK
3289 adapter->active_ports++;
3290
1acf2318 3291 return port;
7a291083 3292
1acf2318
JBT
3293out_unreg_port:
3294 ehea_unregister_port(port);
3295
3296out_free_mc_list:
7a291083 3297 kfree(port->mc_list);
1acf2318
JBT
3298
3299out_free_ethdev:
3300 free_netdev(dev);
3301
3302out_err:
3303 ehea_error("setting up logical port with id=%d failed, ret=%d",
3304 logical_port_id, ret);
3305 return NULL;
3306}
3307
3308static void ehea_shutdown_single_port(struct ehea_port *port)
3309{
7fb1c2ac 3310 struct ehea_adapter *adapter = port->adapter;
1acf2318
JBT
3311 unregister_netdev(port->netdev);
3312 ehea_unregister_port(port);
3313 kfree(port->mc_list);
3314 free_netdev(port->netdev);
7fb1c2ac 3315 adapter->active_ports--;
7a291083
JBT
3316}
3317
3318static int ehea_setup_ports(struct ehea_adapter *adapter)
3319{
1acf2318
JBT
3320 struct device_node *lhea_dn;
3321 struct device_node *eth_dn = NULL;
d1d25aab 3322
9f9a3b8a 3323 const u32 *dn_log_port_id;
1acf2318
JBT
3324 int i = 0;
3325
61c7a080 3326 lhea_dn = adapter->ofdev->dev.of_node;
1eef4e04 3327 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
e542aa6b 3328
40cd3a45 3329 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
d1d25aab 3330 NULL);
1acf2318
JBT
3331 if (!dn_log_port_id) {
3332 ehea_error("bad device node: eth_dn name=%s",
3333 eth_dn->full_name);
3334 continue;
3335 }
7a291083 3336
1211bb6d
TK
3337 if (ehea_add_adapter_mr(adapter)) {
3338 ehea_error("creating MR failed");
3339 of_node_put(eth_dn);
3340 return -EIO;
3341 }
3342
1acf2318
JBT
3343 adapter->port[i] = ehea_setup_single_port(adapter,
3344 *dn_log_port_id,
3345 eth_dn);
7a291083 3346 if (adapter->port[i])
1acf2318 3347 ehea_info("%s -> logical port id #%d",
e542aa6b 3348 adapter->port[i]->netdev->name,
1acf2318 3349 *dn_log_port_id);
1211bb6d
TK
3350 else
3351 ehea_remove_adapter_mr(adapter);
3352
1acf2318 3353 i++;
ee289b64 3354 }
1211bb6d 3355 return 0;
1acf2318
JBT
3356}
3357
e542aa6b
JBT
3358static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
3359 u32 logical_port_id)
1acf2318
JBT
3360{
3361 struct device_node *lhea_dn;
3362 struct device_node *eth_dn = NULL;
9f9a3b8a 3363 const u32 *dn_log_port_id;
1acf2318 3364
61c7a080 3365 lhea_dn = adapter->ofdev->dev.of_node;
1eef4e04 3366 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
e542aa6b 3367
40cd3a45 3368 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
d1d25aab 3369 NULL);
1acf2318
JBT
3370 if (dn_log_port_id)
3371 if (*dn_log_port_id == logical_port_id)
3372 return eth_dn;
ee289b64 3373 }
1acf2318
JBT
3374
3375 return NULL;
3376}
3377
3378static ssize_t ehea_probe_port(struct device *dev,
3379 struct device_attribute *attr,
3380 const char *buf, size_t count)
3381{
c7ae011d 3382 struct ehea_adapter *adapter = dev_get_drvdata(dev);
1acf2318
JBT
3383 struct ehea_port *port;
3384 struct device_node *eth_dn = NULL;
3385 int i;
3386
3387 u32 logical_port_id;
3388
a8e34fda 3389 sscanf(buf, "%d", &logical_port_id);
1acf2318
JBT
3390
3391 port = ehea_get_port(adapter, logical_port_id);
3392
3393 if (port) {
3394 ehea_info("adding port with logical port id=%d failed. port "
3395 "already configured as %s.", logical_port_id,
3396 port->netdev->name);
3397 return -EINVAL;
7a291083 3398 }
e542aa6b 3399
1acf2318 3400 eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
7a291083 3401
1acf2318
JBT
3402 if (!eth_dn) {
3403 ehea_info("no logical port with id %d found", logical_port_id);
3404 return -EINVAL;
3405 }
e542aa6b 3406
1211bb6d
TK
3407 if (ehea_add_adapter_mr(adapter)) {
3408 ehea_error("creating MR failed");
3409 return -EIO;
3410 }
3411
1acf2318 3412 port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
7a291083 3413
1eef4e04
JBT
3414 of_node_put(eth_dn);
3415
1acf2318 3416 if (port) {
508d2b5d 3417 for (i = 0; i < EHEA_MAX_PORTS; i++)
1acf2318
JBT
3418 if (!adapter->port[i]) {
3419 adapter->port[i] = port;
3420 break;
3421 }
7a291083 3422
1acf2318
JBT
3423 ehea_info("added %s (logical port id=%d)", port->netdev->name,
3424 logical_port_id);
1211bb6d
TK
3425 } else {
3426 ehea_remove_adapter_mr(adapter);
e542aa6b 3427 return -EIO;
1211bb6d 3428 }
7a291083 3429
1acf2318
JBT
3430 return (ssize_t) count;
3431}
3432
3433static ssize_t ehea_remove_port(struct device *dev,
3434 struct device_attribute *attr,
3435 const char *buf, size_t count)
3436{
c7ae011d 3437 struct ehea_adapter *adapter = dev_get_drvdata(dev);
1acf2318
JBT
3438 struct ehea_port *port;
3439 int i;
3440 u32 logical_port_id;
3441
a8e34fda 3442 sscanf(buf, "%d", &logical_port_id);
1acf2318
JBT
3443
3444 port = ehea_get_port(adapter, logical_port_id);
3445
3446 if (port) {
3447 ehea_info("removed %s (logical port id=%d)", port->netdev->name,
3448 logical_port_id);
3449
3450 ehea_shutdown_single_port(port);
3451
508d2b5d 3452 for (i = 0; i < EHEA_MAX_PORTS; i++)
1acf2318
JBT
3453 if (adapter->port[i] == port) {
3454 adapter->port[i] = NULL;
3455 break;
3456 }
3457 } else {
3458 ehea_error("removing port with logical port id=%d failed. port "
3459 "not configured.", logical_port_id);
3460 return -EINVAL;
3461 }
3462
1211bb6d
TK
3463 ehea_remove_adapter_mr(adapter);
3464
1acf2318
JBT
3465 return (ssize_t) count;
3466}
3467
3468static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
3469static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
3470
2dc11581 3471int ehea_create_device_sysfs(struct platform_device *dev)
1acf2318 3472{
6b08f3ae 3473 int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
1acf2318
JBT
3474 if (ret)
3475 goto out;
3476
6b08f3ae 3477 ret = device_create_file(&dev->dev, &dev_attr_remove_port);
1acf2318 3478out:
7a291083
JBT
3479 return ret;
3480}
3481
2dc11581 3482void ehea_remove_device_sysfs(struct platform_device *dev)
1acf2318 3483{
6b08f3ae
JF
3484 device_remove_file(&dev->dev, &dev_attr_probe_port);
3485 device_remove_file(&dev->dev, &dev_attr_remove_port);
1acf2318
JBT
3486}
3487
2dc11581 3488static int __devinit ehea_probe_adapter(struct platform_device *dev,
1acf2318 3489 const struct of_device_id *id)
7a291083
JBT
3490{
3491 struct ehea_adapter *adapter;
9f9a3b8a 3492 const u64 *adapter_handle;
7a291083
JBT
3493 int ret;
3494
61c7a080 3495 if (!dev || !dev->dev.of_node) {
1eef4e04
JBT
3496 ehea_error("Invalid ibmebus device probed");
3497 return -EINVAL;
3498 }
3499
7a291083
JBT
3500 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3501 if (!adapter) {
3502 ret = -ENOMEM;
6b08f3ae 3503 dev_err(&dev->dev, "no mem for ehea_adapter\n");
7a291083
JBT
3504 goto out;
3505 }
3506
44c82152
TK
3507 list_add(&adapter->list, &adapter_list);
3508
6b08f3ae 3509 adapter->ofdev = dev;
1acf2318 3510
61c7a080 3511 adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
d1d25aab 3512 NULL);
061bf3cd
TK
3513 if (adapter_handle)
3514 adapter->handle = *adapter_handle;
3515
3516 if (!adapter->handle) {
6b08f3ae 3517 dev_err(&dev->dev, "failed getting handle for adapter"
61c7a080 3518 " '%s'\n", dev->dev.of_node->full_name);
7a291083
JBT
3519 ret = -ENODEV;
3520 goto out_free_ad;
3521 }
3522
7a291083
JBT
3523 adapter->pd = EHEA_PD_ID;
3524
c7ae011d 3525 dev_set_drvdata(&dev->dev, adapter);
7a291083 3526
7a291083
JBT
3527
3528 /* initialize adapter and ports */
3529 /* get adapter properties */
3530 ret = ehea_sense_adapter_attr(adapter);
3531 if (ret) {
898eb71c 3532 dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
1211bb6d 3533 goto out_free_ad;
7a291083 3534 }
7a291083
JBT
3535
3536 adapter->neq = ehea_create_eq(adapter,
3537 EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
3538 if (!adapter->neq) {
1eef4e04 3539 ret = -EIO;
898eb71c 3540 dev_err(&dev->dev, "NEQ creation failed\n");
1211bb6d 3541 goto out_free_ad;
7a291083
JBT
3542 }
3543
3544 tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
3545 (unsigned long)adapter);
3546
6b08f3ae 3547 ret = ibmebus_request_irq(adapter->neq->attr.ist1,
38515e90 3548 ehea_interrupt_neq, IRQF_DISABLED,
7a291083
JBT
3549 "ehea_neq", adapter);
3550 if (ret) {
898eb71c 3551 dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
7a291083
JBT
3552 goto out_kill_eq;
3553 }
3554
1eef4e04
JBT
3555 ret = ehea_create_device_sysfs(dev);
3556 if (ret)
3bf76b81 3557 goto out_free_irq;
1acf2318 3558
7a291083
JBT
3559 ret = ehea_setup_ports(adapter);
3560 if (ret) {
898eb71c 3561 dev_err(&dev->dev, "setup_ports failed\n");
1acf2318 3562 goto out_rem_dev_sysfs;
7a291083
JBT
3563 }
3564
3565 ret = 0;
3566 goto out;
3567
1acf2318
JBT
3568out_rem_dev_sysfs:
3569 ehea_remove_device_sysfs(dev);
3570
7a291083 3571out_free_irq:
6b08f3ae 3572 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
7a291083
JBT
3573
3574out_kill_eq:
3575 ehea_destroy_eq(adapter->neq);
3576
7a291083 3577out_free_ad:
51621fbd 3578 list_del(&adapter->list);
7a291083 3579 kfree(adapter);
21eee2dd 3580
7a291083 3581out:
21eee2dd 3582 ehea_update_firmware_handles();
52e21b1b 3583
7a291083
JBT
3584 return ret;
3585}
3586
2dc11581 3587static int __devexit ehea_remove(struct platform_device *dev)
7a291083 3588{
c7ae011d 3589 struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
7a291083
JBT
3590 int i;
3591
1acf2318 3592 for (i = 0; i < EHEA_MAX_PORTS; i++)
7a291083
JBT
3593 if (adapter->port[i]) {
3594 ehea_shutdown_single_port(adapter->port[i]);
3595 adapter->port[i] = NULL;
3596 }
1acf2318
JBT
3597
3598 ehea_remove_device_sysfs(dev);
3599
3bf76b81 3600 flush_scheduled_work();
7a291083 3601
6b08f3ae 3602 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
d4150a27 3603 tasklet_kill(&adapter->neq_tasklet);
7a291083
JBT
3604
3605 ehea_destroy_eq(adapter->neq);
1211bb6d 3606 ehea_remove_adapter_mr(adapter);
44c82152 3607 list_del(&adapter->list);
7a291083 3608 kfree(adapter);
44c82152 3609
21eee2dd 3610 ehea_update_firmware_handles();
21eee2dd 3611
7a291083
JBT
3612 return 0;
3613}
3614
21eee2dd
TK
3615void ehea_crash_handler(void)
3616{
3617 int i;
3618
3619 if (ehea_fw_handles.arr)
3620 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3621 ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3622 ehea_fw_handles.arr[i].fwh,
3623 FORCE_FREE);
3624
3625 if (ehea_bcmc_regs.arr)
3626 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3627 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3628 ehea_bcmc_regs.arr[i].port_id,
3629 ehea_bcmc_regs.arr[i].reg_type,
3630 ehea_bcmc_regs.arr[i].macaddr,
3631 0, H_DEREG_BCMC);
3632}
3633
48cfb14f
HH
3634static int ehea_mem_notifier(struct notifier_block *nb,
3635 unsigned long action, void *data)
3636{
a7c561f2 3637 int ret = NOTIFY_BAD;
d4f12daf 3638 struct memory_notify *arg = data;
a7c561f2 3639
099473c1 3640 mutex_lock(&dlpar_mem_lock);
a7c561f2 3641
48cfb14f 3642 switch (action) {
d4f12daf
HH
3643 case MEM_CANCEL_OFFLINE:
3644 ehea_info("memory offlining canceled");
3645 /* Re-add the canceled memory block; fall through */
3646 case MEM_ONLINE:
3647 ehea_info("memory is going online");
3876732c 3648 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
d4f12daf 3649 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
a7c561f2 3650 goto out_unlock;
d4f12daf
HH
3651 ehea_rereg_mrs(NULL);
3652 break;
3653 case MEM_GOING_OFFLINE:
3654 ehea_info("memory is going offline");
3876732c 3655 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
d4f12daf 3656 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
a7c561f2 3657 goto out_unlock;
48cfb14f
HH
3658 ehea_rereg_mrs(NULL);
3659 break;
3660 default:
3661 break;
3662 }
52e21b1b
JBT
3663
3664 ehea_update_firmware_handles();
a7c561f2 3665 ret = NOTIFY_OK;
52e21b1b 3666
a7c561f2
TK
3667out_unlock:
3668 mutex_unlock(&dlpar_mem_lock);
a7c561f2 3669 return ret;
48cfb14f
HH
3670}
3671
3672static struct notifier_block ehea_mem_nb = {
3673 .notifier_call = ehea_mem_notifier,
3674};
3675
2a6f4e49
JBT
3676static int ehea_reboot_notifier(struct notifier_block *nb,
3677 unsigned long action, void *unused)
3678{
3679 if (action == SYS_RESTART) {
3680 ehea_info("Reboot: freeing all eHEA resources");
3681 ibmebus_unregister_driver(&ehea_driver);
3682 }
3683 return NOTIFY_DONE;
3684}
3685
3686static struct notifier_block ehea_reboot_nb = {
508d2b5d 3687 .notifier_call = ehea_reboot_notifier,
2a6f4e49
JBT
3688};
3689
7a291083
JBT
3690static int check_module_parm(void)
3691{
3692 int ret = 0;
3693
3694 if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
3695 (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
3696 ehea_info("Bad parameter: rq1_entries");
3697 ret = -EINVAL;
3698 }
3699 if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
3700 (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
3701 ehea_info("Bad parameter: rq2_entries");
3702 ret = -EINVAL;
3703 }
3704 if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
3705 (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
3706 ehea_info("Bad parameter: rq3_entries");
3707 ret = -EINVAL;
3708 }
3709 if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
3710 (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
3711 ehea_info("Bad parameter: sq_entries");
3712 ret = -EINVAL;
3713 }
3714
3715 return ret;
3716}
3717
4c3ca4da
JBT
3718static ssize_t ehea_show_capabilities(struct device_driver *drv,
3719 char *buf)
3720{
3721 return sprintf(buf, "%d", EHEA_CAPABILITIES);
3722}
3723
3724static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
3725 ehea_show_capabilities, NULL);
3726
7a291083
JBT
3727int __init ehea_module_init(void)
3728{
3729 int ret;
3730
3731 printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
3732 DRV_VERSION);
3733
44c82152
TK
3734
3735 INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
21eee2dd
TK
3736 memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
3737 memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
3738
9f71a568 3739 mutex_init(&ehea_fw_handles.lock);
5c2cec14 3740 spin_lock_init(&ehea_bcmc_regs.lock);
44c82152 3741
7a291083
JBT
3742 ret = check_module_parm();
3743 if (ret)
3744 goto out;
44c82152
TK
3745
3746 ret = ehea_create_busmap();
3747 if (ret)
3748 goto out;
3749
21eee2dd
TK
3750 ret = register_reboot_notifier(&ehea_reboot_nb);
3751 if (ret)
3752 ehea_info("failed registering reboot notifier");
3753
48cfb14f
HH
3754 ret = register_memory_notifier(&ehea_mem_nb);
3755 if (ret)
3756 ehea_info("failed registering memory remove notifier");
3757
c061b18d 3758 ret = crash_shutdown_register(ehea_crash_handler);
21eee2dd
TK
3759 if (ret)
3760 ehea_info("failed registering crash handler");
2a6f4e49 3761
7a291083 3762 ret = ibmebus_register_driver(&ehea_driver);
4c3ca4da 3763 if (ret) {
7a291083 3764 ehea_error("failed registering eHEA device driver on ebus");
21eee2dd 3765 goto out2;
4c3ca4da
JBT
3766 }
3767
3768 ret = driver_create_file(&ehea_driver.driver,
3769 &driver_attr_capabilities);
3770 if (ret) {
3771 ehea_error("failed to register capabilities attribute, ret=%d",
3772 ret);
21eee2dd 3773 goto out3;
4c3ca4da 3774 }
7a291083 3775
21eee2dd
TK
3776 return ret;
3777
3778out3:
3779 ibmebus_unregister_driver(&ehea_driver);
3780out2:
48cfb14f 3781 unregister_memory_notifier(&ehea_mem_nb);
21eee2dd 3782 unregister_reboot_notifier(&ehea_reboot_nb);
c061b18d 3783 crash_shutdown_unregister(ehea_crash_handler);
7a291083
JBT
3784out:
3785 return ret;
3786}
3787
3788static void __exit ehea_module_exit(void)
3789{
21eee2dd
TK
3790 int ret;
3791
3bf76b81 3792 flush_scheduled_work();
4c3ca4da 3793 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
7a291083 3794 ibmebus_unregister_driver(&ehea_driver);
2a6f4e49 3795 unregister_reboot_notifier(&ehea_reboot_nb);
c061b18d 3796 ret = crash_shutdown_unregister(ehea_crash_handler);
21eee2dd
TK
3797 if (ret)
3798 ehea_info("failed unregistering crash handler");
48cfb14f 3799 unregister_memory_notifier(&ehea_mem_nb);
21eee2dd
TK
3800 kfree(ehea_fw_handles.arr);
3801 kfree(ehea_bcmc_regs.arr);
44c82152 3802 ehea_destroy_busmap();
7a291083
JBT
3803}
3804
3805module_init(ehea_module_init);
3806module_exit(ehea_module_exit);