/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	 Christoph Raisch <raisch@de.ibm.com>
 *	 Jan-Bernd Themann <themann@de.ibm.com>
 *	 Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>

#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);


static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs;
static int use_lro;
static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
static int num_tx_qps = EHEA_NUM_TX_QP;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
module_param(use_lro, int, 0);
module_param(lro_max_aggr, int, 0);
module_param(num_tx_qps, int, 0);

MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 ");

MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = "
		 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
		 "Default = 0");

static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
struct work_struct ehea_rereg_mr_task;
static DEFINE_MUTEX(dlpar_mem_lock);
struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;


static int __devinit ehea_probe_adapter(struct of_device *dev,
					const struct of_device_id *id);

static int __devexit ehea_remove(struct of_device *dev);

static struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct of_platform_driver ehea_driver = {
	.name = "ehea",
	.match_table = ehea_device_table,
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};

void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;
	for (x = 0; x < len; x += 16) {
		printk(DRV_NAME " %s adr=%p ofs=%04x %016llx %016llx\n", msg,
		       deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}

void ehea_schedule_port_reset(struct ehea_port *port)
{
	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
		schedule_work(&port->reset_task);
}

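/* Snapshot every firmware handle (QPs, CQs, EQs, MRs) owned by all
 * registered adapters into one flat array. The array is rebuilt in two
 * passes under ehea_fw_handles.lock: first count, then fill. Judging by
 * the <asm/kexec.h> include, the consumer is the kexec/crash path, which
 * must be able to free these handles without walking driver structures.
 */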
static void ehea_update_firmware_handles(void)
{
	struct ehea_fw_handle_entry *arr = NULL;
	struct ehea_adapter *adapter;
	int num_adapters = 0;
	int num_ports = 0;
	int num_portres = 0;
	int i = 0;
	int num_fw_handles, k, l;

	/* Determine number of handles */
	mutex_lock(&ehea_fw_handles.lock);

	list_for_each_entry(adapter, &adapter_list, list) {
		num_adapters++;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_ports++;
			num_portres += port->num_def_qps + port->num_add_tx_qps;
		}
	}

	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

	if (num_fw_handles) {
		arr = kzalloc(num_fw_handles * sizeof(*arr), GFP_KERNEL);
		if (!arr)
			goto out;	/* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		if (num_adapters == 0)
			break;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP) ||
			    (num_ports == 0))
				continue;

			for (l = 0;
			     l < port->num_def_qps + port->num_add_tx_qps;
			     l++) {
				struct ehea_port_res *pr = &port->port_res[l];

				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->qp->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->eq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_mr.handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_mr.handle;
			}
			arr[i].adh = adapter->handle;
			arr[i++].fwh = port->qp_eq->fw_handle;
			num_ports--;
		}

		arr[i].adh = adapter->handle;
		arr[i++].fwh = adapter->neq->fw_handle;

		if (adapter->mr.handle) {
			arr[i].adh = adapter->handle;
			arr[i++].fwh = adapter->mr.handle;
		}
		num_adapters--;
	}

out_update:
	kfree(ehea_fw_handles.arr);
	ehea_fw_handles.arr = arr;
	ehea_fw_handles.num_entries = i;
out:
	mutex_unlock(&ehea_fw_handles.lock);
}

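/* Rebuild the flat mirror of all active broadcast/multicast (BCMC)
 * registrations for the same consumer as above. This runs under a
 * spinlock with IRQs disabled, so the array is allocated GFP_ATOMIC,
 * and the counters double as bounds checks while filling.
 */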
static void ehea_update_bcmc_registrations(void)
{
	unsigned long flags;
	struct ehea_bcmc_reg_entry *arr = NULL;
	struct ehea_adapter *adapter;
	struct ehea_mc_list *mc_entry;
	int num_registrations = 0;
	int i = 0;
	int k;

	spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

	/* Determine number of registrations */
	list_for_each_entry(adapter, &adapter_list, list)
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_registrations += 2;	/* Broadcast registrations */

			list_for_each_entry(mc_entry, &port->mc_list->list, list)
				num_registrations += 2;
		}

	if (num_registrations) {
		arr = kzalloc(num_registrations * sizeof(*arr), GFP_ATOMIC);
		if (!arr)
			goto out;	/* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			if (num_registrations == 0)
				goto out_update;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_UNTAGGED;
			arr[i++].macaddr = port->mac_addr;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_VLANID_ALL;
			arr[i++].macaddr = port->mac_addr;
			num_registrations -= 2;

			list_for_each_entry(mc_entry,
					    &port->mc_list->list, list) {
				if (num_registrations == 0)
					goto out_update;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_UNTAGGED;
				arr[i++].macaddr = mc_entry->macaddr;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_VLANID_ALL;
				arr[i++].macaddr = mc_entry->macaddr;
				num_registrations -= 2;
			}
		}
	}

out_update:
	kfree(ehea_bcmc_regs.arr);
	ehea_bcmc_regs.arr = arr;
	ehea_bcmc_regs.num_entries = i;
out:
	spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}

static struct net_device_stats *ehea_get_stats(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct net_device_stats *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret, rx_packets, tx_packets;
	int i;

	memset(stats, 0, sizeof(*stats));

	cb2 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb2) {
		ehea_error("no mem for cb2");
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	rx_packets = 0;
	for (i = 0; i < port->num_def_qps; i++)
		rx_packets += port->port_res[i].rx_packets;

	tx_packets = 0;
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		tx_packets += port->port_res[i].tx_packets;

	stats->tx_packets = tx_packets;
	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;
	stats->rx_bytes = cb2->rxo;
	stats->tx_bytes = cb2->txo;
	stats->rx_packets = rx_packets;

out_herr:
	free_page((unsigned long)cb2);
out:
	return stats;
}

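/* RQ1 refill walks the ring *backwards* from the last completed index,
 * re-arming only slots whose skb was consumed. If a memory-region rebuild
 * is in flight (__EHEA_STOP_XFER set, see the DLPAR memory hooks), the
 * refill is deferred by parking the count in os_skbs for the next call.
 */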
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
	int adder = 0;
	int i;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		if (nr_of_wqes > 0)
			pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;
		return;
	}

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				pr->rq1_skba.os_skbs = fill_wqes - i;
				break;
			}
		}
		index--;
		index &= max_index_mask;
		adder++;
	}

	if (adder == 0)
		return;

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, adder);
}

static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	for (i = 0; i < pr->rq1_skba.len; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i])
			break;
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, nr_rq1a);
}

static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int adder = 0;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;
	q_skba->os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;
		return ret;
	}

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		u64 tmp_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, packet_size);
		if (!skb) {
			q_skba->os_skbs = fill_wqes - i;
			if (q_skba->os_skbs == q_skba->len - 2) {
				ehea_info("%s: rq%i ran dry - no mem for skb",
					  pr->port->netdev->name, rq_nr);
				ret = -ENOMEM;
			}
			break;
		}

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			dev_kfree_skb(skb);
			q_skba->os_skbs = fill_wqes - i;
			ret = 0;
			break;
		}

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
		adder++;
	}

	q_skba->index = index;
	if (adder == 0)
		goto out;

	/* Ring doorbell */
	iosync();
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);
out:
	return ret;
}


static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE);
}


static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE);
}

static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe)
{
	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */

	skb_put(skb, length);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->protocol = eth_type_trans(skb, dev);
}

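/* The two lookup helpers below pull the skb for a completed WQE out of
 * the ring array and prefetch the *next* slot (skb struct and packet
 * data) so it is warm in cache by the time the following completion is
 * processed.
 */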
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetch(pref);
		prefetch(pref + EHEA_CACHE_LINE);
		prefetch(pref + EHEA_CACHE_LINE * 2);
		prefetch(pref + EHEA_CACHE_LINE * 3);
	}

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);
	}

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}

static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			ehea_error("Critical receive error for QP %d. "
				   "Resetting port.", pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		}
		ehea_schedule_port_reset(pr->port);
		return 1;
	}

	return 0;
}

static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *priv)
{
	struct ehea_cqe *cqe = priv;
	unsigned int ip_len;
	struct iphdr *iph;

	/* non tcp/udp packets */
	if (!cqe->header_length)
		return -1;

	/* non tcp packet */
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*iphdr = iph;

	return 0;
}

static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
			  struct sk_buff *skb)
{
	int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
			      pr->port->vgrp);

	if (use_lro) {
		if (vlan_extracted)
			lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
						     pr->port->vgrp,
						     cqe->vlan_tag,
						     cqe);
		else
			lro_receive_skb(&pr->lro_mgr, skb, cqe);
	} else {
		if (vlan_extracted)
			vlan_hwaccel_receive_skb(skb, pr->port->vgrp,
						 cqe->vlan_tag);
		else
			netif_receive_skb(skb);
	}
}

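/* Receive path. eHEA spreads incoming frames over three receive queues
 * by size: RQ1 is the low-latency queue whose payload arrives inside the
 * CQE itself (copied out below), RQ2 holds mid-sized frames, and RQ3
 * holds jumbo frames up to EHEA_MAX_PACKET_SIZE.
 */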
static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
			   int budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		rmb();
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {
				/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("LL rq1: skb=NULL");

					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb)
						break;
				}
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
					       cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe);
			} else if (rq == 2) {
				/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq2: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq2++;
			} else {
				/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq3: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq3++;
			}

			ehea_proc_skb(pr, cqe, skb);
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}
	if (use_lro)
		lro_flush_all(&pr->lro_mgr);

	pr->rx_packets += processed;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	return processed;
}

static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	unsigned long flags;

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		cqe_counter++;
		rmb();
		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			ehea_error("Send Completion Error: Resetting port");
			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
			ehea_schedule_port_reset(pr->port);
			break;
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {

			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_kfree_skb(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;

		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	spin_lock_irqsave(&pr->netif_queue, flags);

	if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
				  >= pr->swqe_refill_th)) {
		netif_wake_queue(pr->port->netdev);
		pr->queue_stopped = 0;
	}
	spin_unlock_irqrestore(&pr->netif_queue, flags);

	return cqe;
}

#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
#define EHEA_POLL_MAX_CQES 65535

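/* NAPI poll. TX completions are always reaped first; RX is skipped when
 * force_irq is set, i.e. after EHEA_NAPI_POLL_NUM_BEFORE_IRQ consecutive
 * polls, to fall back to interrupt mode under sustained load. The loop
 * closes the standard NAPI race: complete, re-enable CQ events, re-check
 * for work, and reschedule if anything slipped in between.
 */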
static int ehea_poll(struct napi_struct *napi, int budget)
{
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
						napi);
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int force_irq, wqe_index;
	int rx = 0;

	force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);

	if (!force_irq)
		rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while ((rx != budget) || force_irq) {
		pr->poll_counter = 0;
		force_irq = 0;
		napi_complete(napi);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);
		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)
			return rx;

		if (!napi_reschedule(napi))
			return rx;

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
	}

	pr->poll_counter++;
	return rx;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_schedule(&port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	napi_schedule(&pr->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;
	struct ehea_qp *qp;
	u32 qp_token;

	eqe = ehea_poll_eq(port->qp_eq);

	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		ehea_error("QP aff_err: entry=0x%llx, token=0x%x",
			   eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;
		ehea_error_data(port->adapter, qp->fw_handle);
		eqe = ehea_poll_eq(port->qp_eq);
	}

	ehea_schedule_port_reset(port);

	return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       int logical_port)
{
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
}

int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	/* may be called via ehea_neq_tasklet() */
	cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb0) {
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->autoneg = 1;
	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	port->num_tx_qps = num_tx_qps;

	if (port->num_def_qps >= port->num_tx_qps)
		port->num_add_tx_qps = 0;
	else
		port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	free_page((unsigned long)cb0);
out:
	return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			ehea_error("Failed sensing port speed");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			ehea_info("Hypervisor denied setting port speed");
			ret = -EPERM;
		} else {
			ret = -EIO;
			ehea_error("Failed setting port speed");
		}
	}
	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);

	free_page((unsigned long)cb4);
out:
	return ret;
}

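/* Decode one entry from the adapter's notification event queue. Port
 * state changes distinguish the *logical* port (reported to the stack
 * unconditionally) from the *physical* port, whose carrier state is only
 * propagated when the prop_carrier_state module parameter is set.
 */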
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */

		if (!port) {
			ehea_error("unknown portnum %x", portnum);
			break;
		}

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(port->netdev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					ehea_error("failed resensing port "
						   "attributes");
					break;
				}

				if (netif_msg_link(port))
					ehea_info("%s: Logical port up: %dMbps "
						  "%s Duplex",
						  port->netdev->name,
						  port->port_speed,
						  port->full_duplex ==
						  1 ? "Full" : "Half");

				netif_carrier_on(port->netdev);
				netif_wake_queue(port->netdev);
			}
		} else
			if (netif_carrier_ok(port->netdev)) {
				if (netif_msg_link(port))
					ehea_info("%s: Logical port down",
						  port->netdev->name);
				netif_carrier_off(port->netdev);
				netif_stop_queue(port->netdev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			if (netif_msg_link(port))
				ehea_info("%s: Physical port up",
					  port->netdev->name);
			if (prop_carrier_state)
				netif_carrier_on(port->netdev);
		} else {
			port->phy_link = EHEA_PHY_LINK_DOWN;
			if (netif_msg_link(port))
				ehea_info("%s: Physical port down",
					  port->netdev->name);
			if (prop_carrier_state)
				netif_carrier_off(port->netdev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			ehea_info("External switch port is primary port");
		else
			ehea_info("External switch port is backup port");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		ehea_error("Adapter malfunction");
		break;
	case EHEA_EC_PORT_MALFUNC:
		ehea_info("Port malfunction: Device: %s", port->netdev->name);
		netif_carrier_off(port->netdev);
		netif_stop_queue(port->netdev);
		break;
	default:
		ehea_error("unknown event code %x, eqe=0x%llX", ec, eqe);
		break;
	}
}

static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	ehea_debug("eqe=%p", eqe);

	while (eqe) {
		ehea_debug("*eqe=%lx", eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		ehea_debug("next eqe=%p", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}


static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
			       - init_attr->act_nr_rwqes_rq2
			       - init_attr->act_nr_rwqes_rq3 - 1);

	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}

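/* Each port uses one affinity IRQ (QP error events, handled by
 * ehea_qp_aff_irq_handler) plus one receive IRQ per queue, all requested
 * through the ibmebus layer. The error path unwinds only the queue IRQs
 * registered so far before releasing the affinity IRQ.
 */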
static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;


	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  IRQF_DISABLED, port->int_aff_name, port);
	if (ret) {
		ehea_error("failed registering irq for qp_aff_irq_handler:"
			   "ist=%X", port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	if (netif_msg_ifup(port))
		ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
			  "registered", port->qp_eq->attr.ist1);


	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  IRQF_DISABLED, pr->int_send_name,
					  pr);
		if (ret) {
			ehea_error("failed registering irq for ehea_queue "
				   "port_res_nr:%d, ist=%X", i,
				   pr->eq->attr.ist1);
			goto out_free_req;
		}
		if (netif_msg_ifup(port))
			ehea_info("irq_handle 0x%X for function ehea_queue_int "
				  "%d registered", pr->eq->attr.ist1, i);
	}
out:
	return ret;


out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);
	}

out_free_qpeq:
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;

	goto out;

}

static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		if (netif_msg_intr(port))
			ehea_info("free send irq for res %d with handle 0x%X",
				  i, pr->eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	if (netif_msg_intr(port))
		ehea_info("associated event interrupt for handle 0x%X freed",
			  port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

int ehea_gen_smrs(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_free;

	return 0;

out_free:
	ehea_rem_mr(&pr->send_mr);
out:
	ehea_error("Generating SMRS failed\n");
	return -EIO;
}

int ehea_rem_smrs(struct ehea_port_res *pr)
{
	if ((ehea_rem_mr(&pr->send_mr)) ||
	    (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;
	else
		return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vmalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	memset(q_skba->arr, 0, arr_size);

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}

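/* Bring up one port resource set: event queue first, then the receive
 * and send completion queues bound to it, then the QP itself, and last
 * the skb ring arrays and shared memory regions. The error path tears
 * everything down in reverse; the destroy/free helpers tolerate NULL.
 */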
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->port = port;
	spin_lock_init(&pr->xmit_lock);
	spin_lock_init(&pr->netif_queue);

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		ehea_error("create_eq failed (eq)");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		ehea_error("create_cq failed (cq_recv)");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		ehea_error("create_cq failed (cq_send)");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
			  pr->send_cq->attr.act_nr_of_cqes,
			  pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		ehea_error("no mem for ehea_qp_init_attr");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		ehea_error("create_qp failed");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
			  "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
			  init_attr->act_nr_send_wqes,
			  init_attr->act_nr_rwqes_rq1,
			  init_attr->act_nr_rwqes_rq2,
			  init_attr->act_nr_rwqes_rq3);

	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
	pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
	pr->lro_mgr.lro_arr = pr->lro_desc;
	pr->lro_mgr.get_skb_header = get_skb_hdr;
	pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	pr->lro_mgr.dev = port->netdev;
	pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
out:
	return ret;
}

static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	if (pr->qp)
		netif_napi_del(&pr->napi);

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}

/*
 * The write_* functions store information in swqe which is used by
 * the hardware to calculate the ip/tcp/udp checksum
 */

static inline void write_ip_start_end(struct ehea_swqe *swqe,
				      const struct sk_buff *skb)
{
	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
}

static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}

static inline void write_udp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}

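/* For TSO, only the eth/ip/tcp headers are copied into the SWQE's
 * immediate data area; the remaining linear payload is referenced via
 * sg1entry so the adapter can segment in hardware. Fragmented headers
 * (headers not fully in the linear part) are not handled.
 */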
static void write_swqe2_TSO(struct sk_buff *skb,
			    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	int skb_data_size = skb->len - skb->data_len;
	int headersize;

	/* Packet is TCP with TSO enabled */
	swqe->tx_control |= EHEA_SWQE_TSO;
	swqe->mss = skb_shinfo(skb)->gso_size;
	/* copy only eth/ip/tcp headers to immediate data and
	 * the rest of skb->data to sg1entry
	 */
	headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);

	skb_data_size = skb->len - skb->data_len;

	if (skb_data_size >= headersize) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, headersize);
		swqe->immediate_data_length = headersize;

		if (skb_data_size > headersize) {
			/* set sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - headersize;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + headersize);
			swqe->descriptors++;
		}
	} else
		ehea_error("cannot handle fragmented headers");
}

static void write_swqe2_nonTSO(struct sk_buff *skb,
			       struct ehea_swqe *swqe, u32 lkey)
{
	int skb_data_size = skb->len - skb->data_len;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;

	/* Packet is any nonTSO type
	 *
	 * Copy as much as possible skb->data to immediate data and
	 * the rest to sg1entry
	 */
	if (skb_data_size >= SWQE2_MAX_IMM) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);

		swqe->immediate_data_length = SWQE2_MAX_IMM;

		if (skb_data_size > SWQE2_MAX_IMM) {
			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
			swqe->descriptors++;
		}
	} else {
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}

static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	swqe->descriptors = 0;
	sg1entry_contains_frag_data = 0;

	if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
		write_swqe2_TSO(skb, swqe, lkey);
	else
		write_swqe2_nonTSO(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = frag->size;
			sg1entry->vaddr =
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {

			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = frag->size;
			sgentry->vaddr =
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;
		}
	}
}

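/* Broadcast addresses are registered with the hypervisor twice per port:
 * once for untagged traffic and once for all VLAN IDs. The same hcall
 * (H_REG_BCMC / H_DEREG_BCMC) performs registration and deregistration.
 */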
static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("%sregistering bc address failed (tagged)",
			   hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("%sregistering bc address failed (vlan)",
			   hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
	}
out_herr:
	return ret;
}

static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
		if (ret)
			goto out_upregs;
	}

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
		if (ret)
			goto out_upregs;
	}

	ret = 0;

out_upregs:
	ehea_update_bcmc_registrations();
out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		ehea_info("Hypervisor denied %sabling promiscuous mode",
			  enable == 1 ? "en" : "dis");
	else
		ehea_error("failed %sabling promiscuous mode",
			   enable == 1 ? "en" : "dis");
}

static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if ((enable && port->promisc) || (!enable && !port->promisc))
		return;

	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb7) {
		ehea_error("no mem for cb7");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	free_page((unsigned long)cb7);
	return;
}

1865static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
1866 u32 hcallid)
1867{
1868 u64 hret;
1869 u8 reg_type;
1870
1871 reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
1872 | EHEA_BCMC_UNTAGGED;
1873
1874 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1875 port->logical_port_id,
1876 reg_type, mc_mac_addr, 0, hcallid);
1877 if (hret)
1878 goto out;
1879
1880 reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
1881 | EHEA_BCMC_VLANID_ALL;
1882
1883 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1884 port->logical_port_id,
1885 reg_type, mc_mac_addr, 0, hcallid);
1886out:
1887 return hret;
1888}
1889
1890static int ehea_drop_multicast_list(struct net_device *dev)
1891{
1892 struct ehea_port *port = netdev_priv(dev);
1893 struct ehea_mc_list *mc_entry = port->mc_list;
1894 struct list_head *pos;
1895 struct list_head *temp;
1896 int ret = 0;
1897 u64 hret;
1898
1899 list_for_each_safe(pos, temp, &(port->mc_list->list)) {
1900 mc_entry = list_entry(pos, struct ehea_mc_list, list);
1901
1902 hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
1903 H_DEREG_BCMC);
1904 if (hret) {
1905 ehea_error("failed deregistering mcast MAC");
1906 ret = -EIO;
1907 }
1908
1909 list_del(pos);
1910 kfree(mc_entry);
1911 }
1912 return ret;
1913}
1914
1915static void ehea_allmulti(struct net_device *dev, int enable)
1916{
1917 struct ehea_port *port = netdev_priv(dev);
1918 u64 hret;
1919
1920 if (!port->allmulti) {
1921 if (enable) {
1922 /* Enable ALLMULTI */
1923 ehea_drop_multicast_list(dev);
1924 hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
1925 if (!hret)
1926 port->allmulti = 1;
1927 else
1928 ehea_error("failed enabling IFF_ALLMULTI");
1929 }
1930 } else
1931 if (!enable) {
1932 /* Disable ALLMULTI */
1933 hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
1934 if (!hret)
1935 port->allmulti = 0;
1936 else
1937 ehea_error("failed disabling IFF_ALLMULTI");
1938 }
1939}
1940
508d2b5d 1941static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
1942{
1943 struct ehea_mc_list *ehea_mcl_entry;
1944 u64 hret;
1945
1e1675cc 1946 ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
1947 if (!ehea_mcl_entry) {
1948 ehea_error("no mem for mcl_entry");
1949 return;
1950 }
1951
1952 INIT_LIST_HEAD(&ehea_mcl_entry->list);
1953
1954 memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);
1955
1956 hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
1957 H_REG_BCMC);
1958 if (!hret)
1959 list_add(&ehea_mcl_entry->list, &port->mc_list->list);
1960 else {
1961 ehea_error("failed registering mcast MAC");
1962 kfree(ehea_mcl_entry);
1963 }
1964}
1965
1966static void ehea_set_multicast_list(struct net_device *dev)
1967{
1968 struct ehea_port *port = netdev_priv(dev);
1969 struct dev_mc_list *k_mcl_entry;
48e2f183 1970 int ret;
1971
1972 if (dev->flags & IFF_PROMISC) {
1973 ehea_promiscuous(dev, 1);
1974 return;
1975 }
1976 ehea_promiscuous(dev, 0);
1977
1978 if (dev->flags & IFF_ALLMULTI) {
1979 ehea_allmulti(dev, 1);
21eee2dd 1980 goto out;
1981 }
1982 ehea_allmulti(dev, 0);
1983
4cd24eaf 1984 if (!netdev_mc_empty(dev)) {
1985 ret = ehea_drop_multicast_list(dev);
1986 if (ret) {
1987 /* Dropping the current multicast list failed.
1988 * Enabling ALL_MULTI is the best we can do.
1989 */
1990 ehea_allmulti(dev, 1);
1991 }
1992
4cd24eaf 1993 if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
a1c5a893 1994 ehea_info("Mcast registration limit reached (0x%llx). "
1995 "Use ALLMULTI!",
1996 port->adapter->max_mc_mac);
1997 goto out;
1998 }
1999
48e2f183 2000 netdev_for_each_mc_addr(k_mcl_entry, dev)
7a291083 2001 ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
508d2b5d 2002
2003 }
2004out:
21eee2dd 2005 ehea_update_bcmc_registrations();
2006 return;
2007}
2008
2009static int ehea_change_mtu(struct net_device *dev, int new_mtu)
2010{
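	/* 68 bytes is the minimum MTU an IPv4 host must support (RFC 791);
	 * the upper bound is the largest (jumbo) frame the HEA handles. */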
2011 if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
2012 return -EINVAL;
2013 dev->mtu = new_mtu;
2014 return 0;
2015}
2016
2017static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
2018 struct ehea_swqe *swqe, u32 lkey)
2019{
2020 if (skb->protocol == htons(ETH_P_IP)) {
eddc9ec5 2021 const struct iphdr *iph = ip_hdr(skb);
d1d25aab 2022
2023 /* IPv4 */
2024 swqe->tx_control |= EHEA_SWQE_CRC
2025 | EHEA_SWQE_IP_CHECKSUM
2026 | EHEA_SWQE_TCP_CHECKSUM
2027 | EHEA_SWQE_IMM_DATA_PRESENT
2028 | EHEA_SWQE_DESCRIPTORS_PRESENT;
2029
2030 write_ip_start_end(swqe, skb);
2031
eddc9ec5 2032 if (iph->protocol == IPPROTO_UDP) {
2033 if ((iph->frag_off & IP_MF) ||
2034 (iph->frag_off & IP_OFFSET))
2035 /* IP fragment, so don't change cs */
2036 swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
2037 else
2038 write_udp_offset_end(swqe, skb);
eddc9ec5 2039 } else if (iph->protocol == IPPROTO_TCP) {
2040 write_tcp_offset_end(swqe, skb);
2041 }
2042
2043 /* icmp (big data) and ip segmentation packets (all other ip
2044 packets) do not require any special handling */
2045
2046 } else {
2047 /* Other Ethernet Protocol */
2048 swqe->tx_control |= EHEA_SWQE_CRC
2049 | EHEA_SWQE_IMM_DATA_PRESENT
2050 | EHEA_SWQE_DESCRIPTORS_PRESENT;
2051 }
2052
2053 write_swqe2_data(skb, dev, swqe, lkey);
2054}
2055
2056static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
2057 struct ehea_swqe *swqe)
2058{
2059 int nfrags = skb_shinfo(skb)->nr_frags;
2060 u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
2061 skb_frag_t *frag;
2062 int i;
2063
2064 if (skb->protocol == htons(ETH_P_IP)) {
eddc9ec5 2065 const struct iphdr *iph = ip_hdr(skb);
d1d25aab 2066
2067 /* IPv4 */
2068 write_ip_start_end(swqe, skb);
2069
eddc9ec5 2070 if (iph->protocol == IPPROTO_TCP) {
2071 swqe->tx_control |= EHEA_SWQE_CRC
2072 | EHEA_SWQE_IP_CHECKSUM
2073 | EHEA_SWQE_TCP_CHECKSUM
2074 | EHEA_SWQE_IMM_DATA_PRESENT;
2075
2076 write_tcp_offset_end(swqe, skb);
2077
eddc9ec5 2078 } else if (iph->protocol == IPPROTO_UDP) {
2079 if ((iph->frag_off & IP_MF) ||
2080 (iph->frag_off & IP_OFFSET))
2081 /* IP fragment, so don't change cs */
2082 swqe->tx_control |= EHEA_SWQE_CRC
2083 | EHEA_SWQE_IMM_DATA_PRESENT;
2084 else {
2085 swqe->tx_control |= EHEA_SWQE_CRC
2086 | EHEA_SWQE_IP_CHECKSUM
2087 | EHEA_SWQE_TCP_CHECKSUM
2088 | EHEA_SWQE_IMM_DATA_PRESENT;
2089
2090 write_udp_offset_end(swqe, skb);
2091 }
2092 } else {
2093 /* icmp (big data) and
2094 ip segmentation packets (all other ip packets) */
2095 swqe->tx_control |= EHEA_SWQE_CRC
2096 | EHEA_SWQE_IP_CHECKSUM
2097 | EHEA_SWQE_IMM_DATA_PRESENT;
2098 }
2099 } else {
2100 /* Other Ethernet Protocol */
2101 swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
2102 }
2103 /* copy (immediate) data */
2104 if (nfrags == 0) {
2105 /* data is in a single piece */
d626f62b 2106 skb_copy_from_linear_data(skb, imm_data, skb->len);
2107 } else {
2108 /* first copy data from the skb->data buffer ... */
2109 skb_copy_from_linear_data(skb, imm_data,
2110 skb->len - skb->data_len);
2111 imm_data += skb->len - skb->data_len;
2112
2113 /* ... then copy data from the fragments */
2114 for (i = 0; i < nfrags; i++) {
2115 frag = &skb_shinfo(skb)->frags[i];
2116 memcpy(imm_data,
2117 page_address(frag->page) + frag->page_offset,
2118 frag->size);
2119 imm_data += frag->size;
2120 }
2121 }
2122 swqe->immediate_data_length = skb->len;
2123 dev_kfree_skb(skb);
2124}
2125
2126static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
2127{
2128 struct tcphdr *tcp;
2129 u32 tmp;
2130
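	/*
	 * Spread TCP flows across the TX queues: fold the TCP ports and
	 * the destination IP address modulo 31, then reduce modulo the
	 * number of queues. All non-TCP traffic maps to queue 0.
	 */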
2131 if ((skb->protocol == htons(ETH_P_IP)) &&
88ca2d07 2132 (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
2133 tcp = (struct tcphdr *)(skb_network_header(skb) +
2134 (ip_hdr(skb)->ihl * 4));
18604c54 2135 tmp = (tcp->source + (tcp->dest << 16)) % 31;
88ca2d07 2136 tmp += ip_hdr(skb)->daddr % 31;
18604c54 2137 return tmp % num_qps;
508d2b5d 2138 } else
2139 return 0;
2140}
2141
2142static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2143{
2144 struct ehea_port *port = netdev_priv(dev);
2145 struct ehea_swqe *swqe;
2146 unsigned long flags;
2147 u32 lkey;
2148 int swqe_index;
2149 struct ehea_port_res *pr;
2150
2151 pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];
2152
2153 if (!spin_trylock(&pr->xmit_lock))
2154 return NETDEV_TX_BUSY;
2155
2156 if (pr->queue_stopped) {
2157 spin_unlock(&pr->xmit_lock);
2158 return NETDEV_TX_BUSY;
2159 }
2160
2161 swqe = ehea_get_swqe(pr->qp, &swqe_index);
2162 memset(swqe, 0, SWQE_HEADER_SIZE);
2163 atomic_dec(&pr->swqe_avail);
2164
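	/*
	 * Two transmit flavours: frames that fit into the SWQE's
	 * immediate-data area go out as type-3 work requests (no
	 * descriptors; the skb is copied and freed in ehea_xmit3), larger
	 * frames as type-2 with SG descriptors, the skb parked in sq_skba
	 * until its completion arrives.
	 */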
2165 if (skb->len <= SWQE3_MAX_IMM) {
2166 u32 sig_iv = port->sig_comp_iv;
2167 u32 swqe_num = pr->swqe_id_counter;
2168 ehea_xmit3(skb, dev, swqe);
2169 swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
2170 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
2171 if (pr->swqe_ll_count >= (sig_iv - 1)) {
2172 swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
2173 sig_iv);
2174 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2175 pr->swqe_ll_count = 0;
2176 } else
2177 pr->swqe_ll_count += 1;
2178 } else {
2179 swqe->wr_id =
2180 EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
2181 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
acbddb59 2182 | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
2183 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
2184 pr->sq_skba.arr[pr->sq_skba.index] = skb;
2185
2186 pr->sq_skba.index++;
2187 pr->sq_skba.index &= (pr->sq_skba.len - 1);
2188
2189 lkey = pr->send_mr.lkey;
2190 ehea_xmit2(skb, dev, swqe, lkey);
acbddb59 2191 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2192 }
2193 pr->swqe_id_counter += 1;
2194
2195 if (port->vgrp && vlan_tx_tag_present(skb)) {
2196 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
2197 swqe->vlan_tag = vlan_tx_tag_get(skb);
2198 }
2199
2200 if (netif_msg_tx_queued(port)) {
2201 ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
bff0a55f 2202 ehea_dump(swqe, 512, "swqe");
2203 }
2204
2205 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
2206 netif_stop_queue(dev);
2207 swqe->tx_control |= EHEA_SWQE_PURGE;
2208 }
44c82152 2209
7a291083 2210 ehea_post_swqe(pr->qp, swqe);
7393b87c 2211 pr->tx_packets++;
2212
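	/*
	 * Stop the queue when the SQ is nearly exhausted. The check is
	 * repeated under the netif_queue lock so that a completion
	 * freeing SWQEs in between cannot leave the queue stopped
	 * indefinitely.
	 */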
2213 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
2214 spin_lock_irqsave(&pr->netif_queue, flags);
2215 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
acbddb59 2216 pr->p_stats.queue_stopped++;
2217 netif_stop_queue(dev);
2218 pr->queue_stopped = 1;
2219 }
2220 spin_unlock_irqrestore(&pr->netif_queue, flags);
2221 }
2222 dev->trans_start = jiffies;
2223 spin_unlock(&pr->xmit_lock);
2c69448b 2224
2225 return NETDEV_TX_OK;
2226}
2227
2228static void ehea_vlan_rx_register(struct net_device *dev,
2229 struct vlan_group *grp)
2230{
2231 struct ehea_port *port = netdev_priv(dev);
2232 struct ehea_adapter *adapter = port->adapter;
2233 struct hcp_ehea_port_cb1 *cb1;
2234 u64 hret;
2235
2236 port->vgrp = grp;
2237
3faf2693 2238 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2239 if (!cb1) {
2240 ehea_error("no mem for cb1");
2241 goto out;
2242 }
2243
2244 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2245 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2246 if (hret != H_SUCCESS)
2247 ehea_error("modify_ehea_port failed");
2248
3faf2693 2249 free_page((unsigned long)cb1);
2250out:
2251 return;
2252}
2253
2254static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
2255{
2256 struct ehea_port *port = netdev_priv(dev);
2257 struct ehea_adapter *adapter = port->adapter;
2258 struct hcp_ehea_port_cb1 *cb1;
2259 int index;
2260 u64 hret;
2261
3faf2693 2262 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2263 if (!cb1) {
2264 ehea_error("no mem for cb1");
2265 goto out;
2266 }
2267
2268 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2269 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2270 if (hret != H_SUCCESS) {
2271 ehea_error("query_ehea_port failed");
2272 goto out;
2273 }
2274
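	/*
	 * The VLAN filter is a bitmap of u64 words with the most
	 * significant bit first. Worked example: vid 66 -> index
	 * 66 / 64 = 1, offset 66 & 0x3F = 2, so the mask is
	 * 0x8000000000000000 >> 2 = 0x2000000000000000.
	 */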
2275 index = (vid / 64);
dec590c1 2276 cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));
2277
2278 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2279 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2280 if (hret != H_SUCCESS)
2281 ehea_error("modify_ehea_port failed");
2282out:
3faf2693 2283 free_page((unsigned long)cb1);
2284 return;
2285}
2286
2287static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2288{
2289 struct ehea_port *port = netdev_priv(dev);
2290 struct ehea_adapter *adapter = port->adapter;
2291 struct hcp_ehea_port_cb1 *cb1;
2292 int index;
2293 u64 hret;
2294
5c15bdec 2295 vlan_group_set_device(port->vgrp, vid, NULL);
7a291083 2296
3faf2693 2297 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2298 if (!cb1) {
2299 ehea_error("no mem for cb1");
2300 goto out;
2301 }
2302
2303 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2304 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2305 if (hret != H_SUCCESS) {
2306 ehea_error("query_ehea_port failed");
2307 goto out;
2308 }
2309
2310 index = (vid / 64);
dec590c1 2311 cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));
2312
2313 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2314 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2315 if (hret != H_SUCCESS)
2316 ehea_error("modify_ehea_port failed");
2317out:
3faf2693 2318 free_page((unsigned long)cb1);
2319 return;
2320}
2321
2322int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2323{
2324 int ret = -EIO;
2325 u64 hret;
2326 u16 dummy16 = 0;
2327 u64 dummy64 = 0;
508d2b5d 2328 struct hcp_modify_qp_cb0 *cb0;
7a291083 2329
3faf2693 2330 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2331 if (!cb0) {
2332 ret = -ENOMEM;
2333 goto out;
2334 }
2335
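	/*
	 * Walk the QP through the firmware state machine (INITIALIZED,
	 * then ENABLED, then RDY2SND), re-querying the control block
	 * before each transition.
	 */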
2336 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2337 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2338 if (hret != H_SUCCESS) {
2339 ehea_error("query_ehea_qp failed (1)");
2340 goto out;
2341 }
2342
2343 cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
2344 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2345 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2346 &dummy64, &dummy64, &dummy16, &dummy16);
2347 if (hret != H_SUCCESS) {
2348 ehea_error("modify_ehea_qp failed (1)");
2349 goto out;
2350 }
2351
2352 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2353 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2354 if (hret != H_SUCCESS) {
2355 ehea_error("query_ehea_qp failed (2)");
2356 goto out;
2357 }
2358
2359 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
2360 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2361 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2362 &dummy64, &dummy64, &dummy16, &dummy16);
2363 if (hret != H_SUCCESS) {
2364 ehea_error("modify_ehea_qp failed (2)");
2365 goto out;
2366 }
2367
2368 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2369 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2370 if (hret != H_SUCCESS) {
2371 ehea_error("query_ehea_qp failed (3)");
2372 goto out;
2373 }
2374
2375 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
2376 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2377 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2378 &dummy64, &dummy64, &dummy16, &dummy16);
2379 if (hret != H_SUCCESS) {
2380 ehea_error("modify_ehea_qp failed (3)");
2381 goto out;
2382 }
2383
2384 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2385 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2386 if (hret != H_SUCCESS) {
2387 ehea_error("query_ehea_qp failed (4)");
2388 goto out;
2389 }
2390
2391 ret = 0;
2392out:
3faf2693 2393 free_page((unsigned long)cb0);
2394 return ret;
2395}
2396
2397static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
2398 int add_tx_qps)
2399{
2400 int ret, i;
2401 struct port_res_cfg pr_cfg, pr_cfg_small_rx;
2402 enum ehea_eq_type eq_type = EHEA_EQ;
2403
2404 port->qp_eq = ehea_create_eq(port->adapter, eq_type,
2405 EHEA_MAX_ENTRIES_EQ, 1);
2406 if (!port->qp_eq) {
2407 ret = -EINVAL;
2408 ehea_error("ehea_create_eq failed (qp_eq)");
2409 goto out_kill_eq;
2410 }
2411
2412 pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
18604c54 2413 pr_cfg.max_entries_scq = sq_entries * 2;
2414 pr_cfg.max_entries_sq = sq_entries;
2415 pr_cfg.max_entries_rq1 = rq1_entries;
2416 pr_cfg.max_entries_rq2 = rq2_entries;
2417 pr_cfg.max_entries_rq3 = rq3_entries;
2418
2419 pr_cfg_small_rx.max_entries_rcq = 1;
2420 pr_cfg_small_rx.max_entries_scq = sq_entries;
2421 pr_cfg_small_rx.max_entries_sq = sq_entries;
2422 pr_cfg_small_rx.max_entries_rq1 = 1;
2423 pr_cfg_small_rx.max_entries_rq2 = 1;
2424 pr_cfg_small_rx.max_entries_rq3 = 1;
2425
2426 for (i = 0; i < def_qps; i++) {
2427 ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
2428 if (ret)
2429 goto out_clean_pr;
2430 }
2431 for (i = def_qps; i < def_qps + add_tx_qps; i++) {
2432 ret = ehea_init_port_res(port, &port->port_res[i],
2433 &pr_cfg_small_rx, i);
2434 if (ret)
2435 goto out_clean_pr;
2436 }
2437
2438 return 0;
2439
2440out_clean_pr:
2441 while (--i >= 0)
2442 ehea_clean_portres(port, &port->port_res[i]);
2443
2444out_kill_eq:
2445 ehea_destroy_eq(port->qp_eq);
2446 return ret;
2447}
2448
2449static int ehea_clean_all_portres(struct ehea_port *port)
2450{
2451 int ret = 0;
2452 int i;
2453
508d2b5d 2454 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
2455 ret |= ehea_clean_portres(port, &port->port_res[i]);
2456
2457 ret |= ehea_destroy_eq(port->qp_eq);
2458
2459 return ret;
2460}
2461
35cf2e2e 2462static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
1211bb6d 2463{
2464 if (adapter->active_ports)
2465 return;
2466
2467 ehea_rem_mr(&adapter->mr);
2468}
2469
35cf2e2e 2470static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
1211bb6d 2471{
2472 if (adapter->active_ports)
2473 return 0;
2474
2475 return ehea_reg_kernel_mr(adapter, &adapter->mr);
2476}
2477
2478static int ehea_up(struct net_device *dev)
2479{
2480 int ret, i;
2481 struct ehea_port *port = netdev_priv(dev);
2482
2483 if (port->state == EHEA_PORT_UP)
2484 return 0;
2485
2486 ret = ehea_port_res_setup(port, port->num_def_qps,
2487 port->num_add_tx_qps);
2488 if (ret) {
2489 ehea_error("ehea_port_res_setup failed");
2490 goto out;
2491 }
2492
2493 /* Set default QP for this port */
2494 ret = ehea_configure_port(port);
2495 if (ret) {
2496 ehea_error("ehea_configure_port failed. ret:%d", ret);
2497 goto out_clean_pr;
2498 }
2499
2500 ret = ehea_reg_interrupts(dev);
2501 if (ret) {
2502 ehea_error("reg_interrupts failed. ret:%d", ret);
2503 goto out_clean_pr;
2504 }
2505
508d2b5d 2506 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
2507 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
2508 if (ret) {
2509 ehea_error("activate_qp failed");
2510 goto out_free_irqs;
2511 }
2512 }
2513
508d2b5d 2514 for (i = 0; i < port->num_def_qps; i++) {
2515 ret = ehea_fill_port_res(&port->port_res[i]);
2516 if (ret) {
2517 ehea_error("ehea_fill_port_res failed");
2518 goto out_free_irqs;
2519 }
2520 }
2521
2522 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
2523 if (ret) {
2524 ret = -EIO;
2525 goto out_free_irqs;
2526 }
2527
7a291083 2528 port->state = EHEA_PORT_UP;
2529
2530 ret = 0;
2531 goto out;
2532
2533out_free_irqs:
2534 ehea_free_interrupts(dev);
2535
2536out_clean_pr:
2537 ehea_clean_all_portres(port);
2538out:
2539 if (ret)
2540 ehea_info("Failed starting %s. ret=%i", dev->name, ret);
2541
21eee2dd 2542 ehea_update_bcmc_registrations();
21eee2dd 2543 ehea_update_firmware_handles();
21eee2dd 2544
2545 return ret;
2546}
2547
2548static void port_napi_disable(struct ehea_port *port)
2549{
2550 int i;
2551
0173b793 2552 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
2553 napi_disable(&port->port_res[i].napi);
2554}
2555
2556static void port_napi_enable(struct ehea_port *port)
2557{
2558 int i;
2559
0173b793 2560 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
2561 napi_enable(&port->port_res[i].napi);
2562}
2563
2564static int ehea_open(struct net_device *dev)
2565{
2566 int ret;
2567 struct ehea_port *port = netdev_priv(dev);
2568
a5af6ad3 2569 mutex_lock(&port->port_lock);
2570
2571 if (netif_msg_ifup(port))
2572 ehea_info("enabling port %s", dev->name);
2573
2574 ret = ehea_up(dev);
2575 if (!ret) {
2576 port_napi_enable(port);
7a291083 2577 netif_start_queue(dev);
bea3348e 2578 }
7a291083 2579
a5af6ad3 2580 mutex_unlock(&port->port_lock);
2581
2582 return ret;
2583}
2584
2585static int ehea_down(struct net_device *dev)
2586{
bea3348e 2587 int ret;
2588 struct ehea_port *port = netdev_priv(dev);
2589
2590 if (port->state == EHEA_PORT_DOWN)
2591 return 0;
2592
2593 ehea_drop_multicast_list(dev);
2594 ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2595
2596 ehea_free_interrupts(dev);
2597
7a291083 2598 port->state = EHEA_PORT_DOWN;
44c82152 2599
21eee2dd 2600 ehea_update_bcmc_registrations();
21eee2dd 2601
2602 ret = ehea_clean_all_portres(port);
2603 if (ret)
2604 ehea_info("Failed freeing resources for %s. ret=%i",
2605 dev->name, ret);
2606
21eee2dd 2607 ehea_update_firmware_handles();
21eee2dd 2608
2609 return ret;
2610}
2611
2612static int ehea_stop(struct net_device *dev)
2613{
2614 int ret;
2615 struct ehea_port *port = netdev_priv(dev);
2616
2617 if (netif_msg_ifdown(port))
2618 ehea_info("disabling port %s", dev->name);
2619
2f69ae01 2620 set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
4bb073c0 2621 cancel_work_sync(&port->reset_task);
a5af6ad3 2622 mutex_lock(&port->port_lock);
7a291083 2623 netif_stop_queue(dev);
0173b793 2624 port_napi_disable(port);
7a291083 2625 ret = ehea_down(dev);
a5af6ad3 2626 mutex_unlock(&port->port_lock);
2f69ae01 2627 clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2628 return ret;
2629}
2630
22559c5d 2631static void ehea_purge_sq(struct ehea_qp *orig_qp)
2632{
2633 struct ehea_qp qp = *orig_qp;
2634 struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2635 struct ehea_swqe *swqe;
2636 int wqe_index;
2637 int i;
2638
2639 for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
2640 swqe = ehea_get_swqe(&qp, &wqe_index);
2641 swqe->tx_control |= EHEA_SWQE_PURGE;
2642 }
2643}
2644
22559c5d 2645static void ehea_flush_sq(struct ehea_port *port)
2646{
2647 int i;
2648
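	/*
	 * Poll until each send queue has drained down to the unsignalled
	 * tail of SWQEs, giving up after 20 * 5 ms, i.e. roughly 100 ms
	 * per queue.
	 */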
2649 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
2650 struct ehea_port_res *pr = &port->port_res[i];
2651 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
2652 int k = 0;
2653 while (atomic_read(&pr->swqe_avail) < swqe_max) {
2654 msleep(5);
2655 if (++k == 20)
2656 break;
2657 }
2658 }
2659}
2660
2661int ehea_stop_qps(struct net_device *dev)
2662{
2663 struct ehea_port *port = netdev_priv(dev);
2664 struct ehea_adapter *adapter = port->adapter;
508d2b5d 2665 struct hcp_modify_qp_cb0 *cb0;
2666 int ret = -EIO;
2667 int dret;
2668 int i;
2669 u64 hret;
2670 u64 dummy64 = 0;
2671 u16 dummy16 = 0;
2672
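	/*
	 * Used while an LPAR memory change is in flight: purge each send
	 * queue, disable the QP and drop its shared memory regions so the
	 * kernel memory region can be deregistered and rebuilt.
	 */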
3faf2693 2673 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2674 if (!cb0) {
2675 ret = -ENOMEM;
2676 goto out;
2677 }
2678
2679 for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
2680 struct ehea_port_res *pr = &port->port_res[i];
2681 struct ehea_qp *qp = pr->qp;
2682
2683 /* Purge send queue */
2684 ehea_purge_sq(qp);
2685
2686 /* Disable queue pair */
2687 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2688 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2689 cb0);
2690 if (hret != H_SUCCESS) {
2691 ehea_error("query_ehea_qp failed (1)");
2692 goto out;
2693 }
2694
2695 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2696 cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;
2697
2698 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2699 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2700 1), cb0, &dummy64,
2701 &dummy64, &dummy16, &dummy16);
2702 if (hret != H_SUCCESS) {
2703 ehea_error("modify_ehea_qp failed (1)");
2704 goto out;
2705 }
2706
2707 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2708 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2709 cb0);
2710 if (hret != H_SUCCESS) {
2711 ehea_error("query_ehea_qp failed (2)");
2712 goto out;
2713 }
2714
2715 /* deregister shared memory regions */
2716 dret = ehea_rem_smrs(pr);
2717 if (dret) {
2718 ehea_error("unreg shared memory region failed");
2719 goto out;
2720 }
2721 }
2722
2723 ret = 0;
2724out:
3faf2693 2725 free_page((unsigned long)cb0);
2726
2727 return ret;
2728}
2729
508d2b5d 2730void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
2731{
2732 struct ehea_qp qp = *orig_qp;
2733 struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2734 struct ehea_rwqe *rwqe;
2735 struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
2736 struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
2737 struct sk_buff *skb;
2738 u32 lkey = pr->recv_mr.lkey;
2739
2740
2741 int i;
2742 int index;
2743
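	/*
	 * After the memory region has been re-registered, the lkey and
	 * the skb buffer addresses cached in the pre-posted RQ2/RQ3 work
	 * requests are stale; patch them in place before the QP is
	 * re-enabled.
	 */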
2744 for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
2745 rwqe = ehea_get_next_rwqe(&qp, 2);
2746 rwqe->sg_list[0].l_key = lkey;
2747 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2748 skb = skba_rq2[index];
2749 if (skb)
2750 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2751 }
2752
2753 for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
2754 rwqe = ehea_get_next_rwqe(&qp, 3);
2755 rwqe->sg_list[0].l_key = lkey;
2756 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2757 skb = skba_rq3[index];
2758 if (skb)
2759 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2760 }
2761}
2762
2763int ehea_restart_qps(struct net_device *dev)
2764{
2765 struct ehea_port *port = netdev_priv(dev);
2766 struct ehea_adapter *adapter = port->adapter;
2767 int ret = 0;
2768 int i;
2769
508d2b5d 2770 struct hcp_modify_qp_cb0 *cb0;
2771 u64 hret;
2772 u64 dummy64 = 0;
2773 u16 dummy16 = 0;
2774
3faf2693 2775 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2776 if (!cb0) {
2777 ret = -ENOMEM;
2778 goto out;
2779 }
2780
2781 for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
2782 struct ehea_port_res *pr = &port->port_res[i];
2783 struct ehea_qp *qp = pr->qp;
2784
2785 ret = ehea_gen_smrs(pr);
2786 if (ret) {
2787 ehea_error("creation of shared memory regions failed");
2788 goto out;
2789 }
2790
2791 ehea_update_rqs(qp, pr);
2792
2793 /* Enable queue pair */
2794 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2795 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2796 cb0);
2797 if (hret != H_SUCCESS) {
2798 ehea_error("query_ehea_qp failed (1)");
2799 goto out;
2800 }
2801
2802 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2803 cb0->qp_ctl_reg |= H_QP_CR_ENABLED;
2804
2805 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2806 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2807 1), cb0, &dummy64,
2808 &dummy64, &dummy16, &dummy16);
2809 if (hret != H_SUCCESS) {
2810 ehea_error("modify_ehea_qp failed (1)");
2811 goto out;
2812 }
2813
2814 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2815 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2816 cb0);
2817 if (hret != H_SUCCESS) {
2818 ehea_error("query_ehea_qp failed (2)");
2819 goto out;
2820 }
2821
2822 /* refill entire queue */
2823 ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
2824 ehea_refill_rq2(pr, 0);
2825 ehea_refill_rq3(pr, 0);
2826 }
2827out:
3faf2693 2828 free_page((unsigned long)cb0);
2829
2830 return ret;
2831}
2832
c4028958 2833static void ehea_reset_port(struct work_struct *work)
2834{
2835 int ret;
2836 struct ehea_port *port =
2837 container_of(work, struct ehea_port, reset_task);
2838 struct net_device *dev = port->netdev;
2839
2840 port->resets++;
a5af6ad3 2841 mutex_lock(&port->port_lock);
7a291083 2842 netif_stop_queue(dev);
2843
2844 port_napi_disable(port);
7a291083 2845
44c82152 2846 ehea_down(dev);
2847
2848 ret = ehea_up(dev);
44c82152 2849 if (ret)
7a291083 2850 goto out;
7a291083 2851
2852 ehea_set_multicast_list(dev);
2853
2854 if (netif_msg_timer(port))
2855 ehea_info("Device %s reset successfully", dev->name);
2856
2857 port_napi_enable(port);
2858
2859 netif_wake_queue(dev);
2860out:
a5af6ad3 2861 mutex_unlock(&port->port_lock);
2862 return;
2863}
2864
2865static void ehea_rereg_mrs(struct work_struct *work)
2866{
2867 int ret, i;
2868 struct ehea_adapter *adapter;
2869
06f89edf 2870 mutex_lock(&dlpar_mem_lock);
d4f12daf 2871 ehea_info("LPAR memory changed - re-initializing driver");
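	/*
	 * Rough sequence: quiesce every port that is up (stop its queues,
	 * flush and stop its QPs), deregister each adapter's kernel
	 * memory region, re-register it against the new memory layout,
	 * then restart the QPs and wake the queues.
	 */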
2872
2873 list_for_each_entry(adapter, &adapter_list, list)
2874 if (adapter->active_ports) {
2875 /* Shutdown all ports */
2876 for (i = 0; i < EHEA_MAX_PORTS; i++) {
2877 struct ehea_port *port = adapter->port[i];
a5af6ad3 2878 struct net_device *dev;
44c82152 2879
2880 if (!port)
2881 continue;
44c82152 2882
2883 dev = port->netdev;
2884
2885 if (dev->flags & IFF_UP) {
2886 mutex_lock(&port->port_lock);
2887 netif_stop_queue(dev);
df39e8ba 2888 ehea_flush_sq(port);
2889 ret = ehea_stop_qps(dev);
2890 if (ret) {
2891 mutex_unlock(&port->port_lock);
2892 goto out;
44c82152 2893 }
2894 port_napi_disable(port);
2895 mutex_unlock(&port->port_lock);
2896 }
2897 }
2898
2899 /* Unregister old memory region */
2900 ret = ehea_rem_mr(&adapter->mr);
2901 if (ret) {
2902 ehea_error("unregister MR failed - driver"
2903 " inoperable!");
2904 goto out;
2905 }
2906 }
2907
2908 clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
2909
2910 list_for_each_entry(adapter, &adapter_list, list)
2911 if (adapter->active_ports) {
2912 /* Register new memory region */
2913 ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
2914 if (ret) {
2915 ehea_error("register MR failed - driver"
2916 " inoperable!");
2917 goto out;
2918 }
2919
2920 /* Restart all ports */
2921 for (i = 0; i < EHEA_MAX_PORTS; i++) {
2922 struct ehea_port *port = adapter->port[i];
2923
2924 if (port) {
2925 struct net_device *dev = port->netdev;
2926
2927 if (dev->flags & IFF_UP) {
a5af6ad3 2928 mutex_lock(&port->port_lock);
2929 port_napi_enable(port);
2930 ret = ehea_restart_qps(dev);
2931 if (!ret)
44c82152 2932 netif_wake_queue(dev);
a5af6ad3 2933 mutex_unlock(&port->port_lock);
2934 }
2935 }
2936 }
2937 }
68905eb4 2938 ehea_info("re-initializing driver complete");
44c82152 2939out:
68905eb4 2940 mutex_unlock(&dlpar_mem_lock);
2941 return;
2942}
2943
2944static void ehea_tx_watchdog(struct net_device *dev)
2945{
2946 struct ehea_port *port = netdev_priv(dev);
2947
2948 if (netif_carrier_ok(dev) &&
2949 !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
2f69ae01 2950 ehea_schedule_port_reset(port);
2951}
2952
2953int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2954{
2955 struct hcp_query_ehea *cb;
2956 u64 hret;
2957 int ret;
2958
3faf2693 2959 cb = (void *)get_zeroed_page(GFP_KERNEL);
2960 if (!cb) {
2961 ret = -ENOMEM;
2962 goto out;
2963 }
2964
2965 hret = ehea_h_query_ehea(adapter->handle, cb);
2966
2967 if (hret != H_SUCCESS) {
2968 ret = -EIO;
2969 goto out_herr;
2970 }
2971
2972 adapter->max_mc_mac = cb->max_mc_mac - 1;
2973 ret = 0;
2974
2975out_herr:
3faf2693 2976 free_page((unsigned long)cb);
2977out:
2978 return ret;
2979}
2980
1acf2318 2981int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
7a291083 2982{
7a291083 2983 struct hcp_ehea_port_cb4 *cb4;
2984 u64 hret;
2985 int ret = 0;
7a291083 2986
1acf2318 2987 *jumbo = 0;
7a291083 2988
1acf2318 2989 /* (Try to) enable *jumbo frames */
3faf2693 2990 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
2991 if (!cb4) {
2992 ehea_error("no mem for cb4");
2993 ret = -ENOMEM;
2994 goto out;
7a291083 2995 } else {
1acf2318 2996 hret = ehea_h_query_ehea_port(port->adapter->handle,
2997 port->logical_port_id,
2998 H_PORT_CB4,
2999 H_PORT_CB4_JUMBO, cb4);
3000 if (hret == H_SUCCESS) {
3001 if (cb4->jumbo_frame)
1acf2318 3002 *jumbo = 1;
3003 else {
3004 cb4->jumbo_frame = 1;
3005 hret = ehea_h_modify_ehea_port(port->adapter->
3006 handle,
9c750b7d 3007 port->
1acf2318 3008 logical_port_id,
3009 H_PORT_CB4,
3010 H_PORT_CB4_JUMBO,
3011 cb4);
3012 if (hret == H_SUCCESS)
1acf2318 3013 *jumbo = 1;
9c750b7d 3014 }
3015 } else
3016 ret = -EINVAL;
3017
3faf2693 3018 free_page((unsigned long)cb4);
7a291083 3019 }
3020out:
3021 return ret;
3022}
3023
3024static ssize_t ehea_show_port_id(struct device *dev,
3025 struct device_attribute *attr, char *buf)
3026{
3027 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
a8e34fda 3028 return sprintf(buf, "%d", port->logical_port_id);
3029}
3030
3031static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
3032 NULL);
3033
3034static void __devinit logical_port_release(struct device *dev)
3035{
3036 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
3037 of_node_put(port->ofdev.node);
3038}
3039
3040static struct device *ehea_register_port(struct ehea_port *port,
3041 struct device_node *dn)
3042{
3043 int ret;
3044
3045 port->ofdev.node = of_node_get(dn);
6b08f3ae 3046 port->ofdev.dev.parent = &port->adapter->ofdev->dev;
d1dea38d 3047 port->ofdev.dev.bus = &ibmebus_bus_type;
1acf2318 3048
db1d7bf7 3049 dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
3050 port->ofdev.dev.release = logical_port_release;
3051
3052 ret = of_device_register(&port->ofdev);
3053 if (ret) {
3054 ehea_error("failed to register device. ret=%d", ret);
3055 goto out;
3056 }
3057
3058 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
d1d25aab 3059 if (ret) {
3060 ehea_error("failed to register attributes, ret=%d", ret);
3061 goto out_unreg_of_dev;
3062 }
e542aa6b 3063
3064 return &port->ofdev.dev;
3065
3066out_unreg_of_dev:
3067 of_device_unregister(&port->ofdev);
3068out:
3069 return NULL;
3070}
3071
3072static void ehea_unregister_port(struct ehea_port *port)
3073{
3074 device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
3075 of_device_unregister(&port->ofdev);
3076}
3077
3078static const struct net_device_ops ehea_netdev_ops = {
3079 .ndo_open = ehea_open,
3080 .ndo_stop = ehea_stop,
3081 .ndo_start_xmit = ehea_start_xmit,
3082#ifdef CONFIG_NET_POLL_CONTROLLER
3083 .ndo_poll_controller = ehea_netpoll,
3084#endif
3085 .ndo_get_stats = ehea_get_stats,
3086 .ndo_set_mac_address = ehea_set_mac_addr,
240c102d 3087 .ndo_validate_addr = eth_validate_addr,
3088 .ndo_set_multicast_list = ehea_set_multicast_list,
3089 .ndo_change_mtu = ehea_change_mtu,
3090 .ndo_vlan_rx_register = ehea_vlan_rx_register,
3091 .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid,
3092 .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid,
3093 .ndo_tx_timeout = ehea_tx_watchdog,
3094};
3095
3096struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3097 u32 logical_port_id,
3098 struct device_node *dn)
3099{
3100 int ret;
3101 struct net_device *dev;
3102 struct ehea_port *port;
3103 struct device *port_dev;
3104 int jumbo;
3105
3106 /* allocate memory for the port structures */
3107 dev = alloc_etherdev(sizeof(struct ehea_port));
3108
3109 if (!dev) {
3110 ehea_error("no mem for net_device");
3111 ret = -ENOMEM;
3112 goto out_err;
3113 }
3114
3115 port = netdev_priv(dev);
3116
a5af6ad3 3117 mutex_init(&port->port_lock);
3118 port->state = EHEA_PORT_DOWN;
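	/* Request a signalled completion only for every
	 * (sq_entries / 10)th immediate-data SWQE to keep the TX
	 * completion interrupt rate down (see ehea_start_xmit). */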
3119 port->sig_comp_iv = sq_entries / 10;
3120
3121 port->adapter = adapter;
3122 port->netdev = dev;
3123 port->logical_port_id = logical_port_id;
3124
3125 port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
3126
3127 port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
3128 if (!port->mc_list) {
3129 ret = -ENOMEM;
3130 goto out_free_ethdev;
3131 }
3132
3133 INIT_LIST_HEAD(&port->mc_list->list);
3134
3135 ret = ehea_sense_port_attr(port);
3136 if (ret)
3137 goto out_free_mc_list;
3138
3139 port_dev = ehea_register_port(port, dn);
3140 if (!port_dev)
3141 goto out_free_mc_list;
3142
3143 SET_NETDEV_DEV(dev, port_dev);
3144
3145 /* initialize net_device structure */
3146 memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
3147
3148 dev->netdev_ops = &ehea_netdev_ops;
3149 ehea_set_ethtool_ops(dev);
3150
7a291083 3151 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
dc01c447 3152 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
3153 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
3154 | NETIF_F_LLTX;
3155 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
3156
c4028958 3157 INIT_WORK(&port->reset_task, ehea_reset_port);
3158
3159 ret = register_netdev(dev);
3160 if (ret) {
3161 ehea_error("register_netdev failed. ret=%d", ret);
21eee2dd 3162 goto out_unreg_port;
3163 }
3164
3165 port->lro_max_aggr = lro_max_aggr;
3166
1acf2318 3167 ret = ehea_get_jumboframe_status(port, &jumbo);
e542aa6b 3168 if (ret)
3169 ehea_error("failed determining jumbo frame status for %s",
3170 port->netdev->name);
3171
3172 ehea_info("%s: Jumbo frames are %sabled", dev->name,
3173 jumbo == 1 ? "en" : "dis");
3174
3175 adapter->active_ports++;
3176
1acf2318 3177 return port;
7a291083 3178
3179out_unreg_port:
3180 ehea_unregister_port(port);
3181
3182out_free_mc_list:
7a291083 3183 kfree(port->mc_list);
3184
3185out_free_ethdev:
3186 free_netdev(dev);
3187
3188out_err:
3189 ehea_error("setting up logical port with id=%d failed, ret=%d",
3190 logical_port_id, ret);
3191 return NULL;
3192}
3193
3194static void ehea_shutdown_single_port(struct ehea_port *port)
3195{
7fb1c2ac 3196 struct ehea_adapter *adapter = port->adapter;
3197 unregister_netdev(port->netdev);
3198 ehea_unregister_port(port);
3199 kfree(port->mc_list);
3200 free_netdev(port->netdev);
7fb1c2ac 3201 adapter->active_ports--;
3202}
3203
3204static int ehea_setup_ports(struct ehea_adapter *adapter)
3205{
3206 struct device_node *lhea_dn;
3207 struct device_node *eth_dn = NULL;
d1d25aab 3208
9f9a3b8a 3209 const u32 *dn_log_port_id;
3210 int i = 0;
3211
6b08f3ae 3212 lhea_dn = adapter->ofdev->node;
1eef4e04 3213 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
e542aa6b 3214
40cd3a45 3215 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
d1d25aab 3216 NULL);
3217 if (!dn_log_port_id) {
3218 ehea_error("bad device node: eth_dn name=%s",
3219 eth_dn->full_name);
3220 continue;
3221 }
7a291083 3222
3223 if (ehea_add_adapter_mr(adapter)) {
3224 ehea_error("creating MR failed");
3225 of_node_put(eth_dn);
3226 return -EIO;
3227 }
3228
3229 adapter->port[i] = ehea_setup_single_port(adapter,
3230 *dn_log_port_id,
3231 eth_dn);
7a291083 3232 if (adapter->port[i])
1acf2318 3233 ehea_info("%s -> logical port id #%d",
e542aa6b 3234 adapter->port[i]->netdev->name,
1acf2318 3235 *dn_log_port_id);
3236 else
3237 ehea_remove_adapter_mr(adapter);
3238
1acf2318 3239 i++;
1eef4e04 3240 }
1211bb6d 3241 return 0;
3242}
3243
3244static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
3245 u32 logical_port_id)
3246{
3247 struct device_node *lhea_dn;
3248 struct device_node *eth_dn = NULL;
9f9a3b8a 3249 const u32 *dn_log_port_id;
1acf2318 3250
6b08f3ae 3251 lhea_dn = adapter->ofdev->node;
1eef4e04 3252 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
e542aa6b 3253
40cd3a45 3254 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
d1d25aab 3255 NULL);
3256 if (dn_log_port_id)
3257 if (*dn_log_port_id == logical_port_id)
3258 return eth_dn;
1eef4e04 3259 }
3260
3261 return NULL;
3262}
3263
3264static ssize_t ehea_probe_port(struct device *dev,
3265 struct device_attribute *attr,
3266 const char *buf, size_t count)
3267{
c7ae011d 3268 struct ehea_adapter *adapter = dev_get_drvdata(dev);
3269 struct ehea_port *port;
3270 struct device_node *eth_dn = NULL;
3271 int i;
3272
3273 u32 logical_port_id;
3274
a8e34fda 3275 sscanf(buf, "%d", &logical_port_id);
3276
3277 port = ehea_get_port(adapter, logical_port_id);
3278
3279 if (port) {
3280 ehea_info("adding port with logical port id=%d failed. port "
3281 "already configured as %s.", logical_port_id,
3282 port->netdev->name);
3283 return -EINVAL;
7a291083 3284 }
e542aa6b 3285
1acf2318 3286 eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
7a291083 3287
3288 if (!eth_dn) {
3289 ehea_info("no logical port with id %d found", logical_port_id);
3290 return -EINVAL;
3291 }
e542aa6b 3292
3293 if (ehea_add_adapter_mr(adapter)) {
3294 ehea_error("creating MR failed");
3295 return -EIO;
3296 }
3297
1acf2318 3298 port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
7a291083 3299
3300 of_node_put(eth_dn);
3301
1acf2318 3302 if (port) {
508d2b5d 3303 for (i = 0; i < EHEA_MAX_PORTS; i++)
3304 if (!adapter->port[i]) {
3305 adapter->port[i] = port;
3306 break;
3307 }
7a291083 3308
3309 ehea_info("added %s (logical port id=%d)", port->netdev->name,
3310 logical_port_id);
3311 } else {
3312 ehea_remove_adapter_mr(adapter);
e542aa6b 3313 return -EIO;
1211bb6d 3314 }
7a291083 3315
3316 return (ssize_t) count;
3317}
3318
3319static ssize_t ehea_remove_port(struct device *dev,
3320 struct device_attribute *attr,
3321 const char *buf, size_t count)
3322{
c7ae011d 3323 struct ehea_adapter *adapter = dev_get_drvdata(dev);
3324 struct ehea_port *port;
3325 int i;
3326 u32 logical_port_id;
3327
a8e34fda 3328 sscanf(buf, "%d", &logical_port_id);
3329
3330 port = ehea_get_port(adapter, logical_port_id);
3331
3332 if (port) {
3333 ehea_info("removed %s (logical port id=%d)", port->netdev->name,
3334 logical_port_id);
3335
3336 ehea_shutdown_single_port(port);
3337
508d2b5d 3338 for (i = 0; i < EHEA_MAX_PORTS; i++)
3339 if (adapter->port[i] == port) {
3340 adapter->port[i] = NULL;
3341 break;
3342 }
3343 } else {
3344 ehea_error("removing port with logical port id=%d failed. port "
3345 "not configured.", logical_port_id);
3346 return -EINVAL;
3347 }
3348
3349 ehea_remove_adapter_mr(adapter);
3350
3351 return (ssize_t) count;
3352}
3353
3354static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
3355static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
3356
6b08f3ae 3357int ehea_create_device_sysfs(struct of_device *dev)
1acf2318 3358{
6b08f3ae 3359 int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
3360 if (ret)
3361 goto out;
3362
6b08f3ae 3363 ret = device_create_file(&dev->dev, &dev_attr_remove_port);
1acf2318 3364out:
3365 return ret;
3366}
3367
6b08f3ae 3368void ehea_remove_device_sysfs(struct of_device *dev)
1acf2318 3369{
3370 device_remove_file(&dev->dev, &dev_attr_probe_port);
3371 device_remove_file(&dev->dev, &dev_attr_remove_port);
3372}
3373
6b08f3ae 3374static int __devinit ehea_probe_adapter(struct of_device *dev,
1acf2318 3375 const struct of_device_id *id)
3376{
3377 struct ehea_adapter *adapter;
9f9a3b8a 3378 const u64 *adapter_handle;
3379 int ret;
3380
6b08f3ae 3381 if (!dev || !dev->node) {
3382 ehea_error("Invalid ibmebus device probed");
3383 return -EINVAL;
3384 }
3385
3386 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3387 if (!adapter) {
3388 ret = -ENOMEM;
6b08f3ae 3389 dev_err(&dev->dev, "no mem for ehea_adapter\n");
3390 goto out;
3391 }
3392
3393 list_add(&adapter->list, &adapter_list);
3394
6b08f3ae 3395 adapter->ofdev = dev;
1acf2318 3396
6b08f3ae 3397 adapter_handle = of_get_property(dev->node, "ibm,hea-handle",
d1d25aab 3398 NULL);
3399 if (adapter_handle)
3400 adapter->handle = *adapter_handle;
3401
3402 if (!adapter->handle) {
3403 dev_err(&dev->dev, "failed getting handle for adapter"
3404 " '%s'\n", dev->node->full_name);
3405 ret = -ENODEV;
3406 goto out_free_ad;
3407 }
3408
3409 adapter->pd = EHEA_PD_ID;
3410
c7ae011d 3411 dev_set_drvdata(&dev->dev, adapter);
7a291083 3412
3413
3414 /* initialize adapter and ports */
3415 /* get adapter properties */
3416 ret = ehea_sense_adapter_attr(adapter);
3417 if (ret) {
898eb71c 3418 dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
1211bb6d 3419 goto out_free_ad;
7a291083 3420 }
3421
3422 adapter->neq = ehea_create_eq(adapter,
3423 EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
3424 if (!adapter->neq) {
1eef4e04 3425 ret = -EIO;
898eb71c 3426 dev_err(&dev->dev, "NEQ creation failed\n");
1211bb6d 3427 goto out_free_ad;
3428 }
3429
3430 tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
3431 (unsigned long)adapter);
3432
6b08f3ae 3433 ret = ibmebus_request_irq(adapter->neq->attr.ist1,
38515e90 3434 ehea_interrupt_neq, IRQF_DISABLED,
3435 "ehea_neq", adapter);
3436 if (ret) {
898eb71c 3437 dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
3438 goto out_kill_eq;
3439 }
3440
3441 ret = ehea_create_device_sysfs(dev);
3442 if (ret)
3bf76b81 3443 goto out_free_irq;
1acf2318 3444
3445 ret = ehea_setup_ports(adapter);
3446 if (ret) {
898eb71c 3447 dev_err(&dev->dev, "setup_ports failed\n");
1acf2318 3448 goto out_rem_dev_sysfs;
3449 }
3450
3451 ret = 0;
3452 goto out;
3453
3454out_rem_dev_sysfs:
3455 ehea_remove_device_sysfs(dev);
3456
7a291083 3457out_free_irq:
6b08f3ae 3458 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3459
3460out_kill_eq:
3461 ehea_destroy_eq(adapter->neq);
3462
7a291083 3463out_free_ad:
51621fbd 3464 list_del(&adapter->list);
7a291083 3465 kfree(adapter);
21eee2dd 3466
7a291083 3467out:
21eee2dd 3468 ehea_update_firmware_handles();
52e21b1b 3469
3470 return ret;
3471}
3472
6b08f3ae 3473static int __devexit ehea_remove(struct of_device *dev)
7a291083 3474{
c7ae011d 3475 struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
3476 int i;
3477
1acf2318 3478 for (i = 0; i < EHEA_MAX_PORTS; i++)
3479 if (adapter->port[i]) {
3480 ehea_shutdown_single_port(adapter->port[i]);
3481 adapter->port[i] = NULL;
3482 }
3483
3484 ehea_remove_device_sysfs(dev);
3485
3bf76b81 3486 flush_scheduled_work();
7a291083 3487
6b08f3ae 3488 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
d4150a27 3489 tasklet_kill(&adapter->neq_tasklet);
3490
3491 ehea_destroy_eq(adapter->neq);
1211bb6d 3492 ehea_remove_adapter_mr(adapter);
44c82152 3493 list_del(&adapter->list);
7a291083 3494 kfree(adapter);
44c82152 3495
21eee2dd 3496 ehea_update_firmware_handles();
21eee2dd 3497
3498 return 0;
3499}
3500
3501void ehea_crash_handler(void)
3502{
3503 int i;
3504
3505 if (ehea_fw_handles.arr)
3506 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3507 ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3508 ehea_fw_handles.arr[i].fwh,
3509 FORCE_FREE);
3510
3511 if (ehea_bcmc_regs.arr)
3512 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3513 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3514 ehea_bcmc_regs.arr[i].port_id,
3515 ehea_bcmc_regs.arr[i].reg_type,
3516 ehea_bcmc_regs.arr[i].macaddr,
3517 0, H_DEREG_BCMC);
3518}
3519
3520static int ehea_mem_notifier(struct notifier_block *nb,
3521 unsigned long action, void *data)
3522{
d4f12daf 3523 struct memory_notify *arg = data;
48cfb14f 3524 switch (action) {
3525 case MEM_CANCEL_OFFLINE:
3526 ehea_info("memory offlining canceled");
3527 /* Re-add the canceled memory block: fall through to MEM_ONLINE */
3528 case MEM_ONLINE:
3529 ehea_info("memory is going online");
3876732c 3530 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3531 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3532 return NOTIFY_BAD;
3533 ehea_rereg_mrs(NULL);
3534 break;
3535 case MEM_GOING_OFFLINE:
3536 ehea_info("memory is going offline");
3876732c 3537 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3538 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3539 return NOTIFY_BAD;
3540 ehea_rereg_mrs(NULL);
3541 break;
3542 default:
3543 break;
3544 }
3545
3546 ehea_update_firmware_handles();
3547
3548 return NOTIFY_OK;
3549}
3550
3551static struct notifier_block ehea_mem_nb = {
3552 .notifier_call = ehea_mem_notifier,
3553};
3554
3555static int ehea_reboot_notifier(struct notifier_block *nb,
3556 unsigned long action, void *unused)
3557{
3558 if (action == SYS_RESTART) {
3559 ehea_info("Reboot: freeing all eHEA resources");
3560 ibmebus_unregister_driver(&ehea_driver);
3561 }
3562 return NOTIFY_DONE;
3563}
3564
3565static struct notifier_block ehea_reboot_nb = {
508d2b5d 3566 .notifier_call = ehea_reboot_notifier,
3567};
3568
3569static int check_module_parm(void)
3570{
3571 int ret = 0;
3572
3573 if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
3574 (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
3575 ehea_info("Bad parameter: rq1_entries");
3576 ret = -EINVAL;
3577 }
3578 if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
3579 (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
3580 ehea_info("Bad parameter: rq2_entries");
3581 ret = -EINVAL;
3582 }
3583 if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
3584 (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
3585 ehea_info("Bad parameter: rq3_entries");
3586 ret = -EINVAL;
3587 }
3588 if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
3589 (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
3590 ehea_info("Bad parameter: sq_entries");
3591 ret = -EINVAL;
3592 }
3593
3594 return ret;
3595}
3596
3597static ssize_t ehea_show_capabilities(struct device_driver *drv,
3598 char *buf)
3599{
3600 return sprintf(buf, "%d", EHEA_CAPABILITIES);
3601}
3602
3603static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
3604 ehea_show_capabilities, NULL);
3605
3606int __init ehea_module_init(void)
3607{
3608 int ret;
3609
3610 printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
3611 DRV_VERSION);
3612
3613
3614 INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
3615 memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
3616 memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
3617
9f71a568 3618 mutex_init(&ehea_fw_handles.lock);
5c2cec14 3619 spin_lock_init(&ehea_bcmc_regs.lock);
44c82152 3620
3621 ret = check_module_parm();
3622 if (ret)
3623 goto out;
3624
3625 ret = ehea_create_busmap();
3626 if (ret)
3627 goto out;
3628
3629 ret = register_reboot_notifier(&ehea_reboot_nb);
3630 if (ret)
3631 ehea_info("failed registering reboot notifier");
3632
3633 ret = register_memory_notifier(&ehea_mem_nb);
3634 if (ret)
3635 ehea_info("failed registering memory notifier");
3636
3637 ret = crash_shutdown_register(&ehea_crash_handler);
3638 if (ret)
3639 ehea_info("failed registering crash handler");
2a6f4e49 3640
7a291083 3641 ret = ibmebus_register_driver(&ehea_driver);
4c3ca4da 3642 if (ret) {
7a291083 3643 ehea_error("failed registering eHEA device driver on ebus");
21eee2dd 3644 goto out2;
3645 }
3646
3647 ret = driver_create_file(&ehea_driver.driver,
3648 &driver_attr_capabilities);
3649 if (ret) {
3650 ehea_error("failed to register capabilities attribute, ret=%d",
3651 ret);
21eee2dd 3652 goto out3;
4c3ca4da 3653 }
7a291083 3654
3655 return ret;
3656
3657out3:
3658 ibmebus_unregister_driver(&ehea_driver);
3659out2:
48cfb14f 3660 unregister_memory_notifier(&ehea_mem_nb);
3661 unregister_reboot_notifier(&ehea_reboot_nb);
3662 crash_shutdown_unregister(&ehea_crash_handler);
3663out:
3664 return ret;
3665}
3666
3667static void __exit ehea_module_exit(void)
3668{
3669 int ret;
3670
3bf76b81 3671 flush_scheduled_work();
4c3ca4da 3672 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
7a291083 3673 ibmebus_unregister_driver(&ehea_driver);
2a6f4e49 3674 unregister_reboot_notifier(&ehea_reboot_nb);
3675 ret = crash_shutdown_unregister(&ehea_crash_handler);
3676 if (ret)
3677 ehea_info("failed unregistering crash handler");
48cfb14f 3678 unregister_memory_notifier(&ehea_mem_nb);
3679 kfree(ehea_fw_handles.arr);
3680 kfree(ehea_bcmc_regs.arr);
44c82152 3681 ehea_destroy_busmap();
3682}
3683
3684module_init(ehea_module_init);
3685module_exit(ehea_module_exit);