/*
 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>
#include <linux/slab.h>

#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"

enum vnic_proxy_type {
	PROXY_NONE,
	PROXY_BY_BDF,
};

#define VNIC_DEV_CAP_INIT	0x0001

struct vnic_dev {
	void *priv;
	struct pci_dev *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	u32 notify_sz;
	u32 *linkstatus;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
	u32 cap_flags;
	enum vnic_proxy_type proxy;
	u32 proxy_index;
	u64 args[VNIC_DEVCMD_NARGS];
};

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128
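
/*
 * WQ, RQ, CQ and INTR_CTRL register sets are indexed resources: entry i of
 * a resource lives VNIC_RES_STRIDE * i bytes past the resource base, which
 * is how vnic_dev_get_res() and vnic_dev_get_res_bus_addr() locate them.
 */
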
void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		pr_err("vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	if (!rh) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
	    ioread32(&rh->version) != VNIC_RES_VERSION) {
		pr_err("vNIC BAR0 res magic/version error "
			"exp (%lx/%lx) curr (%x/%x)\n",
			VNIC_RES_MAGIC, VNIC_RES_VERSION,
			ioread32(&rh->magic), ioread32(&rh->version));
		return -EINVAL;
	}

	r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar[bar_num].len) {
				pr_err("vNIC BAR0 resource %d "
					"out-of-bounds, offset 0x%x + "
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset, len,
					bar[bar_num].len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
			bar_offset;
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
	}

	return 0;
}

unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}

dma_addr_t vnic_dev_get_res_bus_addr(struct vnic_dev *vdev,
	enum vnic_res_type type, unsigned int index)
{
	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return vdev->res[type].bus_addr +
			index * VNIC_RES_STRIDE;
	default:
		return vdev->res[type].bus_addr;
	}
}

unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors.  A
	 * count of 0 means the maximum 4096 descriptors.  Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = ALIGN(desc_count, count_align);

	ring->desc_size = ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}
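
/*
 * Illustrative sizing example (assumed values, not from the driver): for
 * desc_count = 100 and desc_size = 16, the count rounds up to 128 (the next
 * multiple of 32), desc_size stays 16, so ring->size = 128 * 16 = 2048 bytes
 * and ring->size_unaligned = 2048 + 512 = 2560 bytes, leaving room to bump
 * the base address up to the next 512-byte boundary.
 */
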
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
		ring->size_unaligned,
		&ring->base_addr_unaligned);

	if (!ring->descs_unaligned) {
		pr_err("Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}

void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		pci_free_consistent(vdev->pdev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}

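/*
 * Low-level devcmd handshake with the firmware, as implemented below: the
 * arguments are staged in vdev->args[], written to the devcmd register
 * window, the command opcode is written to devcmd->cmd, and the status
 * register is then polled until STAT_BUSY clears (or the wait expires).
 * On completion, STAT_ERROR means the error code is in args[0]; otherwise
 * read-direction commands copy the result arguments back out.
 */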
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	u32 status;
	int err;

	status = ioread32(&devcmd->status);
	if (status & STAT_BUSY) {
		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		wmb();
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (!(status & STAT_BUSY)) {

			if (status & STAT_ERROR) {
				err = (int)readq(&devcmd->args[0]);
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					pr_err("Error %d devcmd %d\n",
						err, _CMD_N(cmd));
				return err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}

static int vnic_dev_cmd_proxy_by_bdf(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{
	u32 status;
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index; /* bdf */
	vdev->args[1] = cmd;
	vdev->args[2] = *a0;
	vdev->args[3] = *a1;

	err = _vnic_dev_cmd(vdev, CMD_PROXY_BY_BDF, wait);
	if (err)
		return err;

	status = (u32)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
		return err;
	}

	*a0 = vdev->args[1];
	*a1 = vdev->args[2];

	return 0;
}

static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
	err = _vnic_dev_cmd(vdev, cmd, wait);

void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf)
{
	vdev->proxy = PROXY_BY_BDF;
	vdev->proxy_index = bdf;
}

void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
{
	vdev->proxy = PROXY_NONE;
	vdev->proxy_index = 0;
}

int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_BY_BDF:
		return vnic_dev_cmd_proxy_by_bdf(vdev, cmd, a0, a1, wait);
	case PROXY_NONE:
	default:
		return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
	}
}

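/*
 * Probe whether the firmware understands a given devcmd by issuing
 * CMD_CAPABILITY with the opcode of interest in a0; vnic_dev_register()
 * uses this below to decide whether CMD_INIT may be used.
 */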
static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	u64 a0 = (u32)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}

int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa);
		if (!vdev->fw_info)
			return -ENOMEM;

		a0 = vdev->fw_info_pa;

		/* only get fw_info once and cache it */
		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}

int vnic_dev_hw_version(struct vnic_dev *vdev, enum vnic_dev_hw_version *hw_ver)
{
	struct vnic_devcmd_fw_info *fw_info;
	int err;

	err = vnic_dev_fw_info(vdev, &fw_info);
	if (err)
		return err;

	if (strncmp(fw_info->hw_version, "A1", sizeof("A1")) == 0)
		*hw_ver = VNIC_DEV_HW_VER_A1;
	else if (strncmp(fw_info->hw_version, "A2", sizeof("A2")) == 0)
		*hw_ver = VNIC_DEV_HW_VER_A2;
	else
		*hw_ver = VNIC_DEV_HW_VER_UNKNOWN;

	return 0;
}

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
	void *value)
{
	u64 a0 = offset, a1 = size;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1: *(u8 *)value = (u8)a0; break;
	case 2: *(u16 *)value = (u16)a0; break;
	case 4: *(u32 *)value = (u32)a0; break;
	case 8: *(u64 *)value = a0; break;
	default: BUG(); break;
	}

	return err;
}

int vnic_dev_stats_clear(struct vnic_dev *vdev)
	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats) {
		vdev->stats = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_stats), &vdev->stats_pa);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

int vnic_dev_close(struct vnic_dev *vdev)
	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);

int vnic_dev_enable(struct vnic_dev *vdev)
	return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
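
/* CMD_ENABLE_WAIT is the preferred enable command; older firmware that
 * reports ERR_ECMDUNKNOWN for it is handled below by falling back to the
 * plain CMD_ENABLE.
 */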
int vnic_dev_enable_wait(struct vnic_dev *vdev)
	err = vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	if (err == ERR_ECMDUNKNOWN)
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);

int vnic_dev_disable(struct vnic_dev *vdev)
	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);

int vnic_dev_open(struct vnic_dev *vdev, int arg)
	u64 a0 = (u32)arg, a1 = 0;
	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);

int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
	u64 a0 = (u32)arg, a1 = 0;
	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);

int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);

int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_HANG_RESET, &a0, &a1, wait);
	if (err == ERR_ECMDUNKNOWN) {
		err = vnic_dev_soft_reset(vdev, arg);
		if (err)
			return err;
		return vnic_dev_init(vdev, 0);
	}

	return err;
}

int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
	err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS, &a0, &a1, wait);
	if (err == ERR_ECMDUNKNOWN)
		return vnic_dev_soft_reset_done(vdev, done);

int vnic_dev_hang_notify(struct vnic_dev *vdev)
	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);

int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
}

int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
		(multicast ? CMD_PFILTER_MULTICAST : 0) |
		(broadcast ? CMD_PFILTER_BROADCAST : 0) |
		(promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
		(allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		pr_err("Can't set packet filter\n");

	return err;
}
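
/*
 * Illustrative call (assumed values): receive unicast, multicast and
 * broadcast frames without going promiscuous:
 *
 *	vnic_dev_packet_filter(vdev, 1, 1, 1, 0, 0);
 */
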
int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed,
	int multicast, int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
		(multicast ? CMD_PFILTER_MULTICAST : 0) |
		(broadcast ? CMD_PFILTER_BROADCAST : 0) |
		(promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
		(allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER_ALL, &a0, &a1, wait);
	if (err)
		pr_err("Can't set packet filter\n");

	return err;
}

int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		pr_err("Can't add addr [%pM], %d\n", addr, err);

	return err;
}

int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		pr_err("Can't del addr [%pM], %d\n", addr, err);

	return err;
}

int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
	u8 ig_vlan_rewrite_mode)
{
	u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE, &a0, &a1, wait);
	if (err == ERR_ECMDUNKNOWN)
		return 0;

	return err;
}

int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr)
{
	u64 a0 = intr, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_IAR, &a0, &a1, wait);
	if (err)
		pr_err("Failed to raise INTR[%d], err %d\n", intr, err);

	return err;
}

int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
	void *notify_addr, dma_addr_t notify_pa, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;
	int r;

	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	vdev->notify = notify_addr;
	vdev->notify_pa = notify_pa;
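
	/*
	 * a0 carries the DMA address of the notify block (0 in the unset
	 * path below); a1 packs the interrupt index into bits 32..47 and
	 * the block size into the low bits.  On success the firmware hands
	 * back in a1 the notify size it will actually write.
	 */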
	a0 = (u64)notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	vdev->notify_sz = (r == 0) ? (u32)a1 : 0;

	return r;
}

int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	void *notify_addr;
	dma_addr_t notify_pa;

	if (vdev->notify || vdev->notify_pa) {
		pr_err("notify block %p still allocated", vdev->notify);
		return -EINVAL;
	}

	notify_addr = pci_alloc_consistent(vdev->pdev,
		sizeof(struct vnic_devcmd_notify),
		&notify_pa);
	if (!notify_addr)
		return -ENOMEM;

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}

int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
	a0 = 0;  /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);
	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);

int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify)
		pci_free_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			vdev->notify,
			vdev->notify_pa);

	return vnic_dev_notify_unsetcmd(vdev);
}

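/*
 * The firmware updates the notify block asynchronously and keeps a checksum
 * of words 1..n-1 in word 0.  vnic_dev_notify_ready() copies the block into
 * notify_copy and retries until the checksum matches, so callers always see
 * a consistent snapshot.
 */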
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}

int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vdev->cap_flags & VNIC_DEV_CAP_INIT)
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}

	return r;
}

int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err)
	ret = vnic_dev_cmd(vdev, CMD_INIT_STATUS, &a0, &a1, wait);
	*err = (a0 == 0) ? (int)a1 : 0;

int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len)
	prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
	memcpy(prov_buf, buf, len);
	ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO, &a0, &a1, wait);
	pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);

int vnic_dev_deinit(struct vnic_dev *vdev)
	return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (vdev->linkstatus)
		return *vdev->linkstatus;

	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.msglvl;
}

u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_down_cnt;
}

u32 vnic_dev_notify_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.status;
}

u32 vnic_dev_uif(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.uif;
}

void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
	struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}

void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->linkstatus)
			pci_free_consistent(vdev->pdev,
				sizeof(u32),
				vdev->linkstatus,
				vdev->linkstatus_pa);
		if (vdev->stats)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		kfree(vdev);
	}
}

struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
	unsigned int num_bars)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		goto err_out;

	vdev->cap_flags = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		vdev->cap_flags |= VNIC_DEV_CAP_INIT;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}