/* vnic_dev.c */
  1. /*
  2. * Copyright 2008 Cisco Systems, Inc. All rights reserved.
  3. * Copyright 2007 Nuova Systems, Inc. All rights reserved.
  4. *
  5. * This program is free software; you may redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation; version 2 of the License.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  10. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  11. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  12. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  13. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  14. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  15. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  16. * SOFTWARE.
  17. */
  18. #include <linux/kernel.h>
  19. #include <linux/errno.h>
  20. #include <linux/types.h>
  21. #include <linux/pci.h>
  22. #include <linux/delay.h>
  23. #include <linux/if_ether.h>
  24. #include <linux/slab.h>
  25. #include "vnic_resource.h"
  26. #include "vnic_devcmd.h"
  27. #include "vnic_dev.h"
  28. #include "vnic_stats.h"
/* One discovered vNIC resource: where it is mapped and how many entries
 * it has.
 */
struct vnic_res {
	void __iomem *vaddr;	/* mapped address within BAR0 */
	unsigned int count;	/* number of entries of this resource type */
};
/* Per-device state: discovered BAR0 resources, the devcmd register
 * region, and lazily allocated DMA-coherent buffers shared with the
 * firmware.
 */
struct vnic_dev {
	void *priv;				/* opaque driver context (vnic_dev_priv) */
	struct pci_dev *pdev;
	struct vnic_res res[RES_TYPE_MAX];	/* filled by vnic_dev_discover_res() */
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;	/* devcmd register region in BAR0 */
	struct vnic_devcmd_notify *notify;	/* fw-updated notify buffer (DMA) */
	struct vnic_devcmd_notify notify_copy;	/* last consistent snapshot of *notify */
	dma_addr_t notify_pa;
	u32 *linkstatus;			/* optional link-status word (DMA) */
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;		/* stats dump buffer (DMA) */
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;	/* cached firmware info (DMA) */
	dma_addr_t fw_info_pa;
};
/* Worst-case size of the BAR0 resource directory: the header plus one
 * resource entry per resource type.
 */
#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
/* Per-queue register blocks (WQ/RQ/CQ/INTR_CTRL) are 128 bytes apart. */
#define VNIC_RES_STRIDE 128
  53. void *vnic_dev_priv(struct vnic_dev *vdev)
  54. {
  55. return vdev->priv;
  56. }
/* Walk the vNIC resource directory at the start of BAR0 and record, for
 * each resource type mapped in BAR0, its mapped address and entry count
 * in vdev->res[].  Returns 0 on success, -EINVAL on a malformed header
 * or an out-of-bounds queue resource.
 */
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar)
{
	struct vnic_resource_header __iomem *rh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	if (!rh) {
		printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* validate the directory signature before trusting its contents */
	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
	    ioread32(&rh->version) != VNIC_RES_VERSION) {
		printk(KERN_ERR "vNIC BAR0 res magic/version error "
			"exp (%lx/%lx) curr (%x/%x)\n",
			VNIC_RES_MAGIC, VNIC_RES_VERSION,
			ioread32(&rh->magic), ioread32(&rh->version));
		return -EINVAL;
	}

	/* resource entries follow immediately after the header */
	r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num != 0)	/* only mapping in BAR0 resources */
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar->len) {
				printk(KERN_ERR "vNIC BAR0 resource %d "
					"out-of-bounds, offset 0x%x + "
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset,
					len,
					bar->len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
			/* NOTE(review): len is set but never bounds-checked
			 * for these types -- confirm that is intentional.
			 */
			len = count;
			break;
		default:
			/* unknown resource types are silently skipped */
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
	}

	return 0;
}
  118. unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
  119. enum vnic_res_type type)
  120. {
  121. return vdev->res[type].count;
  122. }
  123. void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
  124. unsigned int index)
  125. {
  126. if (!vdev->res[type].vaddr)
  127. return NULL;
  128. switch (type) {
  129. case RES_TYPE_WQ:
  130. case RES_TYPE_RQ:
  131. case RES_TYPE_CQ:
  132. case RES_TYPE_INTR_CTRL:
  133. return (char __iomem *)vdev->res[type].vaddr +
  134. index * VNIC_RES_STRIDE;
  135. default:
  136. return (char __iomem *)vdev->res[type].vaddr;
  137. }
  138. }
  139. unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
  140. unsigned int desc_count,
  141. unsigned int desc_size)
  142. {
  143. /* The base address of the desc rings must be 512 byte aligned.
  144. * Descriptor count is aligned to groups of 32 descriptors. A
  145. * count of 0 means the maximum 4096 descriptors. Descriptor
  146. * size is aligned to 16 bytes.
  147. */
  148. unsigned int count_align = 32;
  149. unsigned int desc_align = 16;
  150. ring->base_align = 512;
  151. if (desc_count == 0)
  152. desc_count = 4096;
  153. ring->desc_count = ALIGN(desc_count, count_align);
  154. ring->desc_size = ALIGN(desc_size, desc_align);
  155. ring->size = ring->desc_count * ring->desc_size;
  156. ring->size_unaligned = ring->size + ring->base_align;
  157. return ring->size_unaligned;
  158. }
  159. void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
  160. {
  161. memset(ring->descs, 0, ring->size);
  162. }
  163. int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
  164. unsigned int desc_count, unsigned int desc_size)
  165. {
  166. vnic_dev_desc_ring_size(ring, desc_count, desc_size);
  167. ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
  168. ring->size_unaligned,
  169. &ring->base_addr_unaligned);
  170. if (!ring->descs_unaligned) {
  171. printk(KERN_ERR
  172. "Failed to allocate ring (size=%d), aborting\n",
  173. (int)ring->size);
  174. return -ENOMEM;
  175. }
  176. ring->base_addr = ALIGN(ring->base_addr_unaligned,
  177. ring->base_align);
  178. ring->descs = (u8 *)ring->descs_unaligned +
  179. (ring->base_addr - ring->base_addr_unaligned);
  180. vnic_dev_clear_desc_ring(ring);
  181. ring->desc_avail = ring->desc_count - 1;
  182. return 0;
  183. }
  184. void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
  185. {
  186. if (ring->descs) {
  187. pci_free_consistent(vdev->pdev,
  188. ring->size_unaligned,
  189. ring->descs_unaligned,
  190. ring->base_addr_unaligned);
  191. ring->descs = NULL;
  192. }
  193. }
  194. int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
  195. u64 *a0, u64 *a1, int wait)
  196. {
  197. struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
  198. int delay;
  199. u32 status;
  200. int dev_cmd_err[] = {
  201. /* convert from fw's version of error.h to host's version */
  202. 0, /* ERR_SUCCESS */
  203. EINVAL, /* ERR_EINVAL */
  204. EFAULT, /* ERR_EFAULT */
  205. EPERM, /* ERR_EPERM */
  206. EBUSY, /* ERR_EBUSY */
  207. };
  208. int err;
  209. status = ioread32(&devcmd->status);
  210. if (status & STAT_BUSY) {
  211. printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
  212. return -EBUSY;
  213. }
  214. if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
  215. writeq(*a0, &devcmd->args[0]);
  216. writeq(*a1, &devcmd->args[1]);
  217. wmb();
  218. }
  219. iowrite32(cmd, &devcmd->cmd);
  220. if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
  221. return 0;
  222. for (delay = 0; delay < wait; delay++) {
  223. udelay(100);
  224. status = ioread32(&devcmd->status);
  225. if (!(status & STAT_BUSY)) {
  226. if (status & STAT_ERROR) {
  227. err = dev_cmd_err[(int)readq(&devcmd->args[0])];
  228. printk(KERN_ERR "Error %d devcmd %d\n",
  229. err, _CMD_N(cmd));
  230. return -err;
  231. }
  232. if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
  233. rmb();
  234. *a0 = readq(&devcmd->args[0]);
  235. *a1 = readq(&devcmd->args[1]);
  236. }
  237. return 0;
  238. }
  239. }
  240. printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
  241. return -ETIMEDOUT;
  242. }
  243. int vnic_dev_fw_info(struct vnic_dev *vdev,
  244. struct vnic_devcmd_fw_info **fw_info)
  245. {
  246. u64 a0, a1 = 0;
  247. int wait = 1000;
  248. int err = 0;
  249. if (!vdev->fw_info) {
  250. vdev->fw_info = pci_alloc_consistent(vdev->pdev,
  251. sizeof(struct vnic_devcmd_fw_info),
  252. &vdev->fw_info_pa);
  253. if (!vdev->fw_info)
  254. return -ENOMEM;
  255. a0 = vdev->fw_info_pa;
  256. /* only get fw_info once and cache it */
  257. err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
  258. }
  259. *fw_info = vdev->fw_info;
  260. return err;
  261. }
  262. int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
  263. void *value)
  264. {
  265. u64 a0, a1;
  266. int wait = 1000;
  267. int err;
  268. a0 = offset;
  269. a1 = size;
  270. err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
  271. switch (size) {
  272. case 1:
  273. *(u8 *)value = (u8)a0;
  274. break;
  275. case 2:
  276. *(u16 *)value = (u16)a0;
  277. break;
  278. case 4:
  279. *(u32 *)value = (u32)a0;
  280. break;
  281. case 8:
  282. *(u64 *)value = a0;
  283. break;
  284. default:
  285. BUG();
  286. break;
  287. }
  288. return err;
  289. }
  290. int vnic_dev_stats_clear(struct vnic_dev *vdev)
  291. {
  292. u64 a0 = 0, a1 = 0;
  293. int wait = 1000;
  294. return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
  295. }
  296. int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
  297. {
  298. u64 a0, a1;
  299. int wait = 1000;
  300. if (!vdev->stats) {
  301. vdev->stats = pci_alloc_consistent(vdev->pdev,
  302. sizeof(struct vnic_stats), &vdev->stats_pa);
  303. if (!vdev->stats)
  304. return -ENOMEM;
  305. }
  306. *stats = vdev->stats;
  307. a0 = vdev->stats_pa;
  308. a1 = sizeof(struct vnic_stats);
  309. return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
  310. }
  311. int vnic_dev_close(struct vnic_dev *vdev)
  312. {
  313. u64 a0 = 0, a1 = 0;
  314. int wait = 1000;
  315. return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
  316. }
  317. int vnic_dev_enable(struct vnic_dev *vdev)
  318. {
  319. u64 a0 = 0, a1 = 0;
  320. int wait = 1000;
  321. return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
  322. }
  323. int vnic_dev_disable(struct vnic_dev *vdev)
  324. {
  325. u64 a0 = 0, a1 = 0;
  326. int wait = 1000;
  327. return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
  328. }
  329. int vnic_dev_open(struct vnic_dev *vdev, int arg)
  330. {
  331. u64 a0 = (u32)arg, a1 = 0;
  332. int wait = 1000;
  333. return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
  334. }
  335. int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
  336. {
  337. u64 a0 = 0, a1 = 0;
  338. int wait = 1000;
  339. int err;
  340. *done = 0;
  341. err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
  342. if (err)
  343. return err;
  344. *done = (a0 == 0);
  345. return 0;
  346. }
  347. int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
  348. {
  349. u64 a0 = (u32)arg, a1 = 0;
  350. int wait = 1000;
  351. return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
  352. }
  353. int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
  354. {
  355. u64 a0 = 0, a1 = 0;
  356. int wait = 1000;
  357. int err;
  358. *done = 0;
  359. err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
  360. if (err)
  361. return err;
  362. *done = (a0 == 0);
  363. return 0;
  364. }
  365. int vnic_dev_hang_notify(struct vnic_dev *vdev)
  366. {
  367. u64 a0, a1;
  368. int wait = 1000;
  369. return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
  370. }
  371. int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
  372. {
  373. u64 a0, a1;
  374. int wait = 1000;
  375. int err, i;
  376. for (i = 0; i < ETH_ALEN; i++)
  377. mac_addr[i] = 0;
  378. err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
  379. if (err)
  380. return err;
  381. for (i = 0; i < ETH_ALEN; i++)
  382. mac_addr[i] = ((u8 *)&a0)[i];
  383. return 0;
  384. }
  385. void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
  386. int broadcast, int promisc, int allmulti)
  387. {
  388. u64 a0, a1 = 0;
  389. int wait = 1000;
  390. int err;
  391. a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
  392. (multicast ? CMD_PFILTER_MULTICAST : 0) |
  393. (broadcast ? CMD_PFILTER_BROADCAST : 0) |
  394. (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
  395. (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
  396. err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
  397. if (err)
  398. printk(KERN_ERR "Can't set packet filter\n");
  399. }
  400. void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
  401. {
  402. u64 a0 = 0, a1 = 0;
  403. int wait = 1000;
  404. int err;
  405. int i;
  406. for (i = 0; i < ETH_ALEN; i++)
  407. ((u8 *)&a0)[i] = addr[i];
  408. err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
  409. if (err)
  410. printk(KERN_ERR
  411. "Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
  412. addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
  413. err);
  414. }
  415. void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
  416. {
  417. u64 a0 = 0, a1 = 0;
  418. int wait = 1000;
  419. int err;
  420. int i;
  421. for (i = 0; i < ETH_ALEN; i++)
  422. ((u8 *)&a0)[i] = addr[i];
  423. err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
  424. if (err)
  425. printk(KERN_ERR
  426. "Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
  427. addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
  428. err);
  429. }
  430. int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
  431. {
  432. u64 a0, a1;
  433. int wait = 1000;
  434. if (!vdev->notify) {
  435. vdev->notify = pci_alloc_consistent(vdev->pdev,
  436. sizeof(struct vnic_devcmd_notify),
  437. &vdev->notify_pa);
  438. if (!vdev->notify)
  439. return -ENOMEM;
  440. }
  441. a0 = vdev->notify_pa;
  442. a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
  443. a1 += sizeof(struct vnic_devcmd_notify);
  444. return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
  445. }
  446. void vnic_dev_notify_unset(struct vnic_dev *vdev)
  447. {
  448. u64 a0, a1;
  449. int wait = 1000;
  450. a0 = 0; /* paddr = 0 to unset notify buffer */
  451. a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
  452. a1 += sizeof(struct vnic_devcmd_notify);
  453. vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
  454. }
/* Snapshot the firmware's notify block into vdev->notify_copy.
 *
 * The firmware updates the shared buffer asynchronously; word 0 holds
 * a checksum (the sum of all following 32-bit words).  Keep copying
 * until a snapshot's checksum matches, i.e. until the buffer is caught
 * in a consistent state.
 *
 * Returns 1 once a consistent copy is held, 0 if notify was never set.
 *
 * NOTE(review): this loop can spin indefinitely if the device never
 * produces a consistent buffer -- confirm that is acceptable here.
 */
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify,
			sizeof(struct vnic_devcmd_notify));
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}
  473. int vnic_dev_init(struct vnic_dev *vdev, int arg)
  474. {
  475. u64 a0 = (u32)arg, a1 = 0;
  476. int wait = 1000;
  477. return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
  478. }
/* Set the device's default vlan to new_default_vlan.
 *
 * NOTE(review): the return value is the devcmd status code (0 or a
 * negative errno) cast to u16, not the old vlan the firmware returns
 * in a0 -- confirm whether any caller relies on the returned value
 * before changing this.
 */
u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, u16 new_default_vlan)
{
	u64 a0 = new_default_vlan, a1 = 0;
	int wait = 1000;
	int old_vlan = 0;
	old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait);
	return (u16)old_vlan;
}
  487. int vnic_dev_link_status(struct vnic_dev *vdev)
  488. {
  489. if (vdev->linkstatus)
  490. return *vdev->linkstatus;
  491. if (!vnic_dev_notify_ready(vdev))
  492. return 0;
  493. return vdev->notify_copy.link_state;
  494. }
  495. u32 vnic_dev_port_speed(struct vnic_dev *vdev)
  496. {
  497. if (!vnic_dev_notify_ready(vdev))
  498. return 0;
  499. return vdev->notify_copy.port_speed;
  500. }
  501. u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
  502. {
  503. if (!vnic_dev_notify_ready(vdev))
  504. return 0;
  505. return vdev->notify_copy.msglvl;
  506. }
  507. u32 vnic_dev_mtu(struct vnic_dev *vdev)
  508. {
  509. if (!vnic_dev_notify_ready(vdev))
  510. return 0;
  511. return vdev->notify_copy.mtu;
  512. }
  513. u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev)
  514. {
  515. if (!vnic_dev_notify_ready(vdev))
  516. return 0;
  517. return vdev->notify_copy.link_down_cnt;
  518. }
  519. void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
  520. enum vnic_dev_intr_mode intr_mode)
  521. {
  522. vdev->intr_mode = intr_mode;
  523. }
  524. enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
  525. struct vnic_dev *vdev)
  526. {
  527. return vdev->intr_mode;
  528. }
  529. void vnic_dev_unregister(struct vnic_dev *vdev)
  530. {
  531. if (vdev) {
  532. if (vdev->notify)
  533. pci_free_consistent(vdev->pdev,
  534. sizeof(struct vnic_devcmd_notify),
  535. vdev->notify,
  536. vdev->notify_pa);
  537. if (vdev->linkstatus)
  538. pci_free_consistent(vdev->pdev,
  539. sizeof(u32),
  540. vdev->linkstatus,
  541. vdev->linkstatus_pa);
  542. if (vdev->stats)
  543. pci_free_consistent(vdev->pdev,
  544. sizeof(struct vnic_stats),
  545. vdev->stats, vdev->stats_pa);
  546. if (vdev->fw_info)
  547. pci_free_consistent(vdev->pdev,
  548. sizeof(struct vnic_devcmd_fw_info),
  549. vdev->fw_info, vdev->fw_info_pa);
  550. kfree(vdev);
  551. }
  552. }
/* Allocate (when vdev is NULL) and initialize a vnic_dev: discover the
 * BAR0 resources and locate the devcmd register region.  Returns the
 * initialized vnic_dev, or NULL on failure.
 *
 * NOTE(review): on failure this calls vnic_dev_unregister(), which
 * kfree()s vdev even when the caller supplied it -- confirm callers
 * expect to lose ownership of vdev on error.
 */
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar))
		goto err_out;

	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}