/*
 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>

#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_stats.h"
#include "enic.h"

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}
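
/* Walk the resource table that firmware publishes behind the BAR0 resource
 * header.  Each entry names a resource type (WQ, RQ, CQ, interrupt control,
 * devcmd area, ...), the BAR it lives in, its offset there and a count.
 * Entries in BARs that were not mapped, and types this driver does not
 * know, are skipped.
 */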
static int vnic_dev_discover_res(struct vnic_dev *vdev,
				 struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		vdev_err("vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	mrh = bar->vaddr;
	if (!rh) {
		vdev_err("vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* Check for mgmt vnic in addition to normal vnic */
	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
	    (ioread32(&rh->version) != VNIC_RES_VERSION)) {
		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
		    (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
			vdev_err("vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
				 VNIC_RES_MAGIC, VNIC_RES_VERSION,
				 MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
				 ioread32(&rh->magic), ioread32(&rh->version));
			return -EINVAL;
		}
	}

	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
		r = (struct vnic_resource __iomem *)(mrh + 1);
	else
		r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar[bar_num].len) {
				vdev_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
					 type, bar_offset, len,
					 bar[bar_num].len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
		case RES_TYPE_DEVCMD2:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
					bar_offset;
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
	}

	return 0;
}

unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
				    enum vnic_res_type type)
{
	return vdev->res[type].count;
}
EXPORT_SYMBOL(vnic_dev_get_res_count);

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
			       unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
		       index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}
EXPORT_SYMBOL(vnic_dev_get_res);

static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
					    unsigned int desc_count,
					    unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors.  A
	 * count of 0 means the maximum 4096 descriptors.  Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = ALIGN(desc_count, count_align);
	ring->desc_size = ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}

void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
			     unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
						     ring->size_unaligned,
						     &ring->base_addr_unaligned);
	if (!ring->descs_unaligned) {
		vdev_err("Failed to allocate ring (size=%d), aborting\n",
			 (int)ring->size);
		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned,
				ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		      (ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}

void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		pci_free_consistent(vdev->pdev,
				    ring->size_unaligned,
				    ring->descs_unaligned,
				    ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}
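
/* Issue a command over the original register-based devcmd interface:
 * write the args into the devcmd area, post the command, then poll the
 * status register (100 usec per iteration, up to 'wait' iterations) until
 * STAT_BUSY clears.  An all-ones status read is taken to mean the PCIe
 * device has gone away.
 */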
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
			 int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	u32 status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
		vdev_neterr("Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		wmb();
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {
			if (status & STAT_ERROR) {
				err = (int)readq(&devcmd->args[0]);
				if (err == ERR_EINVAL &&
				    cmd == CMD_CAPABILITY)
					return -err;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					vdev_neterr("Error %d devcmd %d\n",
						    err, _CMD_N(cmd));
				return -err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	vdev_neterr("Timed out devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
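
/* Issue a command over the devcmd2 interface: post a descriptor to a work
 * queue ring and, unless the command is fire-and-forget (_CMD_FLAGS_NOWAIT),
 * poll the matching slot of the result ring.  A fresh result is recognized
 * by its color bit matching the color the driver expects for the current
 * pass through the ring; the expected color is flipped each time the driver
 * wraps, so stale entries from the previous pass are never mistaken for new
 * ones.
 */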
static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
			  int wait)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;
	struct devcmd2_result *result;
	u8 color;
	unsigned int i;
	int delay, err;
	u32 fetch_index, new_posted;
	u32 posted = dc2c->posted;

	fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index);
	if (fetch_index == 0xFFFFFFFF)
		return -ENODEV;

	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
	if (new_posted == fetch_index) {
		vdev_neterr("devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
			    _CMD_N(cmd), fetch_index, posted);
		return -EBUSY;
	}

	dc2c->cmd_ring[posted].cmd = cmd;
	dc2c->cmd_ring[posted].flags = 0;

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE)
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			dc2c->cmd_ring[posted].args[i] = vdev->args[i];

	/* Adding write memory barrier prevents compiler and/or CPU reordering,
	 * thus avoiding descriptor posting before descriptor is initialized.
	 * Otherwise, hardware can read stale descriptor fields.
	 */
	wmb();
	iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);
	dc2c->posted = new_posted;

	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
		return 0;

	result = dc2c->result + dc2c->next_result;
	color = dc2c->color;

	dc2c->next_result++;
	if (dc2c->next_result == dc2c->result_size) {
		dc2c->next_result = 0;
		dc2c->color = dc2c->color ? 0 : 1;
	}

	for (delay = 0; delay < wait; delay++) {
		if (result->color == color) {
			if (result->error) {
				err = result->error;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					vdev_neterr("Error %d devcmd %d\n",
						    err, _CMD_N(cmd));
				return -err;
			}
			if (_CMD_DIR(cmd) & _CMD_DIR_READ)
				for (i = 0; i < VNIC_DEVCMD2_NARGS; i++)
					vdev->args[i] = result->results[i];

			return 0;
		}
		udelay(100);
	}

	vdev_neterr("devcmd %d timed out\n", _CMD_N(cmd));

	return -ETIMEDOUT;
}

static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
{
	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		return -ENODEV;
	vdev->devcmd_rtn = _vnic_dev_cmd;

	return 0;
}
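
/* Set up the devcmd2 posting ring and result ring and tell firmware where
 * the result ring lives (CMD_INITIALIZE_DEVCMD2 is itself issued through
 * the new interface).  On any failure everything allocated so far is
 * unwound, so the caller can fall back to devcmd1.
 */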
static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
	int err;
	unsigned int fetch_index;

	if (vdev->devcmd2)
		return 0;

	vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_KERNEL);
	if (!vdev->devcmd2)
		return -ENOMEM;

	vdev->devcmd2->color = 1;
	vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;
	err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE,
				    DEVCMD2_DESC_SIZE);
	if (err)
		goto err_free_devcmd2;

	fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
		vdev_err("Fatal error in devcmd2 init - hardware surprise removal\n");
		err = -ENODEV;
		goto err_free_wq;
	}

	enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0,
			   0);
	vdev->devcmd2->posted = fetch_index;
	vnic_wq_enable(&vdev->devcmd2->wq);
	err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
				       DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
	if (err)
		goto err_disable_wq;

	vdev->devcmd2->result = vdev->devcmd2->results_ring.descs;
	vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs;
	vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
	vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr |
			VNIC_PADDR_TARGET;
	vdev->args[1] = DEVCMD2_RING_SIZE;
	err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
	if (err)
		goto err_free_desc_ring;

	vdev->devcmd_rtn = _vnic_dev_cmd2;

	return 0;

err_free_desc_ring:
	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
err_disable_wq:
	vnic_wq_disable(&vdev->devcmd2->wq);
err_free_wq:
	vnic_wq_free(&vdev->devcmd2->wq);
err_free_devcmd2:
	kfree(vdev->devcmd2);
	vdev->devcmd2 = NULL;

	return err;
}

static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
	vnic_wq_disable(&vdev->devcmd2->wq);
	vnic_wq_free(&vdev->devcmd2->wq);
	kfree(vdev->devcmd2);
}

static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
			      enum vnic_devcmd_cmd proxy_cmd,
			      enum vnic_devcmd_cmd cmd,
			      u64 *a0, u64 *a1, int wait)
{
	u32 status;
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index;
	vdev->args[1] = cmd;
	vdev->args[2] = *a0;
	vdev->args[3] = *a1;

	err = vdev->devcmd_rtn(vdev, proxy_cmd, wait);
	if (err)
		return err;

	status = (u32)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			vdev_neterr("Error %d proxy devcmd %d\n", err,
				    _CMD_N(cmd));
		return err;
	}

	*a0 = vdev->args[1];
	*a1 = vdev->args[2];

	return 0;
}

static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
				 enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1,
				 int wait)
{
	int err;

	vdev->args[0] = *a0;
	vdev->args[1] = *a1;

	err = vdev->devcmd_rtn(vdev, cmd, wait);

	*a0 = vdev->args[0];
	*a1 = vdev->args[1];

	return err;
}

void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index)
{
	vdev->proxy = PROXY_BY_INDEX;
	vdev->proxy_index = index;
}

void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
{
	vdev->proxy = PROXY_NONE;
	vdev->proxy_index = 0;
}
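
/* All devcmds funnel through here.  When a proxy is armed (by index or by
 * BDF), the real command and its args are wrapped inside a CMD_PROXY_*
 * devcmd so firmware executes it on behalf of the target vNIC; otherwise
 * the command is issued directly.
 */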
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
		 u64 *a0, u64 *a1, int wait)
{
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
					  a0, a1, wait);
	case PROXY_BY_BDF:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
					  a0, a1, wait);
	case PROXY_NONE:
	default:
		return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
	}
}

static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	u64 a0 = (u32)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}

int vnic_dev_fw_info(struct vnic_dev *vdev,
		     struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = pci_zalloc_consistent(vdev->pdev,
						      sizeof(struct vnic_devcmd_fw_info),
						      &vdev->fw_info_pa);
		if (!vdev->fw_info)
			return -ENOMEM;

		a0 = vdev->fw_info_pa;
		a1 = sizeof(struct vnic_devcmd_fw_info);

		/* only get fw_info once and cache it */
		if (vnic_dev_capable(vdev, CMD_MCPU_FW_INFO))
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO,
					   &a0, &a1, wait);
		else
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
					   &a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
		  void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1: *(u8 *)value = (u8)a0; break;
	case 2: *(u16 *)value = (u16)a0; break;
	case 4: *(u32 *)value = (u32)a0; break;
	case 8: *(u64 *)value = a0; break;
	default: BUG(); break;
	}

	return err;
}

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats) {
		vdev->stats = pci_alloc_consistent(vdev->pdev,
						   sizeof(struct vnic_stats),
						   &vdev->stats_pa);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	else
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}

int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int err;

	if (vnic_dev_capable(vdev, CMD_HANG_RESET)) {
		return vnic_dev_cmd(vdev, CMD_HANG_RESET,
				    &a0, &a1, wait);
	} else {
		err = vnic_dev_soft_reset(vdev, arg);
		if (err)
			return err;
		return vnic_dev_init(vdev, 0);
	}
}

int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	if (vnic_dev_capable(vdev, CMD_HANG_RESET_STATUS)) {
		err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS,
				   &a0, &a1, wait);
		if (err)
			return err;
	} else {
		return vnic_dev_soft_reset_done(vdev, done);
	}

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}

int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
}

int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
			   int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		vdev_neterr("Can't set packet filter\n");

	return err;
}

int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		vdev_neterr("Can't add addr [%pM], %d\n", addr, err);

	return err;
}

int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		vdev_neterr("Can't del addr [%pM], %d\n", addr, err);

	return err;
}

int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
				      u8 ig_vlan_rewrite_mode)
{
	u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
		return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
				    &a0, &a1, wait);
	else
		return 0;
}
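
/* Register the notify block with firmware.  a0 carries the DMA address of
 * the block; a1 packs the interrupt number into bits 32..47 and the size
 * of the block into the low 32 bits.  On success firmware returns in a1
 * the notify size it will actually write back.
 */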
static int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
				  void *notify_addr, dma_addr_t notify_pa,
				  u16 intr)
{
	u64 a0, a1;
	int wait = 1000;
	int r;

	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	vdev->notify = notify_addr;
	vdev->notify_pa = notify_pa;

	a0 = (u64)notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	vdev->notify_sz = (r == 0) ? (u32)a1 : 0;

	return r;
}

int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	void *notify_addr;
	dma_addr_t notify_pa;

	if (vdev->notify || vdev->notify_pa) {
		vdev_neterr("notify block %p still allocated\n", vdev->notify);
		return -EINVAL;
	}

	notify_addr = pci_alloc_consistent(vdev->pdev,
					   sizeof(struct vnic_devcmd_notify),
					   &notify_pa);
	if (!notify_addr)
		return -ENOMEM;

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}

static int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = 0;  /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	vdev->notify = NULL;
	vdev->notify_pa = 0;
	vdev->notify_sz = 0;

	return err;
}

int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify) {
		pci_free_consistent(vdev->pdev,
				    sizeof(struct vnic_devcmd_notify),
				    vdev->notify,
				    vdev->notify_pa);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}
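
/* The notify block in host memory is updated by firmware, with word 0
 * holding the sum of the remaining words.  Re-copy the block until a
 * snapshot's checksum matches, so the accessors below always read a
 * consistent copy.  Returns 0 if no notify block has been registered yet.
 */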
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}

int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}

	return r;
}

int vnic_dev_deinit(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
}

void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
	/* Default: hardware intr coal timer is in units of 1.5 usecs */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
		vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}

int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
{
	int wait = 1000;
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT))
		err = vdev->devcmd_rtn(vdev, CMD_INTR_COAL_CONVERT, wait);
	else
		err = ERR_ECMDUNKNOWN;

	/* Use defaults when firmware doesn't support the devcmd at all or
	 * supports it for only specific hardware
	 */
	if ((err == ERR_ECMDUNKNOWN) ||
	    (!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
		vdev_netwarn("Using default conversion factor for interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(vdev);
		return 0;
	}

	if (!err) {
		vdev->intr_coal_timer_info.mul = (u32)vdev->args[0];
		vdev->intr_coal_timer_info.div = (u32)vdev->args[1];
		vdev->intr_coal_timer_info.max_usec = (u32)vdev->args[2];
	}

	return err;
}

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.msglvl;
}

u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
			    enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}
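
/* The interrupt coalescing timer is programmed in hardware cycles.  The
 * mul/div pair obtained above (or the 2/3 default, i.e. 1.5 usec per
 * cycle) converts between microseconds and those cycles in both
 * directions.
 */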
u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
{
	return (usec * vdev->intr_coal_timer_info.mul) /
	       vdev->intr_coal_timer_info.div;
}

u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
{
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
	       vdev->intr_coal_timer_info.mul;
}

u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
	return vdev->intr_coal_timer_info.max_usec;
}

void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			pci_free_consistent(vdev->pdev,
					    sizeof(struct vnic_devcmd_notify),
					    vdev->notify,
					    vdev->notify_pa);
		if (vdev->stats)
			pci_free_consistent(vdev->pdev,
					    sizeof(struct vnic_stats),
					    vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			pci_free_consistent(vdev->pdev,
					    sizeof(struct vnic_devcmd_fw_info),
					    vdev->fw_info, vdev->fw_info_pa);
		if (vdev->devcmd2)
			vnic_dev_deinit_devcmd2(vdev);

		kfree(vdev);
	}
}
EXPORT_SYMBOL(vnic_dev_unregister);

struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
				   void *priv, struct pci_dev *pdev,
				   struct vnic_dev_bar *bar,
				   unsigned int num_bars)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}
EXPORT_SYMBOL(vnic_dev_register);

struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev)
{
	return vdev->pdev;
}
EXPORT_SYMBOL(vnic_dev_get_pdev);
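
/* Pick the devcmd transport: prefer devcmd2 when firmware exposes the
 * RES_TYPE_DEVCMD2 resource, and fall back to the legacy register-based
 * devcmd1 if it does not or if devcmd2 initialization fails.
 */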
int vnic_devcmd_init(struct vnic_dev *vdev)
{
	void __iomem *res;
	int err;

	res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (res) {
		err = vnic_dev_init_devcmd2(vdev);
		if (err)
			vdev_warn("DEVCMD2 init failed: %d, Using DEVCMD1\n",
				  err);
		else
			return 0;
	} else {
		vdev_warn("DEVCMD2 resource not found (old firmware?) Using DEVCMD1\n");
	}

	err = vnic_dev_init_devcmd1(vdev);
	if (err)
		vdev_err("DEVCMD1 initialization failed: %d\n", err);

	return err;
}

int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
{
	u64 a0, a1 = len;
	int wait = 1000;
	dma_addr_t prov_pa;
	void *prov_buf;
	int ret;

	prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
	if (!prov_buf)
		return -ENOMEM;

	memcpy(prov_buf, buf, len);

	a0 = prov_pa;

	ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait);

	pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);

	return ret;
}

int vnic_dev_enable2(struct vnic_dev *vdev, int active)
{
	u64 a0, a1 = 0;
	int wait = 1000;

	a0 = (active ? CMD_ENABLE2_ACTIVE : 0);

	return vnic_dev_cmd(vdev, CMD_ENABLE2, &a0, &a1, wait);
}

static int vnic_dev_cmd_status(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
			       int *status)
{
	u64 a0 = cmd, a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_STATUS, &a0, &a1, wait);
	if (!ret)
		*status = (int)a0;

	return ret;
}

int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status)
{
	return vnic_dev_cmd_status(vdev, CMD_ENABLE2, status);
}

int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status)
{
	return vnic_dev_cmd_status(vdev, CMD_DEINIT, status);
}

int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = mac_addr[i];

	return vnic_dev_cmd(vdev, CMD_SET_MAC_ADDR, &a0, &a1, wait);
}

/* vnic_dev_classifier: Add/Delete classifier entries
 * @vdev: vdev of the device
 * @cmd: CLSF_ADD for Add filter
 *	 CLSF_DEL for Delete filter
 * @entry: In case of ADD filter, the caller passes the RQ number in this
 *	   variable.  This function stores the filter_id returned by the
 *	   firmware in the same variable before return.
 *
 *	   In case of DEL filter, the caller passes the filter_id that was
 *	   returned by ADD in this variable.  The return value of the devcmd
 *	   is irrelevant.
 * @data: filter data
 */
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
			struct filter *data)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	dma_addr_t tlv_pa;
	int ret = -EINVAL;
	struct filter_tlv *tlv, *tlv_va;
	struct filter_action *action;
	u64 tlv_size;

	if (cmd == CLSF_ADD) {
		tlv_size = sizeof(struct filter) +
			   sizeof(struct filter_action) +
			   2 * sizeof(struct filter_tlv);
		tlv_va = pci_alloc_consistent(vdev->pdev, tlv_size, &tlv_pa);
		if (!tlv_va)
			return -ENOMEM;

		tlv = tlv_va;
		a0 = tlv_pa;
		a1 = tlv_size;
		memset(tlv, 0, tlv_size);
		tlv->type = CLSF_TLV_FILTER;
		tlv->length = sizeof(struct filter);
		*(struct filter *)&tlv->val = *data;

		tlv = (struct filter_tlv *)((char *)tlv +
					    sizeof(struct filter_tlv) +
					    sizeof(struct filter));

		tlv->type = CLSF_TLV_ACTION;
		tlv->length = sizeof(struct filter_action);
		action = (struct filter_action *)&tlv->val;
		action->type = FILTER_ACTION_RQ_STEERING;
		action->u.rq_idx = *entry;

		ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
		*entry = (u16)a0;
		pci_free_consistent(vdev->pdev, tlv_size, tlv_va, tlv_pa);
	} else if (cmd == CLSF_DEL) {
		a0 = *entry;
		ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
	}

	return ret;
}