/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"
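
/* Receive buffer layout (see nicvf_alloc_rcv_buffer() and GET_RBUF_INFO()):
 * each buffer is carved out of a (possibly shared) page, rounded up to a
 * NICVF_RCV_BUF_ALIGN_BYTES boundary; a struct rbuf_info is stored at that
 * aligned address and the buffer address given to HW for DMA starts
 * NICVF_RCV_BUF_ALIGN_BYTES after it, so GET_RBUF_INFO() can recover it.
 */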
struct rbuf_info {
	struct page	*page;
	void		*data;
	u64		offset;
};

#define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES))

/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
			  u64 reg, int bit_pos, int bits, int val)
{
	u64 bit_mask;
	u64 reg_val;
	int timeout = 10;

	bit_mask = (1ULL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
	return 1;
}

/* Allocate memory for a queue's descriptors */
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
				  int q_len, int desc_size, int align_bytes)
{
	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len) + align_bytes;
	/* Save address, need it while freeing */
	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
						 &dmem->dma, GFP_KERNEL);
	if (!dmem->unalign_base)
		return -ENOMEM;

	/* Align memory address for 'align_bytes' */
	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
	return 0;
}

/* Free a queue's descriptor memory */
static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	if (!dmem)
		return;

	dma_free_coherent(&nic->pdev->dev, dmem->size,
			  dmem->unalign_base, dmem->dma);
	dmem->unalign_base = NULL;
	dmem->base = NULL;
}

/* Allocate a buffer for packet reception.
 * HW returns the memory address where the packet is DMA'ed but not a
 * pointer into the RBDR ring, so save the buffer address at the start of
 * the fragment and align the start address to a cache-aligned address.
 */
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
					 u32 buf_len, u64 **rbuf)
{
	u64 data;
	struct rbuf_info *rinfo;
	int order = get_order(buf_len);

	/* Check if request can be accommodated in previously allocated page */
	if (nic->rb_page) {
		if ((nic->rb_page_offset + buf_len + buf_len) >
		    (PAGE_SIZE << order)) {
			nic->rb_page = NULL;
		} else {
			nic->rb_page_offset += buf_len;
			get_page(nic->rb_page);
		}
	}

	/* Allocate a new page */
	if (!nic->rb_page) {
		nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
					   order);
		if (!nic->rb_page) {
			netdev_err(nic->netdev,
				   "Failed to allocate new rcv buffer\n");
			return -ENOMEM;
		}
		nic->rb_page_offset = 0;
	}

	data = (u64)page_address(nic->rb_page) + nic->rb_page_offset;

	/* Align buffer addr to cache line i.e. 128 bytes */
	rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data));
	/* Save page address for reference update */
	rinfo->page = nic->rb_page;
	/* Store start address for later retrieval */
	rinfo->data = (void *)data;
	/* Store alignment offset */
	rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data);

	data += rinfo->offset;

	/* Give next aligned address to hw for DMA */
	*rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES);
	return 0;
}

/* Retrieve actual buffer start address and build skb for received packet */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
					   u64 rb_ptr, int len)
{
	struct sk_buff *skb;
	struct rbuf_info *rinfo;

	rb_ptr = (u64)phys_to_virt(rb_ptr);
	/* Get buffer start address and alignment offset */
	rinfo = GET_RBUF_INFO(rb_ptr);

	/* Now build an skb to give to stack */
	skb = build_skb(rinfo->data, RCV_FRAG_LEN);
	if (!skb) {
		put_page(rinfo->page);
		return NULL;
	}

	/* Set correct skb->data */
	skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES);

	prefetch((void *)rb_ptr);
	return skb;
}

/* Allocate RBDR ring and populate receive buffers */
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
			   int ring_len, int buf_size)
{
	int idx;
	u64 *rbuf;
	struct rbdr_entry_t *desc;
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
				     sizeof(struct rbdr_entry_t),
				     NICVF_RCV_BUF_ALIGN_BYTES);
	if (err)
		return err;

	rbdr->desc = rbdr->dmem.base;
	/* Buffer size has to be in multiples of 128 bytes */
	rbdr->dma_size = buf_size;
	rbdr->enable = true;
	rbdr->thresh = RBDR_THRESH;

	nic->rb_page = NULL;
	for (idx = 0; idx < ring_len; idx++) {
		err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
					     &rbuf);
		if (err)
			return err;

		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
	}
	return 0;
}

/* Free RBDR ring and its receive buffers */
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	int head, tail;
	u64 buf_addr;
	struct rbdr_entry_t *desc;
	struct rbuf_info *rinfo;

	if (!rbdr)
		return;

	rbdr->enable = false;
	if (!rbdr->dmem.base)
		return;

	head = rbdr->head;
	tail = rbdr->tail;

	/* Release page references of queued buffers */
	while (head != tail) {
		desc = GET_RBDR_DESC(rbdr, head);
		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
		rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
		put_page(rinfo->page);
		head++;
		head &= (rbdr->dmem.q_len - 1);
	}
	/* Release page reference of tail desc */
	desc = GET_RBDR_DESC(rbdr, tail);
	buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
	rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
	put_page(rinfo->page);

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}

/* Refill receive buffer descriptors with new buffers */
static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
{
	struct queue_set *qs = nic->qs;
	int rbdr_idx = qs->rbdr_cnt;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr *rbdr;
	struct rbdr_entry_t *desc;
	u64 *rbuf;
	int new_rb = 0;

refill:
	if (!rbdr_idx)
		return;
	rbdr_idx--;
	rbdr = &qs->rbdr[rbdr_idx];
	/* Check if it's enabled */
	if (!rbdr->enable)
		goto next_rbdr;

	/* Get no of descriptors to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be rung with a maximum of ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1))
		goto next_rbdr;
	else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
			break;

		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
		refill_rb_cnt--;
		new_rb++;
	}

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt)
		nic->rb_alloc_fail = true;
	else
		nic->rb_alloc_fail = false;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      rbdr_idx, new_rb);
next_rbdr:
	/* Re-enable RBDR interrupts only if buffer allocation succeeded */
	if (!nic->rb_alloc_fail && rbdr->enable)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

	if (rbdr_idx)
		goto refill;
}

/* Alloc rcv buffers in non-atomic mode for better success */
void nicvf_rbdr_work(struct work_struct *work)
{
	struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);

	nicvf_refill_rbdr(nic, GFP_KERNEL);
	if (nic->rb_alloc_fail)
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	else
		nic->rb_work_scheduled = false;
}

/* In Softirq context, alloc rcv buffers in atomic mode */
void nicvf_rbdr_task(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;

	nicvf_refill_rbdr(nic, GFP_ATOMIC);
	if (nic->rb_alloc_fail) {
		nic->rb_work_scheduled = true;
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	}
}

/* Initialize completion queue */
static int nicvf_init_cmp_queue(struct nicvf *nic,
				struct cmp_queue *cq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
				     NICVF_CQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	cq->desc = cq->dmem.base;
	cq->thresh = CMP_QUEUE_CQE_THRESH;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	return 0;
}

static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{
	if (!cq)
		return;
	if (!cq->dmem.base)
		return;

	nicvf_free_q_desc_mem(nic, &cq->dmem);
}

/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
				struct snd_queue *sq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
				     NICVF_SQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	sq->desc = sq->dmem.base;
	sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
	if (!sq->skbuff)
		return -ENOMEM;
	sq->head = 0;
	sq->tail = 0;
	atomic_set(&sq->free_cnt, q_len - 1);
	sq->thresh = SND_QUEUE_THRESH;

	/* Preallocate memory for TSO segment's header */
	sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
					  q_len * TSO_HEADER_SIZE,
					  &sq->tso_hdrs_phys, GFP_KERNEL);
	if (!sq->tso_hdrs)
		return -ENOMEM;

	return 0;
}

static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	if (!sq)
		return;
	if (!sq->dmem.base)
		return;

	if (sq->tso_hdrs)
		dma_free_coherent(&nic->pdev->dev,
				  sq->dmem.q_len * TSO_HEADER_SIZE,
				  sq->tso_hdrs, sq->tso_hdrs_phys);

	kfree(sq->skbuff);
	nicvf_free_q_desc_mem(nic, &sq->dmem);
}

static void nicvf_reclaim_snd_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}

static void nicvf_reclaim_rbdr(struct nicvf *nic,
			       struct rbdr *rbdr, int qidx)
{
	u64 tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_HEAD,
					  qidx) >> 3;
	rbdr->tail = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_TAIL,
					  qidx) >> 3;

	/* If RBDR FIFO is in 'FAIL' state then do a reset first
	 * before reclaiming.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3)
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
				      qidx, NICVF_RBDR_RESET);

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
					   qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;
		usleep_range(1000, 2000);
		timeout--;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}
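
/* Enable or disable HW stripping of the first VLAN tag on received packets,
 * and mirror the setting to all secondary Qsets, if any.
 */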
void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
{
	u64 rq_cfg;
	int sqs;

	rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);

	/* Enable first VLAN stripping */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		rq_cfg |= (1ULL << 25);
	else
		rq_cfg &= ~(1ULL << 25);
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);

	/* Configure Secondary Qsets, if any */
	for (sqs = 0; sqs < nic->sqs_count; sqs++)
		if (nic->snicvf[sqs])
			nicvf_queue_reg_write(nic->snicvf[sqs],
					      NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
}

/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		return;
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* all writes of RBDR data to be loaded into L2 Cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
		     (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
		     (rq->cont_qs_rbdr_idx << 8) |
		     (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/* RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
	if (!nic->sqs_mode)
		nicvf_config_vlan_stripping(nic, nic->netdev->features);

	/* Enable Receive queue */
	memset(&rq_cfg, 0, sizeof(struct rq_cfg));
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
}

/* Configures completion queue */
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	if (!cq->enable)
		return;

	spin_lock_init(&cq->lock);
	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
			      qidx, (u64)(cq->dmem.phys_base));

	/* Enable Completion queue */
	memset(&cq_cfg, 0, sizeof(struct cq_cfg));
	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	cq_cfg.caching = 0;
	cq_cfg.qsize = CMP_QSIZE;
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
			      qidx, CMP_QUEUE_TIMER_THRESH);
}

/* Configures transmit queue */
static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.sqs_mode = nic->sqs_mode;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
			      qidx, (u64)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	memset(&sq_cfg, 0, sizeof(struct sq_cfg));
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = SND_QSIZE;
	sq_cfg.tstmp_bgx_intf = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);

	/* Set queue:cpu affinity for better load distribution */
	if (cpu_online(qidx)) {
		cpumask_set_cpu(qidx, &sq->affinity_mask);
		netif_set_xps_queue(nic->netdev,
				    &sq->affinity_mask, qidx);
	}
}

/* Configures receive buffer descriptor ring */
static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
			      int qidx, bool enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
			      qidx, (u64)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, *(u64 *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      qidx, qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
			      qidx, rbdr->thresh - 1);
}

/* Requests PF to assign and enable Qset */
void nicvf_qset_config(struct nicvf *nic, bool enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs = nic->qs;
	struct qs_cfg *qs_cfg;

	if (!qs) {
		netdev_warn(nic->netdev,
			    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;
	mbx.qs.sqs_count = nic->sqs_count;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
#ifdef __BIG_ENDIAN
		qs_cfg->be = 1;
#endif
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

static int nicvf_alloc_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
				    DMA_BUFFER_LEN))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
			goto alloc_fail;
	}

	return 0;
alloc_fail:
	nicvf_free_resources(nic);
	return -ENOMEM;
}
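
/* Allocate the Qset structure and set the default queue counts and lengths */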
int nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = RBDR_CNT;
	qs->rq_cnt = RCV_QUEUE_CNT;
	qs->sq_cnt = SND_QUEUE_CNT;
	qs->cq_cnt = CMP_QUEUE_CNT;

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;

	nic->rx_queues = qs->rq_cnt;
	nic->tx_queues = qs->sq_cnt;

	return 0;
}
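
/* Bring up (allocate and configure) or tear down (quiesce and free)
 * all queues of this Qset.
 */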
int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
	bool disable = false;
	struct queue_set *qs = nic->qs;
	int qidx;

	if (!qs)
		return 0;

	if (enable) {
		if (nicvf_alloc_resources(nic))
			return -ENOMEM;

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}

	return 0;
}

/* Get a free descriptor from SQ
 * returns the descriptor number
 */
static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

	qentry = sq->tail;
	atomic_sub(desc_cnt, &sq->free_cnt);
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return qentry;
}

/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	atomic_add(desc_cnt, &sq->free_cnt);
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}

static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return qentry;
}

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}

void nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}
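
/* Release SQ descriptors that HW has already processed (up to the HW head),
 * freeing their skbs and accounting the transmitted packets/bytes.
 */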
void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
			      int qidx)
{
	u64 head, tail;
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	struct sq_hdr_subdesc *hdr;

	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		if (skb)
			dev_kfree_skb_any(skb);
		atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
		atomic64_add(hdr->tot_len,
			     (atomic64_t *)&netdev->stats.tx_bytes);
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}

/* Calculate no of SQ subdescriptors needed to transmit all
 * segments of this TSO packet.
 * Taken from 'Tilera network driver' with a minor modification.
 */
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	unsigned int data_len = skb->len - sh_len;
	unsigned int p_len = sh->gso_size;
	long f_id = -1;	/* id of the current fragment */
	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
	long f_used = 0;  /* bytes used from the current fragment */
	long n;  /* size of the current piece of payload */
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {
			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = skb_frag_size(&sh->frags[f_id]);
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
	return num_edescs + sh->gso_segs;
}

/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;

	if (skb_shinfo(skb)->gso_size) {
		subdesc_cnt = nicvf_tso_count_subdescs(skb);
		return subdesc_cnt;
	}

	if (skb_shinfo(skb)->nr_frags)
		subdesc_cnt += skb_shinfo(skb)->nr_frags;

	return subdesc_cnt;
}

/* Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static inline void
nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
			 int subdesc_cnt, struct sk_buff *skb, int len)
{
	int proto;
	struct sq_hdr_subdesc *hdr;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	sq->skbuff[qentry] = (u64)skb;

	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* No of subdescriptors following this */
	hdr->subdesc_cnt = subdesc_cnt;
	hdr->tot_len = len;

	/* Offload checksum calculation to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->csum_l3 = 1; /* Enable IP csum calculation */
		hdr->l3_offset = skb_network_offset(skb);
		hdr->l4_offset = skb_transport_offset(skb);

		proto = ip_hdr(skb)->protocol;
		switch (proto) {
		case IPPROTO_TCP:
			hdr->csum_l4 = SEND_L4_CSUM_TCP;
			break;
		case IPPROTO_UDP:
			hdr->csum_l4 = SEND_L4_CSUM_UDP;
			break;
		case IPPROTO_SCTP:
			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
			break;
		}
	}
}

/* SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
					       int size, u64 data)
{
	struct sq_gather_subdesc *gather;

	qentry &= (sq->dmem.q_len - 1);
	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

	memset(gather, 0, SND_QUEUE_DESC_SIZE);
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
	gather->size = size;
	gather->addr = data;
}

/* Segment a TSO packet into 'gso_size' segments and append
 * them to SQ for transfer
 */
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
			       int sq_num, int qentry, struct sk_buff *skb)
{
	struct tso_t tso;
	int seg_subdescs = 0, desc_cnt = 0;
	int seg_len, total_len, data_left;
	int hdr_qentry = qentry;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso_start(skb, &tso);
	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		/* Save Qentry for adding HDR_SUBDESC at the end */
		hdr_qentry = qentry;
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* Add segment's header */
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
					    sq->tso_hdrs_phys +
					    qentry * TSO_HEADER_SIZE);
		/* HDR_SUBDESC + GATHER */
		seg_subdescs = 2;
		seg_len = hdr_len;

		/* Add segment's payload fragments */
		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);

			qentry = nicvf_get_nxt_sqentry(sq, qentry);
			nicvf_sq_add_gather_subdesc(sq, qentry, size,
						    virt_to_phys(tso.data));
			seg_subdescs++;
			seg_len += size;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
		nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
					 seg_subdescs - 1, skb, seg_len);
		sq->skbuff[hdr_qentry] = (u64)NULL;
		qentry = nicvf_get_nxt_sqentry(sq, qentry);

		desc_cnt += seg_subdescs;
	}
	/* Save SKB in the last segment for freeing */
	sq->skbuff[hdr_qentry] = (u64)skb;

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit all TSO segments */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, desc_cnt);
	nic->drv_stats.tx_tso++;
	return 1;
}

/* Append an skb to a SQ for packet transfer. */
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
{
	int i, size;
	int subdesc_cnt;
	int sq_num, qentry;
	struct queue_set *qs;
	struct snd_queue *sq;

	sq_num = skb_get_queue_mapping(skb);
	if (sq_num >= MAX_SND_QUEUES_PER_QS) {
		/* Get secondary Qset's SQ structure */
		i = sq_num / MAX_SND_QUEUES_PER_QS;
		if (!nic->snicvf[i - 1]) {
			netdev_warn(nic->netdev,
				    "Secondary Qset#%d's ptr not initialized\n",
				    i - 1);
			return 1;
		}
		nic = (struct nicvf *)nic->snicvf[i - 1];
		sq_num = sq_num % MAX_SND_QUEUES_PER_QS;
	}

	qs = nic->qs;
	sq = &qs->sq[sq_num];

	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
	if (subdesc_cnt > atomic_read(&sq->free_cnt))
		goto append_fail;

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Check if it's a TSO packet */
	if (skb_shinfo(skb)->gso_size)
		return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);

	/* Add SQ header subdesc */
	nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);

	/* Add SQ gather subdescs */
	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));

	/* Check for scattered buffer */
	if (!skb_is_nonlinear(skb))
		goto doorbell;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];

		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		size = skb_frag_size(frag);
		nicvf_sq_add_gather_subdesc(sq, qentry, size,
					    virt_to_phys(
					    skb_frag_address(frag)));
	}

doorbell:
	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit new packet */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, subdesc_cnt);
	return 1;

append_fail:
	/* Use original PCI dev for debug log */
	nic = nic->pnicvf;
	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
	return 0;
}
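
/* Receive buffer lengths sit as 16-bit entries inside the 64-bit CQE words;
 * on big-endian hosts the index within each group of four entries is
 * mirrored so the right halfword is picked.
 */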
static inline unsigned frag_num(unsigned i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}

/* Returns SKB for a received packet */
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	int frag;
	int payload_len = 0;
	struct sk_buff *skb = NULL;
	struct sk_buff *skb_frag = NULL;
	struct sk_buff *prev_frag = NULL;
	u16 *rb_lens = NULL;
	u64 *rb_ptrs = NULL;

	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
	rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));

	netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
		   __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);

	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
		payload_len = rb_lens[frag_num(frag)];
		if (!frag) {
			/* First fragment */
			skb = nicvf_rb_ptr_to_skb(nic,
						  *rb_ptrs - cqe_rx->align_pad,
						  payload_len);
			if (!skb)
				return NULL;
			skb_reserve(skb, cqe_rx->align_pad);
			skb_put(skb, payload_len);
		} else {
			/* Add fragments */
			skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
						       payload_len);
			if (!skb_frag) {
				dev_kfree_skb(skb);
				return NULL;
			}

			if (!skb_shinfo(skb)->frag_list)
				skb_shinfo(skb)->frag_list = skb_frag;
			else
				prev_frag->next = skb_frag;

			prev_frag = skb_frag;
			skb->len += payload_len;
			skb->data_len += payload_len;
			skb_frag->len = payload_len;
		}
		/* Next buffer pointer */
		rb_ptrs++;
	}
	return skb;
}

/* Enable interrupt */
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 reg_val;

	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		netdev_err(nic->netdev,
			   "Failed to enable interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
}

/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 reg_val = 0;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		netdev_err(nic->netdev,
			   "Failed to disable interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
}

/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 reg_val = 0;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		netdev_err(nic->netdev,
			   "Failed to clear interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_INT, reg_val);
}

/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	u64 reg_val;
	u64 mask = 0xff;

	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);

	switch (int_type) {
	case NICVF_INTR_CQ:
		mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		mask = NICVF_INTR_PKT_DROP_MASK;
		break;
	case NICVF_INTR_TCP_TIMER:
		mask = NICVF_INTR_TCP_TIMER_MASK;
		break;
	case NICVF_INTR_MBOX:
		mask = NICVF_INTR_MBOX_MASK;
		break;
	case NICVF_INTR_QS_ERR:
		mask = NICVF_INTR_QS_ERR_MASK;
		break;
	default:
		netdev_err(nic->netdev,
			   "Failed to check interrupt enable: unknown type\n");
		break;
	}

	return (reg_val & mask);
}

void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
	struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	rq = &nic->qs->rq[rq_idx];
	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}

void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
	struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	sq = &nic->qs->sq[sq_idx];
	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}

/* Check for errors in the receive completion queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	struct nicvf_hw_stats *stats = &nic->hw_stats;

	if (!cqe_rx->err_level && !cqe_rx->err_opcode)
		return 0;

	if (netif_msg_rx_err(nic))
		netdev_err(nic->netdev,
			   "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
			   nic->netdev->name,
			   cqe_rx->err_level, cqe_rx->err_opcode);

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		stats->rx_bgx_truncated_pkts++;
		break;
	case CQ_RX_ERROP_RE_JABBER:
		stats->rx_jabber_errs++;
		break;
	case CQ_RX_ERROP_RE_FCS:
		stats->rx_fcs_errs++;
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		stats->rx_bgx_errs++;
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		stats->rx_prel2_errs++;
		break;
	case CQ_RX_ERROP_L2_MAL:
		stats->rx_l2_hdr_malformed++;
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		stats->rx_oversize++;
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		stats->rx_undersize++;
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		stats->rx_l2_len_mismatch++;
		break;
	case CQ_RX_ERROP_L2_PCLP:
		stats->rx_l2_pclp++;
		break;
	case CQ_RX_ERROP_IP_NOT:
		stats->rx_ip_ver_errs++;
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		stats->rx_ip_csum_errs++;
		break;
	case CQ_RX_ERROP_IP_MAL:
		stats->rx_ip_hdr_malformed++;
		break;
	case CQ_RX_ERROP_IP_MALD:
		stats->rx_ip_payload_malformed++;
		break;
	case CQ_RX_ERROP_IP_HOP:
		stats->rx_ip_ttl_errs++;
		break;
	case CQ_RX_ERROP_L3_PCLP:
		stats->rx_l3_pclp++;
		break;
	case CQ_RX_ERROP_L4_MAL:
		stats->rx_l4_malformed++;
		break;
	case CQ_RX_ERROP_L4_CHK:
		stats->rx_l4_csum_errs++;
		break;
	case CQ_RX_ERROP_UDP_LEN:
		stats->rx_udp_len_errs++;
		break;
	case CQ_RX_ERROP_L4_PORT:
		stats->rx_l4_port_errs++;
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		stats->rx_tcp_flag_errs++;
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		stats->rx_tcp_offset_errs++;
		break;
	case CQ_RX_ERROP_L4_PCLP:
		stats->rx_l4_pclp++;
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		stats->rx_truncated_pkts++;
		break;
	}

	return 1;
}

/* Check for errors in the send completion queue entry */
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_GOOD:
		stats->tx.good++;
		return 0;
	case CQ_TX_ERROP_DESC_FAULT:
		stats->tx.desc_fault++;
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		stats->tx.hdr_cons_err++;
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		stats->tx.subdesc_err++;
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		stats->tx.imm_size_oflow++;
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		stats->tx.data_seq_err++;
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		stats->tx.mem_seq_err++;
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		stats->tx.lock_viol++;
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		stats->tx.data_fault++;
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		stats->tx.tstmp_conflict++;
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		stats->tx.tstmp_timeout++;
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		stats->tx.mem_fault++;
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		stats->tx.csum_overlap++;
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		stats->tx.csum_overflow++;
		break;
	}

	return 1;
}