nic.h

/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#ifndef NIC_H
#define NIC_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include "thunder_bgx.h"

/* PCI device IDs */
#define PCI_DEVICE_ID_THUNDER_NIC_PF 0xA01E
#define PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF 0x0011
#define PCI_DEVICE_ID_THUNDER_NIC_VF 0xA034
#define PCI_DEVICE_ID_THUNDER_BGX 0xA026

/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 0
#define PCI_MSIX_REG_BAR_NUM 4

/* NIC SRIOV VF count */
#define MAX_NUM_VFS_SUPPORTED 128
#define DEFAULT_NUM_VF_ENABLED 8

#define NIC_TNS_BYPASS_MODE 0
#define NIC_TNS_MODE 1

/* NIC priv flags */
#define NIC_SRIOV_ENABLED BIT(0)

/* Min/Max packet size */
#define NIC_HW_MIN_FRS 64
#define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */

/* Max pkinds */
#define NIC_MAX_PKIND 16
/* Rx Channels */
/* Receive channel configuration in TNS bypass mode
 * (each BGX LMAC gets a block of 16 VNIC channels):
 * BGX0-LMAC0-CHAN0 - VNIC CHAN0
 * BGX0-LMAC1-CHAN0 - VNIC CHAN16
 * ...
 * BGX1-LMAC0-CHAN0 - VNIC CHAN128
 * ...
 * BGX1-LMAC3-CHAN0 - VNIC CHAN176
 */
#define NIC_INTF_COUNT 2 /* Interfaces between VNIC and TNS/BGX */
#define NIC_CHANS_PER_INF 128
#define NIC_MAX_CHANS (NIC_INTF_COUNT * NIC_CHANS_PER_INF)
#define NIC_CPI_COUNT 2048 /* No of channel parse indices */

/* TNS bypass mode: 1-1 mapping between VNIC and BGX:LMAC */
#define NIC_MAX_BGX MAX_BGX_PER_CN88XX
#define NIC_CPI_PER_BGX (NIC_CPI_COUNT / NIC_MAX_BGX)
#define NIC_MAX_CPI_PER_LMAC 64 /* Max when CPI_ALG is IP diffserv */
#define NIC_RSSI_PER_BGX (NIC_RSSI_COUNT / NIC_MAX_BGX)
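
/* Illustrative arithmetic for the defines above, assuming MAX_BGX_PER_CN88XX
 * is 2 as defined in thunder_bgx.h:
 *   NIC_MAX_CHANS    = 2 * 128  = 256 channels
 *   NIC_CPI_PER_BGX  = 2048 / 2 = 1024 CPIs per BGX
 *   NIC_RSSI_PER_BGX = 4096 / 2 = 2048 RSS indices per BGX
 * (NIC_RSSI_COUNT is defined further below; with CPI_ALG set to IP diffserv,
 * each LMAC is still capped at NIC_MAX_CPI_PER_LMAC CPIs.)
 */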

/* Tx scheduling */
#define NIC_MAX_TL4 1024
#define NIC_MAX_TL4_SHAPERS 256 /* 1 shaper for 4 TL4s */
#define NIC_MAX_TL3 256
#define NIC_MAX_TL3_SHAPERS 64 /* 1 shaper for 4 TL3s */
#define NIC_MAX_TL2 64
#define NIC_MAX_TL2_SHAPERS 2 /* 1 shaper for 32 TL2s */
#define NIC_MAX_TL1 2

/* TNS bypass mode */
#define NIC_TL2_PER_BGX 32
#define NIC_TL4_PER_BGX (NIC_MAX_TL4 / NIC_MAX_BGX)
#define NIC_TL4_PER_LMAC (NIC_MAX_TL4 / NIC_CHANS_PER_INF)

/* NIC VF Interrupts */
#define NICVF_INTR_CQ 0
#define NICVF_INTR_SQ 1
#define NICVF_INTR_RBDR 2
#define NICVF_INTR_PKT_DROP 3
#define NICVF_INTR_TCP_TIMER 4
#define NICVF_INTR_MBOX 5
#define NICVF_INTR_QS_ERR 6

#define NICVF_INTR_CQ_SHIFT 0
#define NICVF_INTR_SQ_SHIFT 8
#define NICVF_INTR_RBDR_SHIFT 16
#define NICVF_INTR_PKT_DROP_SHIFT 20
#define NICVF_INTR_TCP_TIMER_SHIFT 21
#define NICVF_INTR_MBOX_SHIFT 22
#define NICVF_INTR_QS_ERR_SHIFT 23

#define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT)
#define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT)
#define NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT)
#define NICVF_INTR_PKT_DROP_MASK BIT(NICVF_INTR_PKT_DROP_SHIFT)
#define NICVF_INTR_TCP_TIMER_MASK BIT(NICVF_INTR_TCP_TIMER_SHIFT)
#define NICVF_INTR_MBOX_MASK BIT(NICVF_INTR_MBOX_SHIFT)
#define NICVF_INTR_QS_ERR_MASK BIT(NICVF_INTR_QS_ERR_SHIFT)
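
/* A minimal illustrative helper (not part of the original header), assuming
 * the field layout implied by the shift/mask macros above: CQ, SQ and RBDR
 * interrupts get one bit per queue starting at their *_SHIFT, while the
 * remaining interrupt types are single-bit.  The real driver builds these
 * masks elsewhere; this sketch only shows how a per-queue bit would be
 * composed.
 */
static inline u64 nicvf_intr_bit_example(int int_type, int q_idx)
{
	switch (int_type) {
	case NICVF_INTR_CQ:
		return 1ULL << (NICVF_INTR_CQ_SHIFT + q_idx);
	case NICVF_INTR_SQ:
		return 1ULL << (NICVF_INTR_SQ_SHIFT + q_idx);
	case NICVF_INTR_RBDR:
		return 1ULL << (NICVF_INTR_RBDR_SHIFT + q_idx);
	case NICVF_INTR_PKT_DROP:
		return NICVF_INTR_PKT_DROP_MASK;
	case NICVF_INTR_TCP_TIMER:
		return NICVF_INTR_TCP_TIMER_MASK;
	case NICVF_INTR_MBOX:
		return NICVF_INTR_MBOX_MASK;
	case NICVF_INTR_QS_ERR:
		return NICVF_INTR_QS_ERR_MASK;
	default:
		return 0;
	}
}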

/* MSI-X interrupts */
#define NIC_PF_MSIX_VECTORS 10
#define NIC_VF_MSIX_VECTORS 20

#define NIC_PF_INTR_ID_ECC0_SBE 0
#define NIC_PF_INTR_ID_ECC0_DBE 1
#define NIC_PF_INTR_ID_ECC1_SBE 2
#define NIC_PF_INTR_ID_ECC1_DBE 3
#define NIC_PF_INTR_ID_ECC2_SBE 4
#define NIC_PF_INTR_ID_ECC2_DBE 5
#define NIC_PF_INTR_ID_ECC3_SBE 6
#define NIC_PF_INTR_ID_ECC3_DBE 7
#define NIC_PF_INTR_ID_MBOX0 8
#define NIC_PF_INTR_ID_MBOX1 9

/* Minimum FIFO level before all packets for the CQ are dropped
 *
 * This value ensures that once a packet has been "accepted"
 * for reception it will not get dropped due to the non-availability
 * of a CQ descriptor. A HW erratum mandates this value to be
 * at least 0x100.
 */
#define NICPF_CQM_MIN_DROP_LEVEL 0x100

/* Global timer for CQ timer thresh interrupts
 * Calculated for an SCLK of 700 MHz;
 * the value written should be 1/16th of what is expected.
 *
 * 1 tick per 0.025 usec
 */
#define NICPF_CLK_PER_INT_TICK 1

/* Time to wait before we decide that an SQ is stuck.
 *
 * Since both packet Rx and Tx notifications are handled by the same CQ,
 * freeing of transmitted skbs gets delayed when packets are received at
 * a very high rate (e.g. L2 forwarding), and the watchdog would kick in
 * and reset the interface. Hence this value is kept high.
 */
#define NICVF_TX_TIMEOUT (50 * HZ)

struct nicvf_cq_poll {
	struct nicvf *nicvf;
	u8 cq_idx; /* Completion queue index */
	struct napi_struct napi;
};

#define NIC_RSSI_COUNT 4096 /* Total no of RSS indices */
#define NIC_MAX_RSS_HASH_BITS 8
#define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS)
#define RSS_HASH_KEY_SIZE 5 /* 320 bit key */

struct nicvf_rss_info {
	bool enable;
#define RSS_L2_EXTENDED_HASH_ENA BIT(0)
#define RSS_IP_HASH_ENA BIT(1)
#define RSS_TCP_HASH_ENA BIT(2)
#define RSS_TCP_SYN_DIS BIT(3)
#define RSS_UDP_HASH_ENA BIT(4)
#define RSS_L4_EXTENDED_HASH_ENA BIT(5)
#define RSS_ROCE_ENA BIT(6)
#define RSS_L3_BI_DIRECTION_ENA BIT(7)
#define RSS_L4_BI_DIRECTION_ENA BIT(8)
	u64 cfg;
	u8 hash_bits;
	u16 rss_size;
	u8 ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	u64 key[RSS_HASH_KEY_SIZE];
} ____cacheline_aligned_in_smp;
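
/* A minimal illustrative lookup (not part of the original header), assuming
 * rss_size is the power-of-two size of ind_tbl (1 << hash_bits): hardware
 * hashes the packet with 'key' and uses the low hash bits to pick an entry
 * from the indirection table, which in turn selects the receive queue.  The
 * software equivalent of that final step is simply:
 */
static inline u8 nicvf_rss_ind_lookup_example(struct nicvf_rss_info *rss,
					      u32 hash)
{
	return rss->ind_tbl[hash & (rss->rss_size - 1)];
}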

enum rx_stats_reg_offset {
	RX_OCTS = 0x0,
	RX_UCAST = 0x1,
	RX_BCAST = 0x2,
	RX_MCAST = 0x3,
	RX_RED = 0x4,
	RX_RED_OCTS = 0x5,
	RX_ORUN = 0x6,
	RX_ORUN_OCTS = 0x7,
	RX_FCS = 0x8,
	RX_L2ERR = 0x9,
	RX_DRP_BCAST = 0xa,
	RX_DRP_MCAST = 0xb,
	RX_DRP_L3BCAST = 0xc,
	RX_DRP_L3MCAST = 0xd,
	RX_STATS_ENUM_LAST,
};

enum tx_stats_reg_offset {
	TX_OCTS = 0x0,
	TX_UCAST = 0x1,
	TX_BCAST = 0x2,
	TX_MCAST = 0x3,
	TX_DROP = 0x4,
	TX_STATS_ENUM_LAST,
};

struct nicvf_hw_stats {
	u64 rx_bytes;
	u64 rx_ucast_frames;
	u64 rx_bcast_frames;
	u64 rx_mcast_frames;
	u64 rx_fcs_errors;
	u64 rx_l2_errors;
	u64 rx_drop_red;
	u64 rx_drop_red_bytes;
	u64 rx_drop_overrun;
	u64 rx_drop_overrun_bytes;
	u64 rx_drop_bcast;
	u64 rx_drop_mcast;
	u64 rx_drop_l3_bcast;
	u64 rx_drop_l3_mcast;
	u64 rx_bgx_truncated_pkts;
	u64 rx_jabber_errs;
	u64 rx_fcs_errs;
	u64 rx_bgx_errs;
	u64 rx_prel2_errs;
	u64 rx_l2_hdr_malformed;
	u64 rx_oversize;
	u64 rx_undersize;
	u64 rx_l2_len_mismatch;
	u64 rx_l2_pclp;
	u64 rx_ip_ver_errs;
	u64 rx_ip_csum_errs;
	u64 rx_ip_hdr_malformed;
	u64 rx_ip_payload_malformed;
	u64 rx_ip_ttl_errs;
	u64 rx_l3_pclp;
	u64 rx_l4_malformed;
	u64 rx_l4_csum_errs;
	u64 rx_udp_len_errs;
	u64 rx_l4_port_errs;
	u64 rx_tcp_flag_errs;
	u64 rx_tcp_offset_errs;
	u64 rx_l4_pclp;
	u64 rx_truncated_pkts;

	u64 tx_bytes_ok;
	u64 tx_ucast_frames_ok;
	u64 tx_bcast_frames_ok;
	u64 tx_mcast_frames_ok;
	u64 tx_drops;
};

struct nicvf_drv_stats {
	/* Rx */
	u64 rx_frames_ok;
	u64 rx_frames_64;
	u64 rx_frames_127;
	u64 rx_frames_255;
	u64 rx_frames_511;
	u64 rx_frames_1023;
	u64 rx_frames_1518;
	u64 rx_frames_jumbo;
	u64 rx_drops;

	/* Tx */
	u64 tx_frames_ok;
	u64 tx_drops;
	u64 tx_tso;
	u64 txq_stop;
	u64 txq_wake;
};

struct nicvf {
	struct nicvf *pnicvf;
	struct net_device *netdev;
	struct pci_dev *pdev;
	u8 vf_id;
	u8 node;
	u8 tns_mode:1;
	u8 sqs_mode:1;
	u8 loopback_supported:1;
	u16 mtu;
	struct queue_set *qs;
#define MAX_SQS_PER_VF_SINGLE_NODE 5
#define MAX_SQS_PER_VF 11
	u8 sqs_id;
	u8 sqs_count; /* Secondary Qset count */
	struct nicvf *snicvf[MAX_SQS_PER_VF];
	u8 rx_queues;
	u8 tx_queues;
	u8 max_queues;
	void __iomem *reg_base;
	bool link_up;
	u8 duplex;
	u32 speed;
	struct page *rb_page;
	u32 rb_page_offset;
	bool rb_alloc_fail;
	bool rb_work_scheduled;
	struct delayed_work rbdr_work;
	struct tasklet_struct rbdr_task;
	struct tasklet_struct qs_err_task;
	struct tasklet_struct cq_task;
	struct nicvf_cq_poll *napi[8];
	struct nicvf_rss_info rss_info;
	u8 cpi_alg;

	/* Interrupt coalescing settings */
	u32 cq_coalesce_usecs;
	u32 msg_enable;

	struct nicvf_hw_stats hw_stats;
	struct nicvf_drv_stats drv_stats;
	struct bgx_stats bgx_stats;
	struct work_struct reset_task;

	/* MSI-X */
	bool msix_enabled;
	u8 num_vec;
	struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS];
	char irq_name[NIC_VF_MSIX_VECTORS][20];
	bool irq_allocated[NIC_VF_MSIX_VECTORS];

	/* VF <-> PF mailbox communication */
	bool pf_acked;
	bool pf_nacked;
	bool set_mac_pending;
} ____cacheline_aligned_in_smp;

/* PF <--> VF Mailbox communication
 * Eight 64-bit registers are shared between the PF and each VF
 * (a separate set per VF). Writing '1' into the last register (mbx7)
 * signals the end of a message.
 */
#define NIC_PF_VF_MAILBOX_SIZE 2
#define NIC_MBOX_MSG_TIMEOUT 2000 /* ms */

/* Mailbox message types */
#define NIC_MBOX_MSG_READY 0x01 /* Is PF ready to rcv msgs */
#define NIC_MBOX_MSG_ACK 0x02 /* ACK the message received */
#define NIC_MBOX_MSG_NACK 0x03 /* NACK the message received */
#define NIC_MBOX_MSG_QS_CFG 0x04 /* Configure Qset */
#define NIC_MBOX_MSG_RQ_CFG 0x05 /* Configure receive queue */
#define NIC_MBOX_MSG_SQ_CFG 0x06 /* Configure Send queue */
#define NIC_MBOX_MSG_RQ_DROP_CFG 0x07 /* Configure receive queue drop levels */
#define NIC_MBOX_MSG_SET_MAC 0x08 /* Add MAC ID to DMAC filter */
#define NIC_MBOX_MSG_SET_MAX_FRS 0x09 /* Set max frame size */
#define NIC_MBOX_MSG_CPI_CFG 0x0A /* Config CPI, RSSI */
#define NIC_MBOX_MSG_RSS_SIZE 0x0B /* Get RSS indir_tbl size */
#define NIC_MBOX_MSG_RSS_CFG 0x0C /* Config RSS table */
#define NIC_MBOX_MSG_RSS_CFG_CONT 0x0D /* RSS config continuation */
#define NIC_MBOX_MSG_RQ_BP_CFG 0x0E /* RQ backpressure config */
#define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */
#define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */
#define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */
#define NIC_MBOX_MSG_ALLOC_SQS 0x12 /* Allocate secondary Qset */
#define NIC_MBOX_MSG_NICVF_PTR 0x13 /* Send nicvf ptr to PF */
#define NIC_MBOX_MSG_PNICVF_PTR 0x14 /* Get primary qset nicvf ptr */
#define NIC_MBOX_MSG_SNICVF_PTR 0x15 /* Send secondary Qset's nicvf ptr to primary VF */
#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */

struct nic_cfg_msg {
	u8 msg;
	u8 vf_id;
	u8 node_id;
	u8 tns_mode:1;
	u8 sqs_mode:1;
	u8 loopback_supported:1;
	u8 mac_addr[ETH_ALEN];
};

/* Qset configuration */
struct qs_cfg_msg {
	u8 msg;
	u8 num;
	u8 sqs_count;
	u64 cfg;
};

/* Receive queue configuration */
struct rq_cfg_msg {
	u8 msg;
	u8 qs_num;
	u8 rq_num;
	u64 cfg;
};

/* Send queue configuration */
struct sq_cfg_msg {
	u8 msg;
	u8 qs_num;
	u8 sq_num;
	bool sqs_mode;
	u64 cfg;
};

/* Set VF's MAC address */
struct set_mac_msg {
	u8 msg;
	u8 vf_id;
	u8 mac_addr[ETH_ALEN];
};

/* Set Maximum frame size */
struct set_frs_msg {
	u8 msg;
	u8 vf_id;
	u16 max_frs;
};

/* Set CPI algorithm type */
struct cpi_cfg_msg {
	u8 msg;
	u8 vf_id;
	u8 rq_cnt;
	u8 cpi_alg;
};

/* Get RSS table size */
struct rss_sz_msg {
	u8 msg;
	u8 vf_id;
	u16 ind_tbl_size;
};

/* Set RSS configuration */
struct rss_cfg_msg {
	u8 msg;
	u8 vf_id;
	u8 hash_bits;
	u8 tbl_len;
	u8 tbl_offset;
#define RSS_IND_TBL_LEN_PER_MBX_MSG 8
	u8 ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
};
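
/* How the fields above are presumably used, given the message types defined
 * earlier: the indirection table is larger than one mailbox message, so the
 * VF sends it in chunks of RSS_IND_TBL_LEN_PER_MBX_MSG entries, the first
 * chunk as NIC_MBOX_MSG_RSS_CFG and the remaining ones as
 * NIC_MBOX_MSG_RSS_CFG_CONT, with tbl_offset telling the PF where each
 * chunk of tbl_len entries starts.
 */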

struct bgx_stats_msg {
	u8 msg;
	u8 vf_id;
	u8 rx;
	u8 idx;
	u64 stats;
};

/* Physical interface link status */
struct bgx_link_status {
	u8 msg;
	u8 link_up;
	u8 duplex;
	u32 speed;
};

/* Get Extra Qset IDs */
struct sqs_alloc {
	u8 msg;
	u8 vf_id;
	u8 qs_count;
};

struct nicvf_ptr {
	u8 msg;
	u8 vf_id;
	bool sqs_mode;
	u8 sqs_id;
	u64 nicvf;
};

/* Set interface in loopback mode */
struct set_loopback {
	u8 msg;
	u8 vf_id;
	bool enable;
};

/* 128 bit shared memory between PF and each VF */
union nic_mbx {
	struct { u8 msg; } msg;
	struct nic_cfg_msg nic_cfg;
	struct qs_cfg_msg qs;
	struct rq_cfg_msg rq;
	struct sq_cfg_msg sq;
	struct set_mac_msg mac;
	struct set_frs_msg frs;
	struct cpi_cfg_msg cpi_cfg;
	struct rss_sz_msg rss_size;
	struct rss_cfg_msg rss_cfg;
	struct bgx_stats_msg bgx_stats;
	struct bgx_link_status link_status;
	struct sqs_alloc sqs_alloc;
	struct nicvf_ptr nicvf;
	struct set_loopback lbk;
};

#define NIC_NODE_ID_MASK 0x03
#define NIC_NODE_ID_SHIFT 44

static inline int nic_get_node_id(struct pci_dev *pdev)
{
	u64 addr = pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM);

	return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK);
}
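
/* Illustrative example (not from the original header): the node ID occupies
 * bits [45:44] of the BAR0 physical address, so a VF whose BAR0 starts at
 * 0x100000000000 (bit 44 set) would make nic_get_node_id() return 1, while
 * a BAR0 below bit 44 yields node 0.
 */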

int nicvf_set_real_num_queues(struct net_device *netdev,
			      int tx_queues, int rx_queues);
int nicvf_open(struct net_device *netdev);
int nicvf_stop(struct net_device *netdev);
int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx);
void nicvf_config_rss(struct nicvf *nic);
void nicvf_set_rss_key(struct nicvf *nic);
void nicvf_set_ethtool_ops(struct net_device *netdev);
void nicvf_update_stats(struct nicvf *nic);
void nicvf_update_lmac_stats(struct nicvf *nic);
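
/* A minimal usage sketch (not part of the original header), assuming the
 * mailbox flow implied by the declarations above: fill in one member of
 * union nic_mbx and hand it to nicvf_send_msg_to_pf(), which returns once
 * the PF has ACKed/NACKed the request (see pf_acked/pf_nacked in struct
 * nicvf) or NIC_MBOX_MSG_TIMEOUT expires.  The helper name and its exact
 * semantics here are illustrative only.
 */
static inline int nicvf_mbx_set_mac_example(struct nicvf *nic, const u8 *mac)
{
	union nic_mbx mbx = {};

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	memcpy(mbx.mac.mac_addr, mac, ETH_ALEN);

	return nicvf_send_msg_to_pf(nic, &mbx);
}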

#endif /* NIC_H */