t4vf_hw.c

  1. /*
  2. * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
  3. * driver for Linux.
  4. *
  5. * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
  6. *
  7. * This software is available to you under a choice of one of two
  8. * licenses. You may choose to be licensed under the terms of the GNU
  9. * General Public License (GPL) Version 2, available from the file
  10. * COPYING in the main directory of this source tree, or the
  11. * OpenIB.org BSD license below:
  12. *
  13. * Redistribution and use in source and binary forms, with or
  14. * without modification, are permitted provided that the following
  15. * conditions are met:
  16. *
  17. * - Redistributions of source code must retain the above
  18. * copyright notice, this list of conditions and the following
  19. * disclaimer.
  20. *
  21. * - Redistributions in binary form must reproduce the above
  22. * copyright notice, this list of conditions and the following
  23. * disclaimer in the documentation and/or other materials
  24. * provided with the distribution.
  25. *
  26. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  27. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  28. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  29. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  30. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  31. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  32. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  33. * SOFTWARE.
  34. */
  35. #include <linux/pci.h>
  36. #include "t4vf_common.h"
  37. #include "t4vf_defs.h"
  38. #include "../cxgb4/t4_regs.h"
  39. #include "../cxgb4/t4_values.h"
  40. #include "../cxgb4/t4fw_api.h"
  41. /*
  42. * Wait for the device to become ready (signified by our "who am I" register
  43. * returning a value other than all 1's). Return an error if it doesn't
  44. * become ready ...
  45. */
  46. int t4vf_wait_dev_ready(struct adapter *adapter)
  47. {
  48. const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI;
  49. const u32 notready1 = 0xffffffff;
  50. const u32 notready2 = 0xeeeeeeee;
  51. u32 val;
  52. val = t4_read_reg(adapter, whoami);
  53. if (val != notready1 && val != notready2)
  54. return 0;
  55. msleep(500);
  56. val = t4_read_reg(adapter, whoami);
  57. if (val != notready1 && val != notready2)
  58. return 0;
  59. else
  60. return -EIO;
  61. }
  62. /*
  63. * Get the reply to a mailbox command and store it in @rpl in big-endian order
  64. * (since the firmware data structures are specified in a big-endian layout).
  65. */
  66. static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size,
  67. u32 mbox_data)
  68. {
  69. for ( ; size; size -= 8, mbox_data += 8)
  70. *rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
  71. }
  72. /*
  73. * Dump contents of mailbox with a leading tag.
  74. */
  75. static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data)
  76. {
  77. dev_err(adapter->pdev_dev,
  78. "mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag,
  79. (unsigned long long)t4_read_reg64(adapter, mbox_data + 0),
  80. (unsigned long long)t4_read_reg64(adapter, mbox_data + 8),
  81. (unsigned long long)t4_read_reg64(adapter, mbox_data + 16),
  82. (unsigned long long)t4_read_reg64(adapter, mbox_data + 24),
  83. (unsigned long long)t4_read_reg64(adapter, mbox_data + 32),
  84. (unsigned long long)t4_read_reg64(adapter, mbox_data + 40),
  85. (unsigned long long)t4_read_reg64(adapter, mbox_data + 48),
  86. (unsigned long long)t4_read_reg64(adapter, mbox_data + 56));
  87. }
  88. /**
  89. * t4vf_wr_mbox_core - send a command to FW through the mailbox
  90. * @adapter: the adapter
  91. * @cmd: the command to write
  92. * @size: command length in bytes
  93. * @rpl: where to optionally store the reply
  94. * @sleep_ok: if true we may sleep while awaiting command completion
  95. *
  96. * Sends the given command to FW through the mailbox and waits for the
  97. * FW to execute the command. If @rpl is not %NULL it is used to store
  98. * the FW's reply to the command. The command and its optional reply
  99. * are of the same length. FW can take up to 500 ms to respond.
  100. * @sleep_ok determines whether we may sleep while awaiting the response.
  101. * If sleeping is allowed we use progressive backoff; otherwise we spin.
  102. *
  103. * The return value is 0 on success or a negative errno on failure. A
  104. * failure can happen either because we are not able to execute the
  105. * command or FW executes it but signals an error. In the latter case
  106. * the return value is the error code indicated by FW (negated).
  107. */
  108. int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
  109. void *rpl, bool sleep_ok)
  110. {
  111. static const int delay[] = {
  112. 1, 1, 3, 5, 10, 10, 20, 50, 100
  113. };
  114. u32 v;
  115. int i, ms, delay_idx;
  116. const __be64 *p;
  117. u32 mbox_data = T4VF_MBDATA_BASE_ADDR;
  118. u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
  119. /*
  120. * Commands must be multiples of 16 bytes in length and may not be
  121. * larger than the size of the Mailbox Data register array.
  122. */
  123. if ((size % 16) != 0 ||
  124. size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
  125. return -EINVAL;
  126. /*
  127. * Loop trying to get ownership of the mailbox. Return an error
  128. * if we can't gain ownership.
  129. */
  130. v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
  131. for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
  132. v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
  133. if (v != MBOX_OWNER_DRV)
  134. return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
  135. /*
  136. * Write the command array into the Mailbox Data register array and
  137. * transfer ownership of the mailbox to the firmware.
  138. *
  139. * For the VFs, the Mailbox Data "registers" are actually backed by
  140. * T4's "MA" interface rather than PL Registers (as is the case for
  141. * the PFs). Because these are in different coherency domains, the
  142. * write to the VF's PL-register-backed Mailbox Control can race in
  143. * front of the writes to the MA-backed VF Mailbox Data "registers".
  144. * So we need to do a read-back on at least one byte of the VF Mailbox
  145. * Data registers before doing the write to the VF Mailbox Control
  146. * register.
  147. */
  148. for (i = 0, p = cmd; i < size; i += 8)
  149. t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
  150. t4_read_reg(adapter, mbox_data); /* flush write */
  151. t4_write_reg(adapter, mbox_ctl,
  152. MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
  153. t4_read_reg(adapter, mbox_ctl); /* flush write */
  154. /*
  155. * Spin waiting for firmware to acknowledge processing our command.
  156. */
  157. delay_idx = 0;
  158. ms = delay[0];
  159. for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
  160. if (sleep_ok) {
  161. ms = delay[delay_idx];
  162. if (delay_idx < ARRAY_SIZE(delay) - 1)
  163. delay_idx++;
  164. msleep(ms);
  165. } else
  166. mdelay(ms);
  167. /*
  168. * If we're the owner, see if this is the reply we wanted.
  169. */
  170. v = t4_read_reg(adapter, mbox_ctl);
  171. if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
  172. /*
  173. * If the Message Valid bit isn't on, revoke ownership
  174. * of the mailbox and continue waiting for our reply.
  175. */
  176. if ((v & MBMSGVALID_F) == 0) {
  177. t4_write_reg(adapter, mbox_ctl,
  178. MBOWNER_V(MBOX_OWNER_NONE));
  179. continue;
  180. }
  181. /*
  182. * We now have our reply. Extract the command return
  183. * value, copy the reply back to our caller's buffer
  184. * (if specified) and revoke ownership of the mailbox.
  185. * We return the (negated) firmware command return
  186. * code (this depends on FW_SUCCESS == 0).
  187. */
  188. /* return value in low-order little-endian word */
  189. v = t4_read_reg(adapter, mbox_data);
  190. if (FW_CMD_RETVAL_G(v))
  191. dump_mbox(adapter, "FW Error", mbox_data);
  192. if (rpl) {
  193. /* request bit in high-order BE word */
  194. WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
  195. & FW_CMD_REQUEST_F) == 0);
  196. get_mbox_rpl(adapter, rpl, size, mbox_data);
  197. WARN_ON((be32_to_cpu(*(__be32 *)rpl)
  198. & FW_CMD_REQUEST_F) != 0);
  199. }
  200. t4_write_reg(adapter, mbox_ctl,
  201. MBOWNER_V(MBOX_OWNER_NONE));
  202. return -FW_CMD_RETVAL_G(v);
  203. }
  204. }
  205. /*
  206. * We timed out. Return the error ...
  207. */
  208. dump_mbox(adapter, "FW Timeout", mbox_data);
  209. return -ETIMEDOUT;
  210. }
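/*
 * Usage note (illustrative sketch, not part of the driver): callers build a
 * firmware command structure whose length is a multiple of 16 bytes and hand
 * it to the mailbox.  Assuming the t4vf_wr_mbox() and t4vf_wr_mbox_ns()
 * helpers declared in t4vf_common.h are thin wrappers calling
 * t4vf_wr_mbox_core() with sleep_ok set to true and false respectively, a
 * minimal caller (mirroring t4vf_fw_reset() below) looks like:
 *
 *	struct fw_reset_cmd c;
 *	int err;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
 *				    FW_CMD_WRITE_F);
 *	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
 *	err = t4vf_wr_mbox(adapter, &c, sizeof(c), NULL);
 *
 * Pass a reply buffer of the same size instead of NULL when the firmware's
 * response is needed (see t4vf_port_init() below).
 */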
  211. /**
  212. * hash_mac_addr - return the hash value of a MAC address
  213. * @addr: the 48-bit Ethernet MAC address
  214. *
  215. * Hashes a MAC address according to the hash function used by hardware
  216. * inexact (hash) address matching.
  217. */
  218. static int hash_mac_addr(const u8 *addr)
  219. {
  220. u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
  221. u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
  222. a ^= b;
  223. a ^= (a >> 12);
  224. a ^= (a >> 6);
  225. return a & 0x3f;
  226. }
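/*
 * Worked example (illustrative, not used by the driver): for the MAC address
 * 00:07:43:12:34:56 the two 24-bit halves are 0x000743 and 0x123456; folding
 * them as above yields the 6-bit value 0x3e.  Callers use that value as a
 * bit index into the 64-bit inexact-match vector programmed via
 * t4vf_set_addr_hash(), e.g.:
 *
 *	u64 vec = 0;
 *	int err;
 *
 *	vec |= 1ULL << hash_mac_addr(mac);
 *	err = t4vf_set_addr_hash(adapter, viid, false, vec, true);
 */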
  227. #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
  228. FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
  229. FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
  230. /**
  231. * init_link_config - initialize a link's SW state
  232. * @lc: structure holding the link state
  233. * @caps: link capabilities
  234. *
  235. * Initializes the SW state maintained for each link, including the link's
  236. * capabilities and default speed/flow-control/autonegotiation settings.
  237. */
  238. static void init_link_config(struct link_config *lc, unsigned int caps)
  239. {
  240. lc->supported = caps;
  241. lc->requested_speed = 0;
  242. lc->speed = 0;
  243. lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
  244. if (lc->supported & FW_PORT_CAP_ANEG) {
  245. lc->advertising = lc->supported & ADVERT_MASK;
  246. lc->autoneg = AUTONEG_ENABLE;
  247. lc->requested_fc |= PAUSE_AUTONEG;
  248. } else {
  249. lc->advertising = 0;
  250. lc->autoneg = AUTONEG_DISABLE;
  251. }
  252. }
  253. /**
  254. * t4vf_port_init - initialize port hardware/software state
  255. * @adapter: the adapter
  256. * @pidx: the adapter port index
  257. */
  258. int t4vf_port_init(struct adapter *adapter, int pidx)
  259. {
  260. struct port_info *pi = adap2pinfo(adapter, pidx);
  261. struct fw_vi_cmd vi_cmd, vi_rpl;
  262. struct fw_port_cmd port_cmd, port_rpl;
  263. int v;
  264. /*
  265. * Execute a VI Read command to get our Virtual Interface information
  266. * like MAC address, etc.
  267. */
  268. memset(&vi_cmd, 0, sizeof(vi_cmd));
  269. vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
  270. FW_CMD_REQUEST_F |
  271. FW_CMD_READ_F);
  272. vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
  273. vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid));
  274. v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
  275. if (v)
  276. return v;
  277. BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd));
  278. pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd));
  279. t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac);
  280. /*
  281. * If we don't have read access to our port information, we're done
  282. * now. Otherwise, execute a PORT Read command to get it ...
  283. */
  284. if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
  285. return 0;
  286. memset(&port_cmd, 0, sizeof(port_cmd));
  287. port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
  288. FW_CMD_REQUEST_F |
  289. FW_CMD_READ_F |
  290. FW_PORT_CMD_PORTID_V(pi->port_id));
  291. port_cmd.action_to_len16 =
  292. cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
  293. FW_LEN16(port_cmd));
  294. v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
  295. if (v)
  296. return v;
  297. v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
  298. pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ?
  299. FW_PORT_CMD_MDIOADDR_G(v) : -1;
  300. pi->port_type = FW_PORT_CMD_PTYPE_G(v);
  301. pi->mod_type = FW_PORT_MOD_TYPE_NA;
  302. init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap));
  303. return 0;
  304. }
  305. /**
  306. * t4vf_fw_reset - issue a reset to FW
  307. * @adapter: the adapter
  308. *
  309. * Issues a reset command to FW. For a Physical Function this would
  310. * result in the Firmware resetting all of its state. For a Virtual
  311. * Function this just resets the state associated with the VF.
  312. */
  313. int t4vf_fw_reset(struct adapter *adapter)
  314. {
  315. struct fw_reset_cmd cmd;
  316. memset(&cmd, 0, sizeof(cmd));
  317. cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
  318. FW_CMD_WRITE_F);
  319. cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
  320. return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
  321. }
  322. /**
  323. * t4vf_query_params - query FW or device parameters
  324. * @adapter: the adapter
  325. * @nparams: the number of parameters
  326. * @params: the parameter names
  327. * @vals: the parameter values
  328. *
  329. * Reads the values of firmware or device parameters. Up to 7 parameters
  330. * can be queried at once.
  331. */
  332. static int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
  333. const u32 *params, u32 *vals)
  334. {
  335. int i, ret;
  336. struct fw_params_cmd cmd, rpl;
  337. struct fw_params_param *p;
  338. size_t len16;
  339. if (nparams > 7)
  340. return -EINVAL;
  341. memset(&cmd, 0, sizeof(cmd));
  342. cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
  343. FW_CMD_REQUEST_F |
  344. FW_CMD_READ_F);
  345. len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
  346. param[nparams].mnem), 16);
  347. cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
  348. for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
  349. p->mnem = htonl(*params++);
  350. ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
  351. if (ret == 0)
  352. for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
  353. *vals++ = be32_to_cpu(p->val);
  354. return ret;
  355. }
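/*
 * Example (illustrative sketch): parameter names are composed from the
 * FW_PARAMS_MNEM/PARAM field macros and queried in a single mailbox round
 * trip, exactly as the helpers later in this file do.  For instance, reading
 * the firmware revision (compare t4vf_get_dev_params() below):
 *
 *	u32 param, val;
 *	int ret;
 *
 *	param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
 *		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV);
 *	ret = t4vf_query_params(adapter, 1, &param, &val);
 *	if (ret == 0)
 *		/* val now holds the firmware revision */;
 */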
  356. /**
  357. * t4vf_set_params - sets FW or device parameters
  358. * @adapter: the adapter
  359. * @nparams: the number of parameters
  360. * @params: the parameter names
  361. * @vals: the parameter values
  362. *
  363. * Sets the values of firmware or device parameters. Up to 7 parameters
  364. * can be specified at once.
  365. */
  366. int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
  367. const u32 *params, const u32 *vals)
  368. {
  369. int i;
  370. struct fw_params_cmd cmd;
  371. struct fw_params_param *p;
  372. size_t len16;
  373. if (nparams > 7)
  374. return -EINVAL;
  375. memset(&cmd, 0, sizeof(cmd));
  376. cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
  377. FW_CMD_REQUEST_F |
  378. FW_CMD_WRITE_F);
  379. len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
  380. param[nparams]), 16);
  381. cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
  382. for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
  383. p->mnem = cpu_to_be32(*params++);
  384. p->val = cpu_to_be32(*vals++);
  385. }
  386. return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
  387. }
  388. /**
  389. * t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
  390. * @adapter: the adapter
  391. * @qid: the Queue ID
  392. * @qtype: the Ingress or Egress type for @qid
  393. * @pbar2_qoffset: BAR2 Queue Offset
  394. * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
  395. *
  396. * Returns the BAR2 SGE Queue Registers information associated with the
  397. * indicated Absolute Queue ID. These are passed back in return value
  398. * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
  399. * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
  400. *
  401. * This may return an error which indicates that BAR2 SGE Queue
  402. * registers aren't available. If an error is not returned, then the
  403. * following values are returned:
  404. *
  405. * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
  406. * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
  407. *
  408. * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
  409. * require the "Inferred Queue ID" ability may be used. E.g. the
  410. * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
  411. * then these "Inferred Queue ID" registers may not be used.
  412. */
  413. int t4vf_bar2_sge_qregs(struct adapter *adapter,
  414. unsigned int qid,
  415. enum t4_bar2_qtype qtype,
  416. u64 *pbar2_qoffset,
  417. unsigned int *pbar2_qid)
  418. {
  419. unsigned int page_shift, page_size, qpp_shift, qpp_mask;
  420. u64 bar2_page_offset, bar2_qoffset;
  421. unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
  422. /* T4 doesn't support BAR2 SGE Queue registers.
  423. */
  424. if (is_t4(adapter->params.chip))
  425. return -EINVAL;
  426. /* Get our SGE Page Size parameters.
  427. */
  428. page_shift = adapter->params.sge.sge_vf_hps + 10;
  429. page_size = 1 << page_shift;
  430. /* Get the right Queues per Page parameters for our Queue.
  431. */
  432. qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
  433. ? adapter->params.sge.sge_vf_eq_qpp
  434. : adapter->params.sge.sge_vf_iq_qpp);
  435. qpp_mask = (1 << qpp_shift) - 1;
  436. /* Calculate the basics of the BAR2 SGE Queue register area:
  437. * o The BAR2 page the Queue registers will be in.
  438. * o The BAR2 Queue ID.
  439. * o The BAR2 Queue ID Offset into the BAR2 page.
  440. */
  441. bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
  442. bar2_qid = qid & qpp_mask;
  443. bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
  444. /* If the BAR2 Queue ID Offset is less than the Page Size, then the
  445. * hardware will infer the Absolute Queue ID simply from the writes to
  446. * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
  447. * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
  448. * write to the first BAR2 SGE Queue Area within the BAR2 Page with
  449. * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
  450. * from the BAR2 Page and BAR2 Queue ID.
  451. *
  452. * One important consequence of this is that some BAR2 SGE registers
  453. * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
  454. * there. But other registers synthesize the SGE Queue ID purely
  455. * from the writes to the registers -- the Write Combined Doorbell
  456. * Buffer is a good example. These BAR2 SGE Registers are only
  457. * available for those BAR2 SGE Register areas where the SGE Absolute
  458. * Queue ID can be inferred from simple writes.
  459. */
  460. bar2_qoffset = bar2_page_offset;
  461. bar2_qinferred = (bar2_qid_offset < page_size);
  462. if (bar2_qinferred) {
  463. bar2_qoffset += bar2_qid_offset;
  464. bar2_qid = 0;
  465. }
  466. *pbar2_qoffset = bar2_qoffset;
  467. *pbar2_qid = bar2_qid;
  468. return 0;
  469. }
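/*
 * Worked example (illustrative, assuming SGE_UDB_SIZE is 128 bytes): with a
 * 4KB SGE VF page (sge_vf_hps = 2, so page_shift = 12) and 4 egress queues
 * per page (qpp_shift = 2), Absolute Queue ID 10 maps to:
 *
 *	bar2_page_offset = (10 >> 2) << 12 = 0x2000
 *	bar2_qid         = 10 & 3          = 2
 *	bar2_qid_offset  = 2 * 128         = 0x100  (< 4KB, so inferred)
 *
 * giving *pbar2_qoffset = 0x2100 and *pbar2_qid = 0, i.e. the doorbell
 * writes themselves identify the queue.
 */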
  470. /**
  471. * t4vf_get_sge_params - retrieve adapter Scatter-Gather Engine parameters
  472. * @adapter: the adapter
  473. *
  474. * Retrieves various core SGE parameters in the form of hardware SGE
  475. * register values. The caller is responsible for decoding these as
  476. * needed. The SGE parameters are stored in @adapter->params.sge.
  477. */
  478. int t4vf_get_sge_params(struct adapter *adapter)
  479. {
  480. struct sge_params *sge_params = &adapter->params.sge;
  481. u32 params[7], vals[7];
  482. int v;
  483. params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  484. FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
  485. params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  486. FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
  487. params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  488. FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
  489. params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  490. FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
  491. params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  492. FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A));
  493. params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  494. FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A));
  495. params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  496. FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A));
  497. v = t4vf_query_params(adapter, 7, params, vals);
  498. if (v)
  499. return v;
  500. sge_params->sge_control = vals[0];
  501. sge_params->sge_host_page_size = vals[1];
  502. sge_params->sge_fl_buffer_size[0] = vals[2];
  503. sge_params->sge_fl_buffer_size[1] = vals[3];
  504. sge_params->sge_timer_value_0_and_1 = vals[4];
  505. sge_params->sge_timer_value_2_and_3 = vals[5];
  506. sge_params->sge_timer_value_4_and_5 = vals[6];
  507. /* T4 uses a single control field to specify both the PCIe Padding and
  508. * Packing Boundary. T5 introduced the ability to specify these
  509. * separately with the Padding Boundary in SGE_CONTROL and the Packing
  510. * Boundary in SGE_CONTROL2. So for T5 and later we need to grab
  511. * SGE_CONTROL2 in order to determine how ingress packet data will be
  512. * laid out in Packed Buffer Mode. Unfortunately, older versions of
  513. * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
  514. * failure grabbing it we throw an error since we can't figure out the
  515. * right value.
  516. */
  517. if (!is_t4(adapter->params.chip)) {
  518. params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  519. FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL2_A));
  520. v = t4vf_query_params(adapter, 1, params, vals);
  521. if (v != FW_SUCCESS) {
  522. dev_err(adapter->pdev_dev,
  523. "Unable to get SGE Control2; "
  524. "probably old firmware.\n");
  525. return v;
  526. }
  527. sge_params->sge_control2 = vals[0];
  528. }
  529. params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  530. FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
  531. params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  532. FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
  533. v = t4vf_query_params(adapter, 2, params, vals);
  534. if (v)
  535. return v;
  536. sge_params->sge_ingress_rx_threshold = vals[0];
  537. sge_params->sge_congestion_control = vals[1];
  538. /* For T5 and later we want to use the new BAR2 Doorbells.
  539. * Unfortunately, older firmware didn't allow these registers to be
  540. * read.
  541. */
  542. if (!is_t4(adapter->params.chip)) {
  543. u32 whoami;
  544. unsigned int pf, s_hps, s_qpp;
  545. params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  546. FW_PARAMS_PARAM_XYZ_V(
  547. SGE_EGRESS_QUEUES_PER_PAGE_VF_A));
  548. params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  549. FW_PARAMS_PARAM_XYZ_V(
  550. SGE_INGRESS_QUEUES_PER_PAGE_VF_A));
  551. v = t4vf_query_params(adapter, 2, params, vals);
  552. if (v != FW_SUCCESS) {
  553. dev_warn(adapter->pdev_dev,
  554. "Unable to get VF SGE Queues/Page; "
  555. "probably old firmware.\n");
  556. return v;
  557. }
  558. sge_params->sge_egress_queues_per_page = vals[0];
  559. sge_params->sge_ingress_queues_per_page = vals[1];
  560. /* We need the Queues/Page for our VF. This is based on the
  561. * PF from which we're instantiated and is indexed in the
  562. * register we just read. Do it once here so other code in
  563. * the driver can just use it.
  564. */
  565. whoami = t4_read_reg(adapter,
  566. T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
  567. pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
  568. SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
  569. s_hps = (HOSTPAGESIZEPF0_S +
  570. (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
  571. sge_params->sge_vf_hps =
  572. ((sge_params->sge_host_page_size >> s_hps)
  573. & HOSTPAGESIZEPF0_M);
  574. s_qpp = (QUEUESPERPAGEPF0_S +
  575. (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
  576. sge_params->sge_vf_eq_qpp =
  577. ((sge_params->sge_egress_queues_per_page >> s_qpp)
  578. & QUEUESPERPAGEPF0_M);
  579. sge_params->sge_vf_iq_qpp =
  580. ((sge_params->sge_ingress_queues_per_page >> s_qpp)
  581. & QUEUESPERPAGEPF0_M);
  582. }
  583. return 0;
  584. }
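/*
 * Note (illustrative): the decoded sge_vf_hps and sge_vf_*_qpp values feed
 * directly into t4vf_bar2_sge_qregs() above.  For example, a raw Host Page
 * Size field of 2 corresponds to pages of 1 << (2 + 10) = 4KB, and a
 * Queues-Per-Page field of 3 means 1 << 3 = 8 queue register sets per
 * BAR2 page.
 */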
  585. /**
  586. * t4vf_get_vpd_params - retrieve device VPD parameters
  587. * @adapter: the adapter
  588. *
  589. * Retrieves various device Vital Product Data parameters. The parameters
  590. * are stored in @adapter->params.vpd.
  591. */
  592. int t4vf_get_vpd_params(struct adapter *adapter)
  593. {
  594. struct vpd_params *vpd_params = &adapter->params.vpd;
  595. u32 params[7], vals[7];
  596. int v;
  597. params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
  598. FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
  599. v = t4vf_query_params(adapter, 1, params, vals);
  600. if (v)
  601. return v;
  602. vpd_params->cclk = vals[0];
  603. return 0;
  604. }
  605. /**
  606. * t4vf_get_dev_params - retrieve device parameters
  607. * @adapter: the adapter
  608. *
  609. * Retrieves various device parameters. The parameters are stored in
  610. * @adapter->params.dev.
  611. */
  612. int t4vf_get_dev_params(struct adapter *adapter)
  613. {
  614. struct dev_params *dev_params = &adapter->params.dev;
  615. u32 params[7], vals[7];
  616. int v;
  617. params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
  618. FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV));
  619. params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
  620. FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPREV));
  621. v = t4vf_query_params(adapter, 2, params, vals);
  622. if (v)
  623. return v;
  624. dev_params->fwrev = vals[0];
  625. dev_params->tprev = vals[1];
  626. return 0;
  627. }
  628. /**
  629. * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
  630. * @adapter: the adapter
  631. *
  632. * Retrieves global RSS mode and parameters with which we have to live
  633. * and stores them in the @adapter's RSS parameters.
  634. */
  635. int t4vf_get_rss_glb_config(struct adapter *adapter)
  636. {
  637. struct rss_params *rss = &adapter->params.rss;
  638. struct fw_rss_glb_config_cmd cmd, rpl;
  639. int v;
  640. /*
  641. * Execute an RSS Global Configuration read command to retrieve
  642. * our RSS configuration.
  643. */
  644. memset(&cmd, 0, sizeof(cmd));
  645. cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
  646. FW_CMD_REQUEST_F |
  647. FW_CMD_READ_F);
  648. cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
  649. v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
  650. if (v)
  651. return v;
  652. /*
  653. * Translate the big-endian RSS Global Configuration into our
  654. * cpu-endian format based on the RSS mode. We also do first level
  655. * filtering at this point to weed out modes which don't support
  656. * VF Drivers ...
  657. */
  658. rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_G(
  659. be32_to_cpu(rpl.u.manual.mode_pkd));
  660. switch (rss->mode) {
  661. case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
  662. u32 word = be32_to_cpu(
  663. rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
  664. rss->u.basicvirtual.synmapen =
  665. ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F) != 0);
  666. rss->u.basicvirtual.syn4tupenipv6 =
  667. ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F) != 0);
  668. rss->u.basicvirtual.syn2tupenipv6 =
  669. ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F) != 0);
  670. rss->u.basicvirtual.syn4tupenipv4 =
  671. ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F) != 0);
  672. rss->u.basicvirtual.syn2tupenipv4 =
  673. ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F) != 0);
  674. rss->u.basicvirtual.ofdmapen =
  675. ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F) != 0);
  676. rss->u.basicvirtual.tnlmapen =
  677. ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F) != 0);
  678. rss->u.basicvirtual.tnlalllookup =
  679. ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F) != 0);
  680. rss->u.basicvirtual.hashtoeplitz =
  681. ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F) != 0);
  682. /* we need at least Tunnel Map Enable to be set */
  683. if (!rss->u.basicvirtual.tnlmapen)
  684. return -EINVAL;
  685. break;
  686. }
  687. default:
  688. /* all unknown/unsupported RSS modes result in an error */
  689. return -EINVAL;
  690. }
  691. return 0;
  692. }
  693. /**
  694. * t4vf_get_vfres - retrieve VF resource limits
  695. * @adapter: the adapter
  696. *
  697. * Retrieves configured resource limits and capabilities for a virtual
  698. * function. The results are stored in @adapter->vfres.
  699. */
  700. int t4vf_get_vfres(struct adapter *adapter)
  701. {
  702. struct vf_resources *vfres = &adapter->params.vfres;
  703. struct fw_pfvf_cmd cmd, rpl;
  704. int v;
  705. u32 word;
  706. /*
  707. * Execute PFVF Read command to get VF resource limits; bail out early
  708. * with error on command failure.
  709. */
  710. memset(&cmd, 0, sizeof(cmd));
  711. cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
  712. FW_CMD_REQUEST_F |
  713. FW_CMD_READ_F);
  714. cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
  715. v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
  716. if (v)
  717. return v;
  718. /*
  719. * Extract VF resource limits and return success.
  720. */
  721. word = be32_to_cpu(rpl.niqflint_niq);
  722. vfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
  723. vfres->niq = FW_PFVF_CMD_NIQ_G(word);
  724. word = be32_to_cpu(rpl.type_to_neq);
  725. vfres->neq = FW_PFVF_CMD_NEQ_G(word);
  726. vfres->pmask = FW_PFVF_CMD_PMASK_G(word);
  727. word = be32_to_cpu(rpl.tc_to_nexactf);
  728. vfres->tc = FW_PFVF_CMD_TC_G(word);
  729. vfres->nvi = FW_PFVF_CMD_NVI_G(word);
  730. vfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
  731. word = be32_to_cpu(rpl.r_caps_to_nethctrl);
  732. vfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
  733. vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
  734. vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
  735. return 0;
  736. }
  737. /**
  738. * t4vf_read_rss_vi_config - read a VI's RSS configuration
  739. * @adapter: the adapter
  740. * @viid: Virtual Interface ID
  741. * @config: pointer to host-native VI RSS Configuration buffer
  742. *
  743. * Reads the Virtual Interface's RSS configuration information and
  744. * translates it into CPU-native format.
  745. */
  746. int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid,
  747. union rss_vi_config *config)
  748. {
  749. struct fw_rss_vi_config_cmd cmd, rpl;
  750. int v;
  751. memset(&cmd, 0, sizeof(cmd));
  752. cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
  753. FW_CMD_REQUEST_F |
  754. FW_CMD_READ_F |
  755. FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
  756. cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
  757. v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
  758. if (v)
  759. return v;
  760. switch (adapter->params.rss.mode) {
  761. case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
  762. u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen);
  763. config->basicvirtual.ip6fourtupen =
  764. ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) != 0);
  765. config->basicvirtual.ip6twotupen =
  766. ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) != 0);
  767. config->basicvirtual.ip4fourtupen =
  768. ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) != 0);
  769. config->basicvirtual.ip4twotupen =
  770. ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) != 0);
  771. config->basicvirtual.udpen =
  772. ((word & FW_RSS_VI_CONFIG_CMD_UDPEN_F) != 0);
  773. config->basicvirtual.defaultq =
  774. FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(word);
  775. break;
  776. }
  777. default:
  778. return -EINVAL;
  779. }
  780. return 0;
  781. }
  782. /**
  783. * t4vf_write_rss_vi_config - write a VI's RSS configuration
  784. * @adapter: the adapter
  785. * @viid: Virtual Interface ID
  786. * @config: pointer to host-native VI RSS Configuration buffer
  787. *
  788. * Write the Virtual Interface's RSS configuration information
  789. * (translating it into firmware-native format before writing).
  790. */
  791. int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
  792. union rss_vi_config *config)
  793. {
  794. struct fw_rss_vi_config_cmd cmd, rpl;
  795. memset(&cmd, 0, sizeof(cmd));
  796. cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
  797. FW_CMD_REQUEST_F |
  798. FW_CMD_WRITE_F |
  799. FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
  800. cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
  801. switch (adapter->params.rss.mode) {
  802. case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
  803. u32 word = 0;
  804. if (config->basicvirtual.ip6fourtupen)
  805. word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F;
  806. if (config->basicvirtual.ip6twotupen)
  807. word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F;
  808. if (config->basicvirtual.ip4fourtupen)
  809. word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F;
  810. if (config->basicvirtual.ip4twotupen)
  811. word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F;
  812. if (config->basicvirtual.udpen)
  813. word |= FW_RSS_VI_CONFIG_CMD_UDPEN_F;
  814. word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(
  815. config->basicvirtual.defaultq);
  816. cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word);
  817. break;
  818. }
  819. default:
  820. return -EINVAL;
  821. }
  822. return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
  823. }
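/*
 * Example (illustrative sketch): the two VI RSS config helpers are normally
 * used as a read-modify-write pair, e.g. to turn on UDP hashing for a VI
 * while leaving the other tuple-enable bits alone:
 *
 *	union rss_vi_config config;
 *	int ret;
 *
 *	ret = t4vf_read_rss_vi_config(adapter, viid, &config);
 *	if (ret == 0) {
 *		config.basicvirtual.udpen = 1;
 *		ret = t4vf_write_rss_vi_config(adapter, viid, &config);
 *	}
 */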
  824. /**
  825. * t4vf_config_rss_range - configure a portion of the RSS mapping table
  826. * @adapter: the adapter
  827. * @viid: Virtual Interface of RSS Table Slice
  828. * @start: starting entry in the table to write
  829. * @n: how many table entries to write
  830. * @rspq: values for the "Response Queue" (Ingress Queue) lookup table
  831. * @nrspq: number of values in @rspq
  832. *
  833. * Programs the selected part of the VI's RSS mapping table with the
  834. * provided values. If @nrspq < @n the supplied values are used repeatedly
  835. * until the full table range is populated.
  836. *
  837. * The caller must ensure the values in @rspq are in the range 0..1023.
  838. */
  839. int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
  840. int start, int n, const u16 *rspq, int nrspq)
  841. {
  842. const u16 *rsp = rspq;
  843. const u16 *rsp_end = rspq+nrspq;
  844. struct fw_rss_ind_tbl_cmd cmd;
  845. /*
  846. * Initialize firmware command template to write the RSS table.
  847. */
  848. memset(&cmd, 0, sizeof(cmd));
  849. cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
  850. FW_CMD_REQUEST_F |
  851. FW_CMD_WRITE_F |
  852. FW_RSS_IND_TBL_CMD_VIID_V(viid));
  853. cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
  854. /*
  855. * Each firmware RSS command can accommodate up to 32 RSS Ingress
  856. * Queue Identifiers. These Ingress Queue IDs are packed three to
  857. * a 32-bit word as 10-bit values with the upper remaining 2 bits
  858. * reserved.
  859. */
  860. while (n > 0) {
  861. __be32 *qp = &cmd.iq0_to_iq2;
  862. int nq = min(n, 32);
  863. int ret;
  864. /*
  865. * Set up the firmware RSS command header to send the next
  866. * "nq" Ingress Queue IDs to the firmware.
  867. */
  868. cmd.niqid = cpu_to_be16(nq);
  869. cmd.startidx = cpu_to_be16(start);
  870. /*
  871. * "nq" more done for the start of the next loop.
  872. */
  873. start += nq;
  874. n -= nq;
  875. /*
  876. * While there are still Ingress Queue IDs to stuff into the
  877. * current firmware RSS command, retrieve them from the
  878. * Ingress Queue ID array and insert them into the command.
  879. */
  880. while (nq > 0) {
  881. /*
  882. * Grab up to the next 3 Ingress Queue IDs (wrapping
  883. * around the Ingress Queue ID array if necessary) and
  884. * insert them into the firmware RSS command at the
  885. * current 3-tuple position within the command.
  886. */
  887. u16 qbuf[3];
  888. u16 *qbp = qbuf;
  889. int nqbuf = min(3, nq);
  890. nq -= nqbuf;
  891. qbuf[0] = qbuf[1] = qbuf[2] = 0;
  892. while (nqbuf) {
  893. nqbuf--;
  894. *qbp++ = *rsp++;
  895. if (rsp >= rsp_end)
  896. rsp = rspq;
  897. }
  898. *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf[0]) |
  899. FW_RSS_IND_TBL_CMD_IQ1_V(qbuf[1]) |
  900. FW_RSS_IND_TBL_CMD_IQ2_V(qbuf[2]));
  901. }
  902. /*
  903. * Send this portion of the RSS table update to the firmware;
  904. * bail out on any errors.
  905. */
  906. ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
  907. if (ret)
  908. return ret;
  909. }
  910. return 0;
  911. }
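/*
 * Example (illustrative sketch): a typical caller spreads a VI's RSS slice
 * across its Rx queues by passing those queues' Ingress Queue IDs; the IDs
 * are repeated as needed to fill the slice.  Assuming "rss" is an array of
 * nqsets such IDs for the port (both names are hypothetical here):
 *
 *	err = t4vf_config_rss_range(adapter, pi->viid, 0, pi->rss_size,
 *				    rss, nqsets);
 *
 * pi->rss_size comes from the FW_VI_CMD reply captured in t4vf_port_init().
 */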
  912. /**
  913. * t4vf_alloc_vi - allocate a virtual interface on a port
  914. * @adapter: the adapter
  915. * @port_id: physical port associated with the VI
  916. *
  917. * Allocate a new Virtual Interface and bind it to the indicated
  918. * physical port. Return the new Virtual Interface Identifier on
  919. * success, or a [negative] error number on failure.
  920. */
  921. int t4vf_alloc_vi(struct adapter *adapter, int port_id)
  922. {
  923. struct fw_vi_cmd cmd, rpl;
  924. int v;
  925. /*
  926. * Execute a VI command to allocate Virtual Interface and return its
  927. * VIID.
  928. */
  929. memset(&cmd, 0, sizeof(cmd));
  930. cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
  931. FW_CMD_REQUEST_F |
  932. FW_CMD_WRITE_F |
  933. FW_CMD_EXEC_F);
  934. cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
  935. FW_VI_CMD_ALLOC_F);
  936. cmd.portid_pkd = FW_VI_CMD_PORTID_V(port_id);
  937. v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
  938. if (v)
  939. return v;
  940. return FW_VI_CMD_VIID_G(be16_to_cpu(rpl.type_viid));
  941. }
  942. /**
  943. * t4vf_free_vi -- free a virtual interface
  944. * @adapter: the adapter
  945. * @viid: the virtual interface identifier
  946. *
  947. * Free a previously allocated Virtual Interface. Return an error on
  948. * failure.
  949. */
  950. int t4vf_free_vi(struct adapter *adapter, int viid)
  951. {
  952. struct fw_vi_cmd cmd;
  953. /*
  954. * Execute a VI command to free the Virtual Interface.
  955. */
  956. memset(&cmd, 0, sizeof(cmd));
  957. cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
  958. FW_CMD_REQUEST_F |
  959. FW_CMD_EXEC_F);
  960. cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
  961. FW_VI_CMD_FREE_F);
  962. cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
  963. return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
  964. }
  965. /**
  966. * t4vf_enable_vi - enable/disable a virtual interface
  967. * @adapter: the adapter
  968. * @viid: the Virtual Interface ID
  969. * @rx_en: 1=enable Rx, 0=disable Rx
  970. * @tx_en: 1=enable Tx, 0=disable Tx
  971. *
  972. * Enables/disables a virtual interface.
  973. */
  974. int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
  975. bool rx_en, bool tx_en)
  976. {
  977. struct fw_vi_enable_cmd cmd;
  978. memset(&cmd, 0, sizeof(cmd));
  979. cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
  980. FW_CMD_REQUEST_F |
  981. FW_CMD_EXEC_F |
  982. FW_VI_ENABLE_CMD_VIID_V(viid));
  983. cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
  984. FW_VI_ENABLE_CMD_EEN_V(tx_en) |
  985. FW_LEN16(cmd));
  986. return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
  987. }
  988. /**
  989. * t4vf_identify_port - identify a VI's port by blinking its LED
  990. * @adapter: the adapter
  991. * @viid: the Virtual Interface ID
  992. * @nblinks: how many times to blink LED at 2.5 Hz
  993. *
  994. * Identifies a VI's port by blinking its LED.
  995. */
  996. int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
  997. unsigned int nblinks)
  998. {
  999. struct fw_vi_enable_cmd cmd;
  1000. memset(&cmd, 0, sizeof(cmd));
  1001. cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
  1002. FW_CMD_REQUEST_F |
  1003. FW_CMD_EXEC_F |
  1004. FW_VI_ENABLE_CMD_VIID_V(viid));
  1005. cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F |
  1006. FW_LEN16(cmd));
  1007. cmd.blinkdur = cpu_to_be16(nblinks);
  1008. return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
  1009. }
  1010. /**
  1011. * t4vf_set_rxmode - set Rx properties of a virtual interface
  1012. * @adapter: the adapter
  1013. * @viid: the VI id
  1014. * @mtu: the new MTU or -1 for no change
  1015. * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
  1016. * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
  1017. * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
  1018. * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
  1019. * -1 no change
  1020. *
  1021. * Sets Rx properties of a virtual interface.
  1022. */
  1023. int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid,
  1024. int mtu, int promisc, int all_multi, int bcast, int vlanex,
  1025. bool sleep_ok)
  1026. {
  1027. struct fw_vi_rxmode_cmd cmd;
  1028. /* convert to FW values */
  1029. if (mtu < 0)
  1030. mtu = FW_VI_RXMODE_CMD_MTU_M;
  1031. if (promisc < 0)
  1032. promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
  1033. if (all_multi < 0)
  1034. all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
  1035. if (bcast < 0)
  1036. bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
  1037. if (vlanex < 0)
  1038. vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
  1039. memset(&cmd, 0, sizeof(cmd));
  1040. cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
  1041. FW_CMD_REQUEST_F |
  1042. FW_CMD_WRITE_F |
  1043. FW_VI_RXMODE_CMD_VIID_V(viid));
  1044. cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
  1045. cmd.mtu_to_vlanexen =
  1046. cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
  1047. FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
  1048. FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
  1049. FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
  1050. FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
  1051. return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
  1052. }
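/*
 * Example (illustrative): the -1 "no change" convention lets callers adjust
 * a single Rx property.  For instance, enabling promiscuous mode without
 * touching the MTU, multicast, broadcast or VLAN-extraction settings:
 *
 *	err = t4vf_set_rxmode(adapter, pi->viid, -1, 1, -1, -1, -1, true);
 */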
  1053. /**
  1054. * t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses
  1055. * @adapter: the adapter
  1056. * @viid: the Virtual Interface Identifier
  1057. * @free: if true any existing filters for this VI id are first removed
  1058. * @naddr: the number of MAC addresses to allocate filters for (up to 7)
  1059. * @addr: the MAC address(es)
  1060. * @idx: where to store the index of each allocated filter
  1061. * @hash: pointer to hash address filter bitmap
  1062. * @sleep_ok: call is allowed to sleep
  1063. *
  1064. * Allocates an exact-match filter for each of the supplied addresses and
  1065. * sets it to the corresponding address. If @idx is not %NULL it should
  1066. * have at least @naddr entries, each of which will be set to the index of
  1067. * the filter allocated for the corresponding MAC address. If a filter
  1068. * could not be allocated for an address its index is set to 0xffff.
  1069. * If @hash is not %NULL addresses that fail to allocate an exact filter
  1070. * are hashed and update the hash filter bitmap pointed at by @hash.
  1071. *
  1072. * Returns a negative error number or the number of filters allocated.
  1073. */
  1074. int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
  1075. unsigned int naddr, const u8 **addr, u16 *idx,
  1076. u64 *hash, bool sleep_ok)
  1077. {
  1078. int offset, ret = 0;
  1079. unsigned nfilters = 0;
  1080. unsigned int rem = naddr;
  1081. struct fw_vi_mac_cmd cmd, rpl;
  1082. unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
  1083. if (naddr > max_naddr)
  1084. return -EINVAL;
  1085. for (offset = 0; offset < naddr; /**/) {
  1086. unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
  1087. ? rem
  1088. : ARRAY_SIZE(cmd.u.exact));
  1089. size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
  1090. u.exact[fw_naddr]), 16);
  1091. struct fw_vi_mac_exact *p;
  1092. int i;
  1093. memset(&cmd, 0, sizeof(cmd));
  1094. cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
  1095. FW_CMD_REQUEST_F |
  1096. FW_CMD_WRITE_F |
  1097. (free ? FW_CMD_EXEC_F : 0) |
  1098. FW_VI_MAC_CMD_VIID_V(viid));
  1099. cmd.freemacs_to_len16 =
  1100. cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
  1101. FW_CMD_LEN16_V(len16));
  1102. for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
  1103. p->valid_to_idx = cpu_to_be16(
  1104. FW_VI_MAC_CMD_VALID_F |
  1105. FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
  1106. memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
  1107. }
  1108. ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
  1109. sleep_ok);
  1110. if (ret && ret != -ENOMEM)
  1111. break;
  1112. for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
  1113. u16 index = FW_VI_MAC_CMD_IDX_G(
  1114. be16_to_cpu(p->valid_to_idx));
  1115. if (idx)
  1116. idx[offset+i] =
  1117. (index >= max_naddr
  1118. ? 0xffff
  1119. : index);
  1120. if (index < max_naddr)
  1121. nfilters++;
  1122. else if (hash)
  1123. *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
  1124. }
  1125. free = false;
  1126. offset += fw_naddr;
  1127. rem -= fw_naddr;
  1128. }
  1129. /*
  1130. * If there were no errors or we merely ran out of room in our MAC
  1131. * address arena, return the number of filters actually written.
  1132. */
  1133. if (ret == 0 || ret == -ENOMEM)
  1134. ret = nfilters;
  1135. return ret;
  1136. }
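/*
 * Example (illustrative sketch): a caller refreshing a VI's address list
 * typically frees the old exact-match filters, installs the new addresses,
 * and falls back to the inexact hash for any that don't fit.  Assuming
 * "addrs" holds naddr MAC address pointers (hypothetical names):
 *
 *	u64 hash = 0;
 *	int ret;
 *
 *	ret = t4vf_alloc_mac_filt(adapter, viid, true, naddr, addrs,
 *				  NULL, &hash, true);
 *	if (ret >= 0)
 *		ret = t4vf_set_addr_hash(adapter, viid, hash != 0, hash, true);
 *
 * A non-negative return from t4vf_alloc_mac_filt() is the number of exact
 * filters actually written.
 */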
  1137. /**
  1138. * t4vf_change_mac - modifies the exact-match filter for a MAC address
  1139. * @adapter: the adapter
  1140. * @viid: the Virtual Interface ID
  1141. * @idx: index of existing filter for old value of MAC address, or -1
  1142. * @addr: the new MAC address value
  1143. * @persist: if idx < 0, the new MAC allocation should be persistent
  1144. *
  1145. * Modifies an exact-match filter and sets it to the new MAC address.
  1146. * Note that in general it is not possible to modify the value of a given
  1147. * filter so the generic way to modify an address filter is to free the
  1148. * one being used by the old address value and allocate a new filter for
  1149. * the new address value. @idx can be -1 if the address is a new
  1150. * addition.
  1151. *
  1152. * Returns a negative error number or the index of the filter with the new
  1153. * MAC value.
  1154. */
  1155. int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
  1156. int idx, const u8 *addr, bool persist)
  1157. {
  1158. int ret;
  1159. struct fw_vi_mac_cmd cmd, rpl;
  1160. struct fw_vi_mac_exact *p = &cmd.u.exact[0];
  1161. size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
  1162. u.exact[1]), 16);
  1163. unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size;
  1164. /*
  1165. * If this is a new allocation, determine whether it should be
  1166. * persistent (across a "freemacs" operation) or not.
  1167. */
  1168. if (idx < 0)
  1169. idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
  1170. memset(&cmd, 0, sizeof(cmd));
  1171. cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
  1172. FW_CMD_REQUEST_F |
  1173. FW_CMD_WRITE_F |
  1174. FW_VI_MAC_CMD_VIID_V(viid));
  1175. cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
  1176. p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
  1177. FW_VI_MAC_CMD_IDX_V(idx));
  1178. memcpy(p->macaddr, addr, sizeof(p->macaddr));
  1179. ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
  1180. if (ret == 0) {
  1181. p = &rpl.u.exact[0];
  1182. ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
  1183. if (ret >= max_mac_addr)
  1184. ret = -ENOMEM;
  1185. }
  1186. return ret;
  1187. }
  1188. /**
  1189. * t4vf_set_addr_hash - program the MAC inexact-match hash filter
  1190. * @adapter: the adapter
  1191. * @viid: the Virtual Interface Identifier
  1192. * @ucast: whether the hash filter should also match unicast addresses
  1193. * @vec: the value to be written to the hash filter
  1194. * @sleep_ok: call is allowed to sleep
  1195. *
  1196. * Sets the 64-bit inexact-match hash filter for a virtual interface.
  1197. */
  1198. int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid,
  1199. bool ucast, u64 vec, bool sleep_ok)
  1200. {
  1201. struct fw_vi_mac_cmd cmd;
  1202. size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
  1203. u.exact[0]), 16);
  1204. memset(&cmd, 0, sizeof(cmd));
  1205. cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
  1206. FW_CMD_REQUEST_F |
  1207. FW_CMD_WRITE_F |
  1208. FW_VI_ENABLE_CMD_VIID_V(viid));
  1209. cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
  1210. FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
  1211. FW_CMD_LEN16_V(len16));
  1212. cmd.u.hash.hashvec = cpu_to_be64(vec);
  1213. return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
  1214. }
  1215. /**
  1216. * t4vf_get_port_stats - collect "port" statistics
  1217. * @adapter: the adapter
  1218. * @pidx: the port index
  1219. * @s: the stats structure to fill
  1220. *
  1221. * Collect statistics for the "port"'s Virtual Interface.
  1222. */
  1223. int t4vf_get_port_stats(struct adapter *adapter, int pidx,
  1224. struct t4vf_port_stats *s)
  1225. {
  1226. struct port_info *pi = adap2pinfo(adapter, pidx);
  1227. struct fw_vi_stats_vf fwstats;
  1228. unsigned int rem = VI_VF_NUM_STATS;
  1229. __be64 *fwsp = (__be64 *)&fwstats;
  1230. /*
  1231. * Grab the Virtual Interface statistics a chunk at a time via mailbox
  1232. * commands. We could use a Work Request and get all of them at once
  1233. * but that's an asynchronous interface which is awkward to use.
  1234. */
  1235. while (rem) {
  1236. unsigned int ix = VI_VF_NUM_STATS - rem;
  1237. unsigned int nstats = min(6U, rem);
  1238. struct fw_vi_stats_cmd cmd, rpl;
  1239. size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
  1240. sizeof(struct fw_vi_stats_ctl));
  1241. size_t len16 = DIV_ROUND_UP(len, 16);
  1242. int ret;
  1243. memset(&cmd, 0, sizeof(cmd));
  1244. cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_STATS_CMD) |
  1245. FW_VI_STATS_CMD_VIID_V(pi->viid) |
  1246. FW_CMD_REQUEST_F |
  1247. FW_CMD_READ_F);
  1248. cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
  1249. cmd.u.ctl.nstats_ix =
  1250. cpu_to_be16(FW_VI_STATS_CMD_IX_V(ix) |
  1251. FW_VI_STATS_CMD_NSTATS_V(nstats));
  1252. ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
  1253. if (ret)
  1254. return ret;
  1255. memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);
  1256. rem -= nstats;
  1257. fwsp += nstats;
  1258. }
  1259. /*
  1260. * Translate firmware statistics into host native statistics.
  1261. */
  1262. s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes);
  1263. s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
  1264. s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes);
  1265. s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
  1266. s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes);
  1267. s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
  1268. s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames);
  1269. s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes);
  1270. s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames);
  1271. s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes);
  1272. s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
  1273. s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes);
  1274. s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
  1275. s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes);
  1276. s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);
  1277. s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames);
  1278. return 0;
  1279. }
  1280. /**
  1281. * t4vf_iq_free - free an ingress queue and its free lists
  1282. * @adapter: the adapter
  1283. * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
  1284. * @iqid: ingress queue ID
  1285. * @fl0id: FL0 queue ID or 0xffff if no attached FL0
  1286. * @fl1id: FL1 queue ID or 0xffff if no attached FL1
  1287. *
  1288. * Frees an ingress queue and its associated free lists, if any.
  1289. */
  1290. int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype,
  1291. unsigned int iqid, unsigned int fl0id, unsigned int fl1id)
  1292. {
  1293. struct fw_iq_cmd cmd;
  1294. memset(&cmd, 0, sizeof(cmd));
  1295. cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
  1296. FW_CMD_REQUEST_F |
  1297. FW_CMD_EXEC_F);
  1298. cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F |
  1299. FW_LEN16(cmd));
  1300. cmd.type_to_iqandstindex =
  1301. cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
  1302. cmd.iqid = cpu_to_be16(iqid);
  1303. cmd.fl0id = cpu_to_be16(fl0id);
  1304. cmd.fl1id = cpu_to_be16(fl1id);
  1305. return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
  1306. }
  1307. /**
  1308. * t4vf_eth_eq_free - free an Ethernet egress queue
  1309. * @adapter: the adapter
  1310. * @eqid: egress queue ID
  1311. *
  1312. * Frees an Ethernet egress queue.
  1313. */
  1314. int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
  1315. {
  1316. struct fw_eq_eth_cmd cmd;
  1317. memset(&cmd, 0, sizeof(cmd));
  1318. cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
  1319. FW_CMD_REQUEST_F |
  1320. FW_CMD_EXEC_F);
  1321. cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F |
  1322. FW_LEN16(cmd));
  1323. cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
  1324. return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
  1325. }
  1326. /**
  1327. * t4vf_handle_fw_rpl - process a firmware reply message
  1328. * @adapter: the adapter
  1329. * @rpl: start of the firmware message
  1330. *
  1331. * Processes a firmware message, such as link state change messages.
  1332. */
  1333. int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
  1334. {
  1335. const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
  1336. u8 opcode = FW_CMD_OP_G(be32_to_cpu(cmd_hdr->hi));
  1337. switch (opcode) {
  1338. case FW_PORT_CMD: {
  1339. /*
  1340. * Link/module state change message.
  1341. */
  1342. const struct fw_port_cmd *port_cmd =
  1343. (const struct fw_port_cmd *)rpl;
  1344. u32 stat, mod;
  1345. int action, port_id, link_ok, speed, fc, pidx;
  1346. /*
  1347. * Extract various fields from port status change message.
  1348. */
  1349. action = FW_PORT_CMD_ACTION_G(
  1350. be32_to_cpu(port_cmd->action_to_len16));
  1351. if (action != FW_PORT_ACTION_GET_PORT_INFO) {
  1352. dev_err(adapter->pdev_dev,
  1353. "Unknown firmware PORT reply action %x\n",
  1354. action);
  1355. break;
  1356. }
  1357. port_id = FW_PORT_CMD_PORTID_G(
  1358. be32_to_cpu(port_cmd->op_to_portid));
  1359. stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
  1360. link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
  1361. speed = 0;
  1362. fc = 0;
  1363. if (stat & FW_PORT_CMD_RXPAUSE_F)
  1364. fc |= PAUSE_RX;
  1365. if (stat & FW_PORT_CMD_TXPAUSE_F)
  1366. fc |= PAUSE_TX;
  1367. if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
  1368. speed = 100;
  1369. else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
  1370. speed = 1000;
  1371. else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
  1372. speed = 10000;
  1373. else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
  1374. speed = 40000;
  1375. /*
  1376. * Scan all of our "ports" (Virtual Interfaces) looking for
  1377. * those bound to the physical port which has changed. If
  1378. * our recorded state doesn't match the current state,
  1379. * signal that change to the OS code.
  1380. */
  1381. for_each_port(adapter, pidx) {
  1382. struct port_info *pi = adap2pinfo(adapter, pidx);
  1383. struct link_config *lc;
  1384. if (pi->port_id != port_id)
  1385. continue;
  1386. lc = &pi->link_cfg;
  1387. mod = FW_PORT_CMD_MODTYPE_G(stat);
  1388. if (mod != pi->mod_type) {
  1389. pi->mod_type = mod;
  1390. t4vf_os_portmod_changed(adapter, pidx);
  1391. }
  1392. if (link_ok != lc->link_ok || speed != lc->speed ||
  1393. fc != lc->fc) {
  1394. /* something changed */
  1395. lc->link_ok = link_ok;
  1396. lc->speed = speed;
  1397. lc->fc = fc;
  1398. lc->supported =
  1399. be16_to_cpu(port_cmd->u.info.pcap);
  1400. t4vf_os_link_changed(adapter, pidx, link_ok);
  1401. }
  1402. }
  1403. break;
  1404. }
  1405. default:
  1406. dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n",
  1407. opcode);
  1408. }
  1409. return 0;
  1410. }
  1411. /**
 * t4vf_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Waits for the device to become ready, then initializes basic adapter
 * software state: default port, clock and resource values that allow
 * limited debugging if the firmware can't be reached, plus chip-specific
 * architecture parameters derived from the PCI device ID.
 *
 * Returns 0 on success or a negative errno on failure.
  1412. */
  1413. int t4vf_prep_adapter(struct adapter *adapter)
  1414. {
  1415. int err;
  1416. unsigned int chipid;
  1417. /* Wait for the device to become ready before proceeding ...
  1418. */
  1419. err = t4vf_wait_dev_ready(adapter);
  1420. if (err)
  1421. return err;
  1422. /* Default port and clock for debugging in case we can't reach
  1423. * firmware.
  1424. */
  1425. adapter->params.nports = 1;
  1426. adapter->params.vfres.pmask = 1;
  1427. adapter->params.vpd.cclk = 50000;
  1428. adapter->params.chip = 0;
  1429. switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) {
  1430. case CHELSIO_T4:
  1431. adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
  1432. adapter->params.arch.sge_fl_db = DBPRIO_F;
  1433. adapter->params.arch.mps_tcam_size =
  1434. NUM_MPS_CLS_SRAM_L_INSTANCES;
  1435. break;
  1436. case CHELSIO_T5:
  1437. chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
  1438. adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
  1439. adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
  1440. adapter->params.arch.mps_tcam_size =
  1441. NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
  1442. break;
  1443. case CHELSIO_T6:
  1444. chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
  1445. adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid);
  1446. adapter->params.arch.sge_fl_db = 0;
  1447. adapter->params.arch.mps_tcam_size =
  1448. NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
  1449. break;
  1450. }
  1451. return 0;
  1452. }
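/*
 * Usage note (illustrative, not part of the driver): at probe time the
 * cxgb4vf driver proper is expected to call these helpers roughly in the
 * order t4vf_prep_adapter(), t4vf_fw_reset(), the parameter/resource queries
 * (t4vf_get_sge_params(), t4vf_get_vpd_params(), t4vf_get_dev_params(),
 * t4vf_get_rss_glb_config(), t4vf_get_vfres()) and finally t4vf_port_init()
 * for each port, before any queues are created.  The exact sequence lives in
 * the main driver file and is only sketched here.
 */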