port.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445
  1. /*
  2. * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include <linux/errno.h>
  33. #include <linux/if_ether.h>
  34. #include <linux/if_vlan.h>
  35. #include <linux/export.h>
  36. #include <linux/mlx4/cmd.h>
  37. #include "mlx4.h"
  38. #include "mlx4_stats.h"
  39. #define MLX4_MAC_VALID (1ull << 63)
  40. #define MLX4_VLAN_VALID (1u << 31)
  41. #define MLX4_VLAN_MASK 0xfff
  42. #define MLX4_STATS_TRAFFIC_COUNTERS_MASK 0xfULL
  43. #define MLX4_STATS_TRAFFIC_DROPS_MASK 0xc0ULL
  44. #define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL
  45. #define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL
  46. #define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2
  47. #define MLX4_IGNORE_FCS_MASK 0x1
  48. void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
  49. {
  50. int i;
  51. mutex_init(&table->mutex);
  52. for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
  53. table->entries[i] = 0;
  54. table->refs[i] = 0;
  55. }
  56. table->max = 1 << dev->caps.log_num_macs;
  57. table->total = 0;
  58. }
  59. void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
  60. {
  61. int i;
  62. mutex_init(&table->mutex);
  63. for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
  64. table->entries[i] = 0;
  65. table->refs[i] = 0;
  66. }
  67. table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
  68. table->total = 0;
  69. }
  70. void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
  71. struct mlx4_roce_gid_table *table)
  72. {
  73. int i;
  74. mutex_init(&table->mutex);
  75. for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++)
  76. memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
  77. }
  78. static int validate_index(struct mlx4_dev *dev,
  79. struct mlx4_mac_table *table, int index)
  80. {
  81. int err = 0;
  82. if (index < 0 || index >= table->max || !table->entries[index]) {
  83. mlx4_warn(dev, "No valid Mac entry for the given index\n");
  84. err = -EINVAL;
  85. }
  86. return err;
  87. }
  88. static int find_index(struct mlx4_dev *dev,
  89. struct mlx4_mac_table *table, u64 mac)
  90. {
  91. int i;
  92. for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
  93. if (table->refs[i] &&
  94. (MLX4_MAC_MASK & mac) ==
  95. (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
  96. return i;
  97. }
  98. /* Mac not found */
  99. return -EINVAL;
  100. }
  101. static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
  102. __be64 *entries)
  103. {
  104. struct mlx4_cmd_mailbox *mailbox;
  105. u32 in_mod;
  106. int err;
  107. mailbox = mlx4_alloc_cmd_mailbox(dev);
  108. if (IS_ERR(mailbox))
  109. return PTR_ERR(mailbox);
  110. memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
  111. in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
  112. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  113. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  114. MLX4_CMD_NATIVE);
  115. mlx4_free_cmd_mailbox(dev, mailbox);
  116. return err;
  117. }
  118. int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
  119. {
  120. struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
  121. struct mlx4_mac_table *table = &info->mac_table;
  122. int i;
  123. for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
  124. if (!table->refs[i])
  125. continue;
  126. if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
  127. *idx = i;
  128. return 0;
  129. }
  130. }
  131. return -ENOENT;
  132. }
  133. EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
/*
 * Add @mac to @port's MAC table, or bump its reference count if it is
 * already present, then program the updated table to the device.
 *
 * Returns the MAC's table index on success, -ENOSPC when the table is
 * full, or the SET_PORT error code on firmware failure.
 */
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int i, err = 0;
	int free = -1;

	mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
		 (unsigned long long) mac, port);
	mutex_lock(&table->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (!table->refs[i]) {
			/* remember the first unused slot for a new entry */
			if (free < 0)
				free = i;
			continue;
		}
		if ((MLX4_MAC_MASK & mac) ==
		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
			/* MAC already registered, increment ref count */
			err = i;
			++table->refs[i];
			goto out;
		}
	}
	mlx4_dbg(dev, "Free MAC index is %d\n", free);
	if (table->total == table->max) {
		/* No free mac entries */
		err = -ENOSPC;
		goto out;
	}
	/* Register new MAC */
	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) mac);
		/* roll back the cached entry on firmware failure */
		table->entries[free] = 0;
		goto out;
	}
	table->refs[free] = 1;
	err = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_register_mac);
/*
 * Register @mac on @port.  In multi-function mode the request is
 * forwarded to the master via the wrapped ALLOC_RES command (falling
 * back to the old REG_MAC format when the new one is rejected);
 * otherwise the local table is updated directly.
 *
 * Returns the MAC table index on success or a negative errno.
 */
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param = 0;
	int err = -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
			err = mlx4_cmd_imm(dev, mac, &out_param,
					   ((u32) port) << 8 | (u32) RES_MAC,
					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		}
		if (err && err == -EINVAL && mlx4_is_slave(dev)) {
			/* retry using old REG_MAC format */
			set_param_l(&out_param, port);
			err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (!err)
				dev->flags |= MLX4_FLAG_OLD_REG_MAC;
		}
		if (err)
			return err;
		/* allocated index is returned in out_param's low dword */
		return get_param_l(&out_param);
	}
	return __mlx4_register_mac(dev, port, mac);
}
EXPORT_SYMBOL_GPL(mlx4_register_mac);
  207. int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
  208. {
  209. return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
  210. (port - 1) * (1 << dev->caps.log_num_macs);
  211. }
  212. EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);
/*
 * Drop one reference on @mac in @port's MAC table.  When the last
 * reference goes away, the entry is cleared and the updated table is
 * programmed to the device.
 */
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info;
	struct mlx4_mac_table *table;
	int index;

	if (port < 1 || port > dev->caps.num_ports) {
		mlx4_warn(dev, "invalid port number (%d), aborting...\n", port);
		return;
	}
	info = &mlx4_priv(dev)->port[port];
	table = &info->mac_table;
	mutex_lock(&table->mutex);
	index = find_index(dev, table, mac);
	if (validate_index(dev, table, index))
		goto out;
	if (--table->refs[index]) {
		/* other users still reference this MAC; leave the HW table */
		mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
			 index);
		goto out;
	}
	table->entries[index] = 0;
	/* best effort: a SET_PORT failure here is deliberately ignored */
	mlx4_set_port_mac_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}
EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
  240. void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
  241. {
  242. u64 out_param = 0;
  243. if (mlx4_is_mfunc(dev)) {
  244. if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
  245. (void) mlx4_cmd_imm(dev, mac, &out_param,
  246. ((u32) port) << 8 | (u32) RES_MAC,
  247. RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
  248. MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
  249. } else {
  250. /* use old unregister mac format */
  251. set_param_l(&out_param, port);
  252. (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
  253. RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
  254. MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
  255. }
  256. return;
  257. }
  258. __mlx4_unregister_mac(dev, port, mac);
  259. return;
  260. }
  261. EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
/*
 * Replace the MAC at the table slot backing @qpn on @port with
 * @new_mac and program the updated table to the device.  The slot is
 * derived from the QP number relative to the port's base QPN.
 *
 * Returns 0 on success or a negative errno; on firmware failure the
 * cached entry is cleared.
 */
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int index = qpn - info->base_qpn;
	int err = 0;

	/* CX1 doesn't support multi-functions */
	mutex_lock(&table->mutex);
	err = validate_index(dev, table, index);
	if (err)
		goto out;
	table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) new_mac);
		/* invalidate the cached slot so it is not reused as valid */
		table->entries[index] = 0;
	}
out:
	mutex_unlock(&table->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_replace_mac);
  285. static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
  286. __be32 *entries)
  287. {
  288. struct mlx4_cmd_mailbox *mailbox;
  289. u32 in_mod;
  290. int err;
  291. mailbox = mlx4_alloc_cmd_mailbox(dev);
  292. if (IS_ERR(mailbox))
  293. return PTR_ERR(mailbox);
  294. memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
  295. in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
  296. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  297. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  298. MLX4_CMD_NATIVE);
  299. mlx4_free_cmd_mailbox(dev, mailbox);
  300. return err;
  301. }
  302. int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
  303. {
  304. struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
  305. int i;
  306. for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
  307. if (table->refs[i] &&
  308. (vid == (MLX4_VLAN_MASK &
  309. be32_to_cpu(table->entries[i])))) {
  310. /* VLAN already registered, increase reference count */
  311. *idx = i;
  312. return 0;
  313. }
  314. }
  315. return -ENOENT;
  316. }
  317. EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
/*
 * Add @vlan to @port's VLAN table, or bump its reference count if it is
 * already present, then program the updated table to the device.  The
 * first MLX4_VLAN_REGULAR slots are reserved and never allocated here.
 *
 * On success stores the table index in *index and returns 0; returns
 * -ENOSPC when the table is full, -ENOMEM when no free slot exists, or
 * the SET_PORT error code on firmware failure.
 */
int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
			 int *index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i, err = 0;
	int free = -1;

	mutex_lock(&table->mutex);
	if (table->total == table->max) {
		/* No free vlan entries */
		err = -ENOSPC;
		goto out;
	}
	/* skip the reserved (special) VLAN slots */
	for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
		if (free < 0 && (table->refs[i] == 0)) {
			free = i;
			continue;
		}
		if (table->refs[i] &&
		    (vlan == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* Vlan already registered, increase references count */
			*index = i;
			++table->refs[i];
			goto out;
		}
	}
	if (free < 0) {
		err = -ENOMEM;
		goto out;
	}
	/* Register new VLAN */
	table->refs[free] = 1;
	table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
	err = mlx4_set_port_vlan_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
		/* roll back the cached entry on firmware failure */
		table->refs[free] = 0;
		table->entries[free] = 0;
		goto out;
	}
	*index = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}
  364. int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
  365. {
  366. u64 out_param = 0;
  367. int err;
  368. if (vlan > 4095)
  369. return -EINVAL;
  370. if (mlx4_is_mfunc(dev)) {
  371. err = mlx4_cmd_imm(dev, vlan, &out_param,
  372. ((u32) port) << 8 | (u32) RES_VLAN,
  373. RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
  374. MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
  375. if (!err)
  376. *index = get_param_l(&out_param);
  377. return err;
  378. }
  379. return __mlx4_register_vlan(dev, port, vlan, index);
  380. }
  381. EXPORT_SYMBOL_GPL(mlx4_register_vlan);
/*
 * Drop one reference on @vlan in @port's VLAN table.  Reserved
 * (special) slots are never freed.  When the last reference goes away,
 * the entry is cleared and the updated table is programmed to the
 * device.
 */
void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int index;

	mutex_lock(&table->mutex);
	if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
		mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
		goto out;
	}
	if (index < MLX4_VLAN_REGULAR) {
		/* reserved slots must never be released */
		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
		goto out;
	}
	if (--table->refs[index]) {
		mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
			 table->refs[index], index);
		goto out;
	}
	table->entries[index] = 0;
	/* best effort: a SET_PORT failure here is deliberately ignored */
	mlx4_set_port_vlan_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}
  406. void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
  407. {
  408. u64 out_param = 0;
  409. if (mlx4_is_mfunc(dev)) {
  410. (void) mlx4_cmd_imm(dev, vlan, &out_param,
  411. ((u32) port) << 8 | (u32) RES_VLAN,
  412. RES_OP_RESERVE_AND_MAP,
  413. MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
  414. MLX4_CMD_WRAPPED);
  415. return;
  416. }
  417. __mlx4_unregister_vlan(dev, port, vlan);
  418. }
  419. EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
/*
 * Query @port's IB capability mask via a MAD_IFC command and store the
 * big-endian result in *caps.  Returns 0 on success or a negative
 * errno.
 */
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	u8 *inbuf, *outbuf;
	int err;

	inmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);
	outmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev, inmailbox);
		return PTR_ERR(outmailbox);
	}
	inbuf = inmailbox->buf;
	outbuf = outmailbox->buf;
	/* Hand-rolled MAD request header; presumably a SubnGet(PortInfo)
	 * MAD with attribute ID 0x0015 and attribute modifier = port —
	 * NOTE(review): confirm against the IB MAD header layout.
	 */
	inbuf[0] = 1;
	inbuf[1] = 1;
	inbuf[2] = 1;
	inbuf[3] = 1;
	*(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (!err)
		/* offset 84 in the response holds the capability mask */
		*caps = *(__be32 *) (outbuf + 84);
	mlx4_free_cmd_mailbox(dev, inmailbox);
	mlx4_free_cmd_mailbox(dev, outmailbox);
	return err;
}
  450. static struct mlx4_roce_gid_entry zgid_entry;
/*
 * Return the number of RoCE GIDs assigned to @slave on @port.  The PF
 * (slave 0) gets the fixed MLX4_ROCE_PF_GIDS; the remaining GIDs are
 * split as evenly as possible among the VFs active on the port, with
 * the first (remainder) VFs getting one extra GID each.
 */
int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
{
	int vfs;
	int slave_gid = slave;
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;
	struct mlx4_active_ports actv_ports;
	unsigned max_port_p_one;

	if (slave == 0)
		return MLX4_ROCE_PF_GIDS;

	/* Slave is a VF */
	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
	actv_ports = mlx4_get_active_ports(dev, slave);
	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;

	/* Convert the global slave number into this port's per-port VF
	 * ordinal by subtracting slaves that belong to other ports.
	 */
	for (i = 1; i < max_port_p_one; i++) {
		struct mlx4_active_ports exclusive_ports;
		struct mlx4_slaves_pport slaves_pport_actv;
		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
		set_bit(i - 1, exclusive_ports.ports);
		if (i == port)
			continue;
		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
				    dev, &exclusive_ports);
		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
					   dev->persist->num_vfs + 1);
	}
	vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
	/* the first (remainder) VFs receive one extra GID */
	if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
		return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
	return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
}
/*
 * Return the first index in @port's RoCE GID table that belongs to
 * @slave.  Uses the same even-split layout as mlx4_get_slave_num_gids():
 * the PF occupies indices [0, MLX4_ROCE_PF_GIDS), followed by the VFs'
 * contiguous ranges in per-port VF order.
 */
int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
{
	int gids;
	unsigned i;
	int slave_gid = slave;
	int vfs;
	struct mlx4_slaves_pport slaves_pport;
	struct mlx4_active_ports actv_ports;
	unsigned max_port_p_one;

	if (slave == 0)
		return 0;

	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
	actv_ports = mlx4_get_active_ports(dev, slave);
	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;

	/* Convert the global slave number into this port's per-port VF
	 * ordinal by subtracting slaves that belong to other ports.
	 */
	for (i = 1; i < max_port_p_one; i++) {
		struct mlx4_active_ports exclusive_ports;
		struct mlx4_slaves_pport slaves_pport_actv;
		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
		set_bit(i - 1, exclusive_ports.ports);
		if (i == port)
			continue;
		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
				    dev, &exclusive_ports);
		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
					   dev->persist->num_vfs + 1);
	}
	gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
	vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
	/* VFs before the remainder boundary own (gids/vfs + 1) GIDs each */
	if (slave_gid <= gids % vfs)
		return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);
	return MLX4_ROCE_PF_GIDS + (gids % vfs) +
		((gids / vfs) * (slave_gid - 1));
}
EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
/*
 * Zero the RoCE GIDs owned by @slave in @port's cached GID table, then
 * program the whole table to firmware via SET_PORT using the caller's
 * @mailbox.  Called with the mailbox allocated but not yet filled.
 * Returns 0 on success or a negative errno from the command.
 */
static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
				     int port, struct mlx4_cmd_mailbox *mailbox)
{
	struct mlx4_roce_gid_entry *gid_entry_mbox;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_gids, base, offset;
	int i, err;

	num_gids = mlx4_get_slave_num_gids(dev, slave, port);
	base = mlx4_get_base_gid_ix(dev, slave, port);

	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);

	mutex_lock(&(priv->port[port].gid_table.mutex));
	/* Zero-out gids belonging to that slave in the port GID table */
	for (i = 0, offset = base; i < num_gids; offset++, i++)
		memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
		       zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE);

	/* Now, copy roce port gids table to mailbox for passing to FW */
	gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf;
	for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
		memcpy(gid_entry_mbox->raw,
		       priv->port[port].gid_table.roce_gids[i].raw,
		       MLX4_ROCE_GID_ENTRY_SIZE);

	err = mlx4_cmd(dev, mailbox->dma,
		       ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8),
		       MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	mutex_unlock(&(priv->port[port].gid_table.mutex));
	return err;
}
/*
 * Clear @slave's RoCE GIDs on every active Ethernet port it owns
 * (typically on slave shutdown/reset).  Does nothing when the slave
 * number is out of range or the slave has no Ethernet ports.  Failures
 * per port are logged and skipped, not propagated.
 */
void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
{
	struct mlx4_active_ports actv_ports;
	struct mlx4_cmd_mailbox *mailbox;
	int num_eth_ports, err;
	int i;

	if (slave < 0 || slave > dev->persist->num_vfs)
		return;

	actv_ports = mlx4_get_active_ports(dev, slave);

	/* first pass: count the slave's active Ethernet ports */
	for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) {
		if (test_bit(i, actv_ports.ports)) {
			if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
				continue;
			num_eth_ports++;
		}
	}

	if (!num_eth_ports)
		return;

	/* have ETH ports. Alloc mailbox for SET_PORT command */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return;

	/* second pass: reset the GID table on each Ethernet port */
	for (i = 0; i < dev->caps.num_ports; i++) {
		if (test_bit(i, actv_ports.ports)) {
			if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
				continue;
			err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox);
			if (err)
				mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n",
					  slave, i + 1, err);
		}
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return;
}
/*
 * Master-side validation and execution of a SET_PORT request issued by
 * @slave.  @in_mod packs the port number (low byte) and the SET_PORT
 * sub-opcode (remaining bits); @op_mod distinguishes Ethernet (non-zero)
 * from IB.  The slave's mailbox contents are sanitized/rewritten before
 * the real firmware command is issued.  Returns 0 or a negative errno.
 */
static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
				u8 op_mod, struct mlx4_cmd_mailbox *inbox)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_port_info *port_info;
	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
	struct mlx4_slave_state *slave_st = &master->slave_state[slave];
	struct mlx4_set_port_rqp_calc_context *qpn_context;
	struct mlx4_set_port_general_context *gen_context;
	struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1;
	int reset_qkey_viols;
	int port;
	int is_eth;
	int num_gids;
	int base;
	u32 in_modifier;
	u32 promisc;
	u16 mtu, prev_mtu;
	int err;
	int i, j;
	int offset;
	__be32 agg_cap_mask;
	__be32 slave_cap_mask;
	__be32 new_cap_mask;

	port = in_mod & 0xff;
	in_modifier = in_mod >> 8;
	is_eth = op_mod;
	port_info = &priv->port[port];

	/* Slaves cannot perform SET_PORT operations except changing MTU */
	if (is_eth) {
		if (slave != dev->caps.function &&
		    in_modifier != MLX4_SET_PORT_GENERAL &&
		    in_modifier != MLX4_SET_PORT_GID_TABLE) {
			mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
				  slave);
			return -EINVAL;
		}
		switch (in_modifier) {
		case MLX4_SET_PORT_RQP_CALC:
			/* Force the port's real base QPN into the context,
			 * keeping only the slave's promiscuity bits.
			 */
			qpn_context = inbox->buf;
			qpn_context->base_qpn =
				cpu_to_be32(port_info->base_qpn);
			qpn_context->n_mac = 0x7;
			promisc = be32_to_cpu(qpn_context->promisc) >>
				SET_PORT_PROMISC_SHIFT;
			qpn_context->promisc = cpu_to_be32(
				promisc << SET_PORT_PROMISC_SHIFT |
				port_info->base_qpn);
			promisc = be32_to_cpu(qpn_context->mcast) >>
				SET_PORT_MC_PROMISC_SHIFT;
			qpn_context->mcast = cpu_to_be32(
				promisc << SET_PORT_MC_PROMISC_SHIFT |
				port_info->base_qpn);
			break;
		case MLX4_SET_PORT_GENERAL:
			gen_context = inbox->buf;
			/* Mtu is configured as the max MTU among all the
			 * the functions on the port. */
			mtu = be16_to_cpu(gen_context->mtu);
			mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
				    ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
			prev_mtu = slave_st->mtu[port];
			slave_st->mtu[port] = mtu;
			if (mtu > master->max_mtu[port])
				master->max_mtu[port] = mtu;
			if (mtu < prev_mtu && prev_mtu ==
			    master->max_mtu[port]) {
				/* This slave held the max; recompute it
				 * across all slaves.
				 */
				slave_st->mtu[port] = mtu;
				master->max_mtu[port] = mtu;
				for (i = 0; i < dev->num_slaves; i++) {
					master->max_mtu[port] =
						max(master->max_mtu[port],
						    master->slave_state[i].mtu[port]);
				}
			}
			gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
			break;
		case MLX4_SET_PORT_GID_TABLE:
			/* change to MULTIPLE entries: number of guest's gids
			 * need a FOR-loop here over number of gids the guest has.
			 * 1. Check no duplicates in gids passed by slave
			 */
			num_gids = mlx4_get_slave_num_gids(dev, slave, port);
			base = mlx4_get_base_gid_ix(dev, slave, port);
			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
			for (i = 0; i < num_gids; gid_entry_mbox++, i++) {
				if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
					    sizeof(zgid_entry)))
					continue;
				gid_entry_mb1 = gid_entry_mbox + 1;
				for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) {
					if (!memcmp(gid_entry_mb1->raw,
						    zgid_entry.raw, sizeof(zgid_entry)))
						continue;
					if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw,
						    sizeof(gid_entry_mbox->raw))) {
						/* found duplicate */
						return -EINVAL;
					}
				}
			}
			/* 2. Check that do not have duplicates in OTHER
			 * entries in the port GID table
			 */
			mutex_lock(&(priv->port[port].gid_table.mutex));
			for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
				if (i >= base && i < base + num_gids)
					continue; /* don't compare to slave's current gids */
				gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i];
				if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
					continue;
				gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
				for (j = 0; j < num_gids; gid_entry_mbox++, j++) {
					if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
						    sizeof(zgid_entry)))
						continue;
					if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
						    sizeof(gid_entry_tbl->raw))) {
						/* found duplicate */
						mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
							  slave, i);
						mutex_unlock(&(priv->port[port].gid_table.mutex));
						return -EINVAL;
					}
				}
			}
			/* insert slave GIDs with memcpy, starting at slave's base index */
			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
			for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
				memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
				       gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE);

			/* Now, copy roce port gids table to current mailbox for passing to FW */
			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
			for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
				memcpy(gid_entry_mbox->raw,
				       priv->port[port].gid_table.roce_gids[i].raw,
				       MLX4_ROCE_GID_ENTRY_SIZE);

			err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
				       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				       MLX4_CMD_NATIVE);
			mutex_unlock(&(priv->port[port].gid_table.mutex));
			return err;
		}

		return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
				MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_NATIVE);
	}

	/* Slaves are not allowed to SET_PORT beacon (LED) blink */
	if (op_mod == MLX4_SET_PORT_BEACON_OPCODE) {
		mlx4_warn(dev, "denying SET_PORT Beacon slave:%d\n", slave);
		return -EPERM;
	}

	/* For IB, we only consider:
	 * - The capability mask, which is set to the aggregate of all
	 *   slave function capabilities
	 * - The QKey violatin counter - reset according to each request.
	 */
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
		new_cap_mask = ((__be32 *) inbox->buf)[2];
	} else {
		reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
		new_cap_mask = ((__be32 *) inbox->buf)[1];
	}

	/* slave may not set the IS_SM capability for the port */
	if (slave != mlx4_master_func_num(dev) &&
	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM))
		return -EINVAL;

	/* No DEV_MGMT in multifunc mode */
	if (mlx4_is_mfunc(dev) &&
	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP))
		return -EINVAL;

	agg_cap_mask = 0;
	slave_cap_mask =
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
	priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
	for (i = 0; i < dev->num_slaves; i++)
		agg_cap_mask |=
			priv->mfunc.master.slave_state[i].ib_cap_mask[port];

	/* only clear mailbox for guests. Master may be setting
	 * MTU or PKEY table size
	 */
	if (slave != dev->caps.function)
		memset(inbox->buf, 0, 256);
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) inbox->buf	   |= !!reset_qkey_viols << 6;
		((__be32 *) inbox->buf)[2] = agg_cap_mask;
	} else {
		((u8 *) inbox->buf)[3]     |= !!reset_qkey_viols;
		((__be32 *) inbox->buf)[1] = agg_cap_mask;
	}

	err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		/* restore the slave's previous cap mask on failure */
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
			slave_cap_mask;
	return err;
}
  779. int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
  780. struct mlx4_vhcr *vhcr,
  781. struct mlx4_cmd_mailbox *inbox,
  782. struct mlx4_cmd_mailbox *outbox,
  783. struct mlx4_cmd_info *cmd)
  784. {
  785. int port = mlx4_slave_convert_port(
  786. dev, slave, vhcr->in_modifier & 0xFF);
  787. if (port < 0)
  788. return -EINVAL;
  789. vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
  790. (port & 0xFF);
  791. return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
  792. vhcr->op_modifier, inbox);
  793. }
/* bit locations for set port command with zero op modifier */
enum {
	MLX4_SET_PORT_VL_CAP = 4, /* bits 7:4 */
	MLX4_SET_PORT_MTU_CAP = 12, /* bits 15:12 */
	/* "change" bits tell the FW which of the value fields above are
	 * valid in this command and should be applied.
	 */
	MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
	MLX4_CHANGE_PORT_VL_CAP = 21,
	MLX4_CHANGE_PORT_MTU_CAP = 22,
};
/* Program the IB port capabilities (MTU cap, VL cap, and optionally the
 * pkey table size) via SET_PORT with the IB op modifier.
 * Returns 0 on success (or immediately for Ethernet ports) and a negative
 * errno on failure.
 */
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err, vl_cap, pkey_tbl_flag = 0;

	/* This IB-flavoured SET_PORT does not apply to Ethernet ports */
	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];

	/* pkey table size is only programmed by the master, and only when
	 * the caller passed a non-negative size
	 */
	if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
		pkey_tbl_flag = 1;
		((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
	}

	/* IB VL CAP enum isn't used by the firmware, just numerical values */
	for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
		((__be32 *) mailbox->buf)[0] = cpu_to_be32(
			(1 << MLX4_CHANGE_PORT_MTU_CAP) |
			(1 << MLX4_CHANGE_PORT_VL_CAP)  |
			(pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
			(dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
			(vl_cap << MLX4_SET_PORT_VL_CAP));
		err = mlx4_cmd(dev, mailbox->dma, port,
			       MLX4_SET_PORT_IB_OPCODE, MLX4_CMD_SET_PORT,
			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
		/* On -ENOMEM retry with the next smaller power-of-two VL cap.
		 * NOTE(review): presumably the FW returns ENOMEM when it
		 * lacks resources for the requested VL cap — confirm against
		 * the firmware spec.
		 */
		if (err != -ENOMEM)
			break;
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
  833. int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
  834. u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
  835. {
  836. struct mlx4_cmd_mailbox *mailbox;
  837. struct mlx4_set_port_general_context *context;
  838. int err;
  839. u32 in_mod;
  840. mailbox = mlx4_alloc_cmd_mailbox(dev);
  841. if (IS_ERR(mailbox))
  842. return PTR_ERR(mailbox);
  843. context = mailbox->buf;
  844. context->flags = SET_PORT_GEN_ALL_VALID;
  845. context->mtu = cpu_to_be16(mtu);
  846. context->pptx = (pptx * (!pfctx)) << 7;
  847. context->pfctx = pfctx;
  848. context->pprx = (pprx * (!pfcrx)) << 7;
  849. context->pfcrx = pfcrx;
  850. in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
  851. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  852. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  853. MLX4_CMD_WRAPPED);
  854. mlx4_free_cmd_mailbox(dev, mailbox);
  855. return err;
  856. }
  857. EXPORT_SYMBOL(mlx4_SET_PORT_general);
/* Program the per-port RQP calculation context (base QP number and the
 * unicast/multicast promiscuous steering defaults). Only meaningful in
 * A0 steering mode; a no-op otherwise.
 */
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
			   u8 promisc)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_rqp_calc_context *context;
	int err;
	u32 in_mod;
	/* Multicast default depends on whether VEP MC steering is supported */
	u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
		MCAST_DIRECT : MCAST_DEFAULT;

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	context = mailbox->buf;
	context->base_qpn = cpu_to_be32(base_qpn);
	context->n_mac = dev->caps.log_num_macs;
	/* Promiscuous QPs are encoded relative to base_qpn */
	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
				       base_qpn);
	context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
				     base_qpn);
	context->intra_no_vlan = 0;
	context->no_vlan = MLX4_NO_VLAN_IDX;
	context->intra_vlan_miss = 0;
	context->vlan_miss = MLX4_VLAN_MISS_IDX;

	in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
  891. int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value)
  892. {
  893. struct mlx4_cmd_mailbox *mailbox;
  894. struct mlx4_set_port_general_context *context;
  895. u32 in_mod;
  896. int err;
  897. mailbox = mlx4_alloc_cmd_mailbox(dev);
  898. if (IS_ERR(mailbox))
  899. return PTR_ERR(mailbox);
  900. context = mailbox->buf;
  901. context->v_ignore_fcs |= MLX4_FLAG_V_IGNORE_FCS_MASK;
  902. if (ignore_fcs_value)
  903. context->ignore_fcs |= MLX4_IGNORE_FCS_MASK;
  904. else
  905. context->ignore_fcs &= ~MLX4_IGNORE_FCS_MASK;
  906. in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
  907. err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
  908. MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
  909. mlx4_free_cmd_mailbox(dev, mailbox);
  910. return err;
  911. }
  912. EXPORT_SYMBOL(mlx4_SET_PORT_fcs_check);
/* SET_PORT VXLAN op: mailbox flag bits */
enum {
	VXLAN_ENABLE_MODIFY	= 1 << 7, /* modify_flags: enable field valid */
	VXLAN_STEERING_MODIFY	= 1 << 6, /* modify_flags: steering field valid */
	VXLAN_ENABLE		= 1 << 7, /* enable_flags: turn VXLAN offload on */
};

/* Mailbox layout for SET_PORT with the VXLAN op modifier */
struct mlx4_set_port_vxlan_context {
	u32	reserved1;
	u8	modify_flags;	/* which fields below the FW should apply */
	u8	reserved2;
	u8	enable_flags;	/* VXLAN_ENABLE or 0 */
	u8	steering;
};
  925. int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable)
  926. {
  927. int err;
  928. u32 in_mod;
  929. struct mlx4_cmd_mailbox *mailbox;
  930. struct mlx4_set_port_vxlan_context *context;
  931. mailbox = mlx4_alloc_cmd_mailbox(dev);
  932. if (IS_ERR(mailbox))
  933. return PTR_ERR(mailbox);
  934. context = mailbox->buf;
  935. memset(context, 0, sizeof(*context));
  936. context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
  937. if (enable)
  938. context->enable_flags = VXLAN_ENABLE;
  939. context->steering = steering;
  940. in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
  941. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  942. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  943. MLX4_CMD_NATIVE);
  944. mlx4_free_cmd_mailbox(dev, mailbox);
  945. return err;
  946. }
  947. EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);
  948. int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time)
  949. {
  950. int err;
  951. struct mlx4_cmd_mailbox *mailbox;
  952. mailbox = mlx4_alloc_cmd_mailbox(dev);
  953. if (IS_ERR(mailbox))
  954. return PTR_ERR(mailbox);
  955. *((__be32 *)mailbox->buf) = cpu_to_be32(time);
  956. err = mlx4_cmd(dev, mailbox->dma, port, MLX4_SET_PORT_BEACON_OPCODE,
  957. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  958. MLX4_CMD_NATIVE);
  959. mlx4_free_cmd_mailbox(dev, mailbox);
  960. return err;
  961. }
  962. EXPORT_SYMBOL(mlx4_SET_PORT_BEACON);
  963. int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
  964. struct mlx4_vhcr *vhcr,
  965. struct mlx4_cmd_mailbox *inbox,
  966. struct mlx4_cmd_mailbox *outbox,
  967. struct mlx4_cmd_info *cmd)
  968. {
  969. int err = 0;
  970. return err;
  971. }
  972. int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
  973. u64 mac, u64 clear, u8 mode)
  974. {
  975. return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
  976. MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
  977. MLX4_CMD_WRAPPED);
  978. }
  979. EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
  980. int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
  981. struct mlx4_vhcr *vhcr,
  982. struct mlx4_cmd_mailbox *inbox,
  983. struct mlx4_cmd_mailbox *outbox,
  984. struct mlx4_cmd_info *cmd)
  985. {
  986. int err = 0;
  987. return err;
  988. }
/* Issue DUMP_ETH_STATS for @in_mod, placing the results in @outbox.
 * @slave is unused in the body; kept for symmetry with the wrapper API.
 */
int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
			       u32 in_mod, struct mlx4_cmd_mailbox *outbox)
{
	return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
			    MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
			    MLX4_CMD_NATIVE);
}
  996. int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
  997. struct mlx4_vhcr *vhcr,
  998. struct mlx4_cmd_mailbox *inbox,
  999. struct mlx4_cmd_mailbox *outbox,
  1000. struct mlx4_cmd_info *cmd)
  1001. {
  1002. if (slave != dev->caps.function)
  1003. return 0;
  1004. return mlx4_common_dump_eth_stats(dev, slave,
  1005. vhcr->in_modifier, outbox);
  1006. }
  1007. int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
  1008. int *slave_id)
  1009. {
  1010. struct mlx4_priv *priv = mlx4_priv(dev);
  1011. int i, found_ix = -1;
  1012. int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
  1013. struct mlx4_slaves_pport slaves_pport;
  1014. unsigned num_vfs;
  1015. int slave_gid;
  1016. if (!mlx4_is_mfunc(dev))
  1017. return -EINVAL;
  1018. slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
  1019. num_vfs = bitmap_weight(slaves_pport.slaves,
  1020. dev->persist->num_vfs + 1) - 1;
  1021. for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
  1022. if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
  1023. MLX4_ROCE_GID_ENTRY_SIZE)) {
  1024. found_ix = i;
  1025. break;
  1026. }
  1027. }
  1028. if (found_ix >= 0) {
  1029. /* Calculate a slave_gid which is the slave number in the gid
  1030. * table and not a globally unique slave number.
  1031. */
  1032. if (found_ix < MLX4_ROCE_PF_GIDS)
  1033. slave_gid = 0;
  1034. else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
  1035. (vf_gids / num_vfs + 1))
  1036. slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
  1037. (vf_gids / num_vfs + 1)) + 1;
  1038. else
  1039. slave_gid =
  1040. ((found_ix - MLX4_ROCE_PF_GIDS -
  1041. ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
  1042. (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
  1043. /* Calculate the globally unique slave id */
  1044. if (slave_gid) {
  1045. struct mlx4_active_ports exclusive_ports;
  1046. struct mlx4_active_ports actv_ports;
  1047. struct mlx4_slaves_pport slaves_pport_actv;
  1048. unsigned max_port_p_one;
  1049. int num_vfs_before = 0;
  1050. int candidate_slave_gid;
  1051. /* Calculate how many VFs are on the previous port, if exists */
  1052. for (i = 1; i < port; i++) {
  1053. bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
  1054. set_bit(i - 1, exclusive_ports.ports);
  1055. slaves_pport_actv =
  1056. mlx4_phys_to_slaves_pport_actv(
  1057. dev, &exclusive_ports);
  1058. num_vfs_before += bitmap_weight(
  1059. slaves_pport_actv.slaves,
  1060. dev->persist->num_vfs + 1);
  1061. }
  1062. /* candidate_slave_gid isn't necessarily the correct slave, but
  1063. * it has the same number of ports and is assigned to the same
  1064. * ports as the real slave we're looking for. On dual port VF,
  1065. * slave_gid = [single port VFs on port <port>] +
  1066. * [offset of the current slave from the first dual port VF] +
  1067. * 1 (for the PF).
  1068. */
  1069. candidate_slave_gid = slave_gid + num_vfs_before;
  1070. actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
  1071. max_port_p_one = find_first_bit(
  1072. actv_ports.ports, dev->caps.num_ports) +
  1073. bitmap_weight(actv_ports.ports,
  1074. dev->caps.num_ports) + 1;
  1075. /* Calculate the real slave number */
  1076. for (i = 1; i < max_port_p_one; i++) {
  1077. if (i == port)
  1078. continue;
  1079. bitmap_zero(exclusive_ports.ports,
  1080. dev->caps.num_ports);
  1081. set_bit(i - 1, exclusive_ports.ports);
  1082. slaves_pport_actv =
  1083. mlx4_phys_to_slaves_pport_actv(
  1084. dev, &exclusive_ports);
  1085. slave_gid += bitmap_weight(
  1086. slaves_pport_actv.slaves,
  1087. dev->persist->num_vfs + 1);
  1088. }
  1089. }
  1090. *slave_id = slave_gid;
  1091. }
  1092. return (found_ix >= 0) ? 0 : -EINVAL;
  1093. }
  1094. EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid);
  1095. int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
  1096. u8 *gid)
  1097. {
  1098. struct mlx4_priv *priv = mlx4_priv(dev);
  1099. if (!mlx4_is_master(dev))
  1100. return -EINVAL;
  1101. memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw,
  1102. MLX4_ROCE_GID_ENTRY_SIZE);
  1103. return 0;
  1104. }
  1105. EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
/* Cable Module Info */
#define MODULE_INFO_MAX_READ 48	/* max eeprom bytes per MAD_IFC read */

#define I2C_ADDR_LOW  0x50	/* i2c slave address of the eeprom low page */
#define I2C_ADDR_HIGH 0x51	/* i2c slave address of the eeprom high page */
#define I2C_PAGE_SIZE 256	/* bytes per eeprom page */

/* Module Info Data — payload layout of the 0xFF60 Module Info MAD */
struct mlx4_cable_info {
	u8	i2c_addr;	/* I2C_ADDR_LOW or I2C_ADDR_HIGH */
	u8	page_num;	/* eeprom page number */
	__be16	dev_mem_address; /* byte offset within the page */
	__be16	reserved1;
	__be16	size;		/* number of bytes to read */
	__be32	reserved2[2];
	u8	data[MODULE_INFO_MAX_READ]; /* returned eeprom bytes */
};
  1121. enum cable_info_err {
  1122. CABLE_INF_INV_PORT = 0x1,
  1123. CABLE_INF_OP_NOSUP = 0x2,
  1124. CABLE_INF_NOT_CONN = 0x3,
  1125. CABLE_INF_NO_EEPRM = 0x4,
  1126. CABLE_INF_PAGE_ERR = 0x5,
  1127. CABLE_INF_INV_ADDR = 0x6,
  1128. CABLE_INF_I2C_ADDR = 0x7,
  1129. CABLE_INF_QSFP_VIO = 0x8,
  1130. CABLE_INF_I2C_BUSY = 0x9,
  1131. };
  1132. #define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF)
  1133. static inline const char *cable_info_mad_err_str(u16 mad_status)
  1134. {
  1135. u8 err = MAD_STATUS_2_CABLE_ERR(mad_status);
  1136. switch (err) {
  1137. case CABLE_INF_INV_PORT:
  1138. return "invalid port selected";
  1139. case CABLE_INF_OP_NOSUP:
  1140. return "operation not supported for this port (the port is of type CX4 or internal)";
  1141. case CABLE_INF_NOT_CONN:
  1142. return "cable is not connected";
  1143. case CABLE_INF_NO_EEPRM:
  1144. return "the connected cable has no EPROM (passive copper cable)";
  1145. case CABLE_INF_PAGE_ERR:
  1146. return "page number is greater than 15";
  1147. case CABLE_INF_INV_ADDR:
  1148. return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)";
  1149. case CABLE_INF_I2C_ADDR:
  1150. return "invalid I2C slave address";
  1151. case CABLE_INF_QSFP_VIO:
  1152. return "at least one cable violates the QSFP specification and ignores the modsel signal";
  1153. case CABLE_INF_I2C_BUSY:
  1154. return "I2C bus is constantly busy";
  1155. }
  1156. return "Unknown Error";
  1157. }
/**
 * mlx4_get_module_info - Read cable module eeprom data
 * @dev: mlx4_dev.
 * @port: port number.
 * @offset: byte offset in eeprom to start reading data from.
 * @size: num of bytes to read.
 * @data: output buffer to put the requested data into.
 *
 * Reads cable module eeprom data, puts the outcome data into
 * data pointer parameter.
 * Returns num of read bytes on success or a negative error
 * code.
 */
int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
			 u16 offset, u16 size, u8 *data)
{
	struct mlx4_cmd_mailbox *inbox, *outbox;
	struct mlx4_mad_ifc *inmad, *outmad;
	struct mlx4_cable_info *cable_info;
	u16 i2c_addr;
	int ret;

	/* A single MAD can carry at most MODULE_INFO_MAX_READ bytes */
	if (size > MODULE_INFO_MAX_READ)
		size = MODULE_INFO_MAX_READ;

	inbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inbox))
		return PTR_ERR(inbox);

	outbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outbox)) {
		mlx4_free_cmd_mailbox(dev, inbox);
		return PTR_ERR(outbox);
	}

	inmad = (struct mlx4_mad_ifc *)(inbox->buf);
	outmad = (struct mlx4_mad_ifc *)(outbox->buf);

	/* Build a Get MAD for the vendor Module Info attribute */
	inmad->method = 0x1; /* Get */
	inmad->class_version = 0x1;
	inmad->mgmt_class = 0x1;
	inmad->base_version = 0x1;
	inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */

	if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE)
		/* Cross pages reads are not allowed
		 * read until offset 256 in low page
		 */
		size -= offset + size - I2C_PAGE_SIZE;

	i2c_addr = I2C_ADDR_LOW;
	if (offset >= I2C_PAGE_SIZE) {
		/* Reset offset to high page */
		i2c_addr = I2C_ADDR_HIGH;
		offset -= I2C_PAGE_SIZE;
	}

	/* Fill in the read request inside the MAD payload */
	cable_info = (struct mlx4_cable_info *)inmad->data;
	cable_info->dev_mem_address = cpu_to_be16(offset);
	cable_info->page_num = 0;
	cable_info->i2c_addr = i2c_addr;
	cable_info->size = cpu_to_be16(size);

	/* op modifier 3 — NOTE(review): MAD_IFC flag bits; confirm meaning
	 * against the MAD_IFC command documentation.
	 */
	ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (ret)
		goto out;

	if (be16_to_cpu(outmad->status)) {
		/* Mad returned with bad status */
		ret = be16_to_cpu(outmad->status);
		mlx4_warn(dev,
			  "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
			  0xFF60, port, i2c_addr, offset, size,
			  ret, cable_info_mad_err_str(ret));

		if (i2c_addr == I2C_ADDR_HIGH &&
		    MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR)
			/* Some SFP cables do not support i2c slave
			 * address 0x51 (high page), abort silently.
			 */
			ret = 0;
		else
			ret = -ret;
		goto out;
	}
	/* Success: copy the returned eeprom bytes out of the response MAD */
	cable_info = (struct mlx4_cable_info *)outmad->data;
	memcpy(data, cable_info->data, size);
	ret = size;
out:
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return ret;
}
EXPORT_SYMBOL(mlx4_get_module_info);