netcp_core.c

  1. /*
  2. * Keystone NetCP Core driver
  3. *
  4. * Copyright (C) 2014 Texas Instruments Incorporated
  5. * Authors: Sandeep Nair <sandeep_n@ti.com>
  6. * Sandeep Paulraj <s-paulraj@ti.com>
  7. * Cyril Chemparathy <cyril@ti.com>
  8. * Santosh Shilimkar <santosh.shilimkar@ti.com>
  9. * Murali Karicheri <m-karicheri2@ti.com>
  10. * Wingman Kwok <w-kwok2@ti.com>
  11. *
  12. * This program is free software; you can redistribute it and/or
  13. * modify it under the terms of the GNU General Public License as
  14. * published by the Free Software Foundation version 2.
  15. *
  16. * This program is distributed "as is" WITHOUT ANY WARRANTY of any
  17. * kind, whether express or implied; without even the implied warranty
  18. * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  19. * GNU General Public License for more details.
  20. */
  21. #include <linux/io.h>
  22. #include <linux/module.h>
  23. #include <linux/of_net.h>
  24. #include <linux/of_address.h>
  25. #include <linux/if_vlan.h>
  26. #include <linux/pm_runtime.h>
  27. #include <linux/platform_device.h>
  28. #include <linux/soc/ti/knav_qmss.h>
  29. #include <linux/soc/ti/knav_dma.h>
  30. #include "netcp.h"
  31. #define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD)
  32. #define NETCP_NAPI_WEIGHT 64
  33. #define NETCP_TX_TIMEOUT (5 * HZ)
  34. #define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN)
  35. #define NETCP_MIN_PACKET_SIZE ETH_ZLEN
  36. #define NETCP_MAX_MCAST_ADDR 16
  37. #define NETCP_EFUSE_REG_INDEX 0
  38. #define NETCP_MOD_PROBE_SKIPPED 1
  39. #define NETCP_MOD_PROBE_FAILED 2
  40. #define NETCP_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
  41. NETIF_MSG_DRV | NETIF_MSG_LINK | \
  42. NETIF_MSG_IFUP | NETIF_MSG_INTR | \
  43. NETIF_MSG_PROBE | NETIF_MSG_TIMER | \
  44. NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \
  45. NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \
  46. NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \
  47. NETIF_MSG_RX_STATUS)
  48. #define NETCP_EFUSE_ADDR_SWAP 2
  49. #define knav_queue_get_id(q) knav_queue_device_control(q, \
  50. KNAV_QUEUE_GET_ID, (unsigned long)NULL)
  51. #define knav_queue_enable_notify(q) knav_queue_device_control(q, \
  52. KNAV_QUEUE_ENABLE_NOTIFY, \
  53. (unsigned long)NULL)
  54. #define knav_queue_disable_notify(q) knav_queue_device_control(q, \
  55. KNAV_QUEUE_DISABLE_NOTIFY, \
  56. (unsigned long)NULL)
  57. #define knav_queue_get_count(q) knav_queue_device_control(q, \
  58. KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)
  59. #define for_each_netcp_module(module) \
  60. list_for_each_entry(module, &netcp_modules, module_list)
  61. #define for_each_netcp_device_module(netcp_device, inst_modpriv) \
  62. list_for_each_entry(inst_modpriv, \
  63. &((netcp_device)->modpriv_head), inst_list)
  64. #define for_each_module(netcp, intf_modpriv) \
  65. list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list)
  66. /* Module management structures */
  67. struct netcp_device {
  68. struct list_head device_list;
  69. struct list_head interface_head;
  70. struct list_head modpriv_head;
  71. struct device *device;
  72. };
  73. struct netcp_inst_modpriv {
  74. struct netcp_device *netcp_device;
  75. struct netcp_module *netcp_module;
  76. struct list_head inst_list;
  77. void *module_priv;
  78. };
  79. struct netcp_intf_modpriv {
  80. struct netcp_intf *netcp_priv;
  81. struct netcp_module *netcp_module;
  82. struct list_head intf_list;
  83. void *module_priv;
  84. };
  85. static LIST_HEAD(netcp_devices);
  86. static LIST_HEAD(netcp_modules);
  87. static DEFINE_MUTEX(netcp_modules_lock);
  88. static int netcp_debug_level = -1;
  89. module_param(netcp_debug_level, int, 0);
  90. MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");
  91. /* Helper functions - Get/Set */
  92. static void get_pkt_info(u32 *buff, u32 *buff_len, u32 *ndesc,
  93. struct knav_dma_desc *desc)
  94. {
  95. *buff_len = desc->buff_len;
  96. *buff = desc->buff;
  97. *ndesc = desc->next_desc;
  98. }
  99. static void get_pad_info(u32 *pad0, u32 *pad1, struct knav_dma_desc *desc)
  100. {
  101. *pad0 = desc->pad[0];
  102. *pad1 = desc->pad[1];
  103. }
  104. static void get_org_pkt_info(u32 *buff, u32 *buff_len,
  105. struct knav_dma_desc *desc)
  106. {
  107. *buff = desc->orig_buff;
  108. *buff_len = desc->orig_len;
  109. }
  110. static void get_words(u32 *words, int num_words, u32 *desc)
  111. {
  112. int i;
  113. for (i = 0; i < num_words; i++)
  114. words[i] = desc[i];
  115. }
  116. static void set_pkt_info(u32 buff, u32 buff_len, u32 ndesc,
  117. struct knav_dma_desc *desc)
  118. {
  119. desc->buff_len = buff_len;
  120. desc->buff = buff;
  121. desc->next_desc = ndesc;
  122. }
  123. static void set_desc_info(u32 desc_info, u32 pkt_info,
  124. struct knav_dma_desc *desc)
  125. {
  126. desc->desc_info = desc_info;
  127. desc->packet_info = pkt_info;
  128. }
  129. static void set_pad_info(u32 pad0, u32 pad1, struct knav_dma_desc *desc)
  130. {
  131. desc->pad[0] = pad0;
  132. desc->pad[1] = pad1;
  133. }
  134. static void set_org_pkt_info(u32 buff, u32 buff_len,
  135. struct knav_dma_desc *desc)
  136. {
  137. desc->orig_buff = buff;
  138. desc->orig_len = buff_len;
  139. }
  140. static void set_words(u32 *words, int num_words, u32 *desc)
  141. {
  142. int i;
  143. for (i = 0; i < num_words; i++)
  144. desc[i] = words[i];
  145. }
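/*
 * Note (added commentary): every access to the hardware descriptor goes
 * through the small get_*()/set_*() helpers above.  Besides the buffer
 * pointer/length and the next_desc link, the driver stashes host-side
 * pointers (the skb on Tx, the buffer or page pointer on Rx) in the
 * descriptor pad words so they can be recovered on completion or teardown.
 */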
  146. /* Read the e-fuse value as 32 bit values to be endian independent */
  147. static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
  148. {
  149. unsigned int addr0, addr1;
  150. addr1 = readl(efuse_mac + 4);
  151. addr0 = readl(efuse_mac);
  152. switch (swap) {
  153. case NETCP_EFUSE_ADDR_SWAP:
  154. addr0 = addr1;
  155. addr1 = readl(efuse_mac);
  156. break;
  157. default:
  158. break;
  159. }
  160. x[0] = (addr1 & 0x0000ff00) >> 8;
  161. x[1] = addr1 & 0x000000ff;
  162. x[2] = (addr0 & 0xff000000) >> 24;
  163. x[3] = (addr0 & 0x00ff0000) >> 16;
  164. x[4] = (addr0 & 0x0000ff00) >> 8;
  165. x[5] = addr0 & 0x000000ff;
  166. return 0;
  167. }
  168. static const char *netcp_node_name(struct device_node *node)
  169. {
  170. const char *name;
  171. if (of_property_read_string(node, "label", &name) < 0)
  172. name = node->name;
  173. if (!name)
  174. name = "unknown";
  175. return name;
  176. }
  177. /* Module management routines */
  178. static int netcp_register_interface(struct netcp_intf *netcp)
  179. {
  180. int ret;
  181. ret = register_netdev(netcp->ndev);
  182. if (!ret)
  183. netcp->netdev_registered = true;
  184. return ret;
  185. }
  186. static int netcp_module_probe(struct netcp_device *netcp_device,
  187. struct netcp_module *module)
  188. {
  189. struct device *dev = netcp_device->device;
  190. struct device_node *devices, *interface, *node = dev->of_node;
  191. struct device_node *child;
  192. struct netcp_inst_modpriv *inst_modpriv;
  193. struct netcp_intf *netcp_intf;
  194. struct netcp_module *tmp;
  195. bool primary_module_registered = false;
  196. int ret;
  197. /* Find this module in the sub-tree for this device */
  198. devices = of_get_child_by_name(node, "netcp-devices");
  199. if (!devices) {
  200. dev_err(dev, "could not find netcp-devices node\n");
  201. return NETCP_MOD_PROBE_SKIPPED;
  202. }
  203. for_each_available_child_of_node(devices, child) {
  204. const char *name = netcp_node_name(child);
  205. if (!strcasecmp(module->name, name))
  206. break;
  207. }
  208. of_node_put(devices);
  209. /* If module not used for this device, skip it */
  210. if (!child) {
  211. dev_warn(dev, "module(%s) not used for device\n", module->name);
  212. return NETCP_MOD_PROBE_SKIPPED;
  213. }
  214. inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL);
  215. if (!inst_modpriv) {
  216. of_node_put(child);
  217. return -ENOMEM;
  218. }
  219. inst_modpriv->netcp_device = netcp_device;
  220. inst_modpriv->netcp_module = module;
  221. list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head);
  222. ret = module->probe(netcp_device, dev, child,
  223. &inst_modpriv->module_priv);
  224. of_node_put(child);
  225. if (ret) {
  226. dev_err(dev, "Probe of module(%s) failed with %d\n",
  227. module->name, ret);
  228. list_del(&inst_modpriv->inst_list);
  229. devm_kfree(dev, inst_modpriv);
  230. return NETCP_MOD_PROBE_FAILED;
  231. }
  232. /* Attach modules only if the primary module is probed */
  233. for_each_netcp_module(tmp) {
  234. if (tmp->primary)
  235. primary_module_registered = true;
  236. }
  237. if (!primary_module_registered)
  238. return 0;
  239. /* Attach module to interfaces */
  240. list_for_each_entry(netcp_intf, &netcp_device->interface_head,
  241. interface_list) {
  242. struct netcp_intf_modpriv *intf_modpriv;
  243. intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
  244. GFP_KERNEL);
  245. if (!intf_modpriv)
  246. return -ENOMEM;
  247. interface = of_parse_phandle(netcp_intf->node_interface,
  248. module->name, 0);
  249. if (!interface) {
  250. devm_kfree(dev, intf_modpriv);
  251. continue;
  252. }
  253. intf_modpriv->netcp_priv = netcp_intf;
  254. intf_modpriv->netcp_module = module;
  255. list_add_tail(&intf_modpriv->intf_list,
  256. &netcp_intf->module_head);
  257. ret = module->attach(inst_modpriv->module_priv,
  258. netcp_intf->ndev, interface,
  259. &intf_modpriv->module_priv);
  260. of_node_put(interface);
  261. if (ret) {
  262. dev_dbg(dev, "Attach of module %s declined with %d\n",
  263. module->name, ret);
  264. list_del(&intf_modpriv->intf_list);
  265. devm_kfree(dev, intf_modpriv);
  266. continue;
  267. }
  268. }
  269. /* Now register the interface with netdev */
  270. list_for_each_entry(netcp_intf,
  271. &netcp_device->interface_head,
  272. interface_list) {
  273. /* If interface not registered then register now */
  274. if (!netcp_intf->netdev_registered) {
  275. ret = netcp_register_interface(netcp_intf);
  276. if (ret)
  277. return -ENODEV;
  278. }
  279. }
  280. return 0;
  281. }
  282. int netcp_register_module(struct netcp_module *module)
  283. {
  284. struct netcp_device *netcp_device;
  285. struct netcp_module *tmp;
  286. int ret;
  287. if (!module->name) {
  288. WARN(1, "error registering netcp module: no name\n");
  289. return -EINVAL;
  290. }
  291. if (!module->probe) {
  292. WARN(1, "error registering netcp module: no probe\n");
  293. return -EINVAL;
  294. }
  295. mutex_lock(&netcp_modules_lock);
  296. for_each_netcp_module(tmp) {
  297. if (!strcasecmp(tmp->name, module->name)) {
  298. mutex_unlock(&netcp_modules_lock);
  299. return -EEXIST;
  300. }
  301. }
  302. list_add_tail(&module->module_list, &netcp_modules);
  303. list_for_each_entry(netcp_device, &netcp_devices, device_list) {
  304. ret = netcp_module_probe(netcp_device, module);
  305. if (ret < 0)
  306. goto fail;
  307. }
  308. mutex_unlock(&netcp_modules_lock);
  309. return 0;
  310. fail:
  311. mutex_unlock(&netcp_modules_lock);
  312. netcp_unregister_module(module);
  313. return ret;
  314. }
  315. EXPORT_SYMBOL_GPL(netcp_register_module);
  316. static void netcp_release_module(struct netcp_device *netcp_device,
  317. struct netcp_module *module)
  318. {
  319. struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
  320. struct netcp_intf *netcp_intf, *netcp_tmp;
  321. struct device *dev = netcp_device->device;
  322. /* Release the module from each interface */
  323. list_for_each_entry_safe(netcp_intf, netcp_tmp,
  324. &netcp_device->interface_head,
  325. interface_list) {
  326. struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;
  327. list_for_each_entry_safe(intf_modpriv, intf_tmp,
  328. &netcp_intf->module_head,
  329. intf_list) {
  330. if (intf_modpriv->netcp_module == module) {
  331. module->release(intf_modpriv->module_priv);
  332. list_del(&intf_modpriv->intf_list);
  333. devm_kfree(dev, intf_modpriv);
  334. break;
  335. }
  336. }
  337. }
  338. /* Remove the module from each instance */
  339. list_for_each_entry_safe(inst_modpriv, inst_tmp,
  340. &netcp_device->modpriv_head, inst_list) {
  341. if (inst_modpriv->netcp_module == module) {
  342. module->remove(netcp_device,
  343. inst_modpriv->module_priv);
  344. list_del(&inst_modpriv->inst_list);
  345. devm_kfree(dev, inst_modpriv);
  346. break;
  347. }
  348. }
  349. }
  350. void netcp_unregister_module(struct netcp_module *module)
  351. {
  352. struct netcp_device *netcp_device;
  353. struct netcp_module *module_tmp;
  354. mutex_lock(&netcp_modules_lock);
  355. list_for_each_entry(netcp_device, &netcp_devices, device_list) {
  356. netcp_release_module(netcp_device, module);
  357. }
  358. /* Remove the module from the module list */
  359. for_each_netcp_module(module_tmp) {
  360. if (module == module_tmp) {
  361. list_del(&module->module_list);
  362. break;
  363. }
  364. }
  365. mutex_unlock(&netcp_modules_lock);
  366. }
  367. EXPORT_SYMBOL_GPL(netcp_unregister_module);
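/*
 * Illustrative sketch only (not part of this driver): a minimal module
 * skeleton showing how a client registers with the core.  The callback
 * signatures follow the call sites in netcp_module_probe() and
 * netcp_release_module() above; the field names assume the struct
 * netcp_module declaration in netcp.h, and "example" is a hypothetical
 * module name that would have to match a child of the "netcp-devices"
 * device tree node.
 */
static int example_probe(struct netcp_device *netcp_device,
			 struct device *dev, struct device_node *node,
			 void **inst_priv)
{
	/* allocate per-NetCP-instance state and hand it back via inst_priv */
	*inst_priv = NULL;
	return 0;
}

static int example_remove(struct netcp_device *netcp_device, void *inst_priv)
{
	/* free whatever example_probe() allocated */
	return 0;
}

static int example_attach(void *inst_priv, struct net_device *ndev,
			  struct device_node *node, void **intf_priv)
{
	/* per-interface setup; Tx/Rx hooks are usually registered here */
	*intf_priv = NULL;
	return 0;
}

static int example_release(void *intf_priv)
{
	/* undo example_attach() */
	return 0;
}

static struct netcp_module example_module = {
	.name		= "example",
	.probe		= example_probe,
	.remove		= example_remove,
	.attach		= example_attach,
	.release	= example_release,
};

/* A module init would then simply call netcp_register_module(&example_module). */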
  368. void *netcp_module_get_intf_data(struct netcp_module *module,
  369. struct netcp_intf *intf)
  370. {
  371. struct netcp_intf_modpriv *intf_modpriv;
  372. list_for_each_entry(intf_modpriv, &intf->module_head, intf_list)
  373. if (intf_modpriv->netcp_module == module)
  374. return intf_modpriv->module_priv;
  375. return NULL;
  376. }
  377. EXPORT_SYMBOL_GPL(netcp_module_get_intf_data);
  378. /* Module TX and RX Hook management */
  379. struct netcp_hook_list {
  380. struct list_head list;
  381. netcp_hook_rtn *hook_rtn;
  382. void *hook_data;
  383. int order;
  384. };
  385. int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
  386. netcp_hook_rtn *hook_rtn, void *hook_data)
  387. {
  388. struct netcp_hook_list *entry;
  389. struct netcp_hook_list *next;
  390. unsigned long flags;
  391. entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
  392. if (!entry)
  393. return -ENOMEM;
  394. entry->hook_rtn = hook_rtn;
  395. entry->hook_data = hook_data;
  396. entry->order = order;
  397. spin_lock_irqsave(&netcp_priv->lock, flags);
  398. list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
  399. if (next->order > order)
  400. break;
  401. }
  402. __list_add(&entry->list, next->list.prev, &next->list);
  403. spin_unlock_irqrestore(&netcp_priv->lock, flags);
  404. return 0;
  405. }
  406. EXPORT_SYMBOL_GPL(netcp_register_txhook);
  407. int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
  408. netcp_hook_rtn *hook_rtn, void *hook_data)
  409. {
  410. struct netcp_hook_list *next, *n;
  411. unsigned long flags;
  412. spin_lock_irqsave(&netcp_priv->lock, flags);
  413. list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
  414. if ((next->order == order) &&
  415. (next->hook_rtn == hook_rtn) &&
  416. (next->hook_data == hook_data)) {
  417. list_del(&next->list);
  418. spin_unlock_irqrestore(&netcp_priv->lock, flags);
  419. devm_kfree(netcp_priv->dev, next);
  420. return 0;
  421. }
  422. }
  423. spin_unlock_irqrestore(&netcp_priv->lock, flags);
  424. return -ENOENT;
  425. }
  426. EXPORT_SYMBOL_GPL(netcp_unregister_txhook);
  427. int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
  428. netcp_hook_rtn *hook_rtn, void *hook_data)
  429. {
  430. struct netcp_hook_list *entry;
  431. struct netcp_hook_list *next;
  432. unsigned long flags;
  433. entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
  434. if (!entry)
  435. return -ENOMEM;
  436. entry->hook_rtn = hook_rtn;
  437. entry->hook_data = hook_data;
  438. entry->order = order;
  439. spin_lock_irqsave(&netcp_priv->lock, flags);
  440. list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
  441. if (next->order > order)
  442. break;
  443. }
  444. __list_add(&entry->list, next->list.prev, &next->list);
  445. spin_unlock_irqrestore(&netcp_priv->lock, flags);
  446. return 0;
  447. }
  448. int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
  449. netcp_hook_rtn *hook_rtn, void *hook_data)
  450. {
  451. struct netcp_hook_list *next, *n;
  452. unsigned long flags;
  453. spin_lock_irqsave(&netcp_priv->lock, flags);
  454. list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
  455. if ((next->order == order) &&
  456. (next->hook_rtn == hook_rtn) &&
  457. (next->hook_data == hook_data)) {
  458. list_del(&next->list);
  459. spin_unlock_irqrestore(&netcp_priv->lock, flags);
  460. devm_kfree(netcp_priv->dev, next);
  461. return 0;
  462. }
  463. }
  464. spin_unlock_irqrestore(&netcp_priv->lock, flags);
  465. return -ENOENT;
  466. }
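/*
 * Illustrative sketch only (not part of this driver): hook routines use the
 * signature implied by the hook_rtn() call sites in the Rx/Tx paths below,
 * i.e. int (*)(int order, void *hook_data, struct netcp_packet *p_info).
 * On Tx a hook claims the packet by setting p_info->tx_pipe; the tx_pipe
 * passed in through hook_data here is hypothetical.
 */
static int example_tx_hook(int order, void *hook_data,
			   struct netcp_packet *p_info)
{
	struct netcp_tx_pipe *tx_pipe = hook_data;

	/* direct the packet to this module's transmit pipe */
	p_info->tx_pipe = tx_pipe;
	return 0;	/* non-zero rejects the packet, see netcp_tx_submit_skb() */
}

/*
 * A module would typically register this from its open()/attach() callback:
 *
 *	netcp_register_txhook(netdev_priv(ndev), order, example_tx_hook,
 *			      tx_pipe);
 *
 * Hooks run in ascending 'order'; the sorted insert above places lower
 * order values earlier in the list.
 */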
  467. static void netcp_frag_free(bool is_frag, void *ptr)
  468. {
  469. if (is_frag)
  470. skb_free_frag(ptr);
  471. else
  472. kfree(ptr);
  473. }
  474. static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
  475. struct knav_dma_desc *desc)
  476. {
  477. struct knav_dma_desc *ndesc;
  478. dma_addr_t dma_desc, dma_buf;
  479. unsigned int buf_len, dma_sz = sizeof(*ndesc);
  480. void *buf_ptr;
  481. u32 tmp;
  482. get_words(&dma_desc, 1, &desc->next_desc);
  483. while (dma_desc) {
  484. ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
  485. if (unlikely(!ndesc)) {
  486. dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
  487. break;
  488. }
  489. get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
  490. get_pad_info((u32 *)&buf_ptr, &tmp, ndesc);
  491. dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
  492. __free_page(buf_ptr);
   493. knav_pool_desc_put(netcp->rx_pool, ndesc);
  494. }
  495. get_pad_info((u32 *)&buf_ptr, &buf_len, desc);
  496. if (buf_ptr)
  497. netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
  498. knav_pool_desc_put(netcp->rx_pool, desc);
  499. }
  500. static void netcp_empty_rx_queue(struct netcp_intf *netcp)
  501. {
  502. struct knav_dma_desc *desc;
  503. unsigned int dma_sz;
  504. dma_addr_t dma;
  505. for (; ;) {
  506. dma = knav_queue_pop(netcp->rx_queue, &dma_sz);
  507. if (!dma)
  508. break;
  509. desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
  510. if (unlikely(!desc)) {
  511. dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
  512. __func__);
  513. netcp->ndev->stats.rx_errors++;
  514. continue;
  515. }
  516. netcp_free_rx_desc_chain(netcp, desc);
  517. netcp->ndev->stats.rx_dropped++;
  518. }
  519. }
  520. static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
  521. {
  522. unsigned int dma_sz, buf_len, org_buf_len;
  523. struct knav_dma_desc *desc, *ndesc;
  524. unsigned int pkt_sz = 0, accum_sz;
  525. struct netcp_hook_list *rx_hook;
  526. dma_addr_t dma_desc, dma_buff;
  527. struct netcp_packet p_info;
  528. struct sk_buff *skb;
  529. void *org_buf_ptr;
  530. u32 tmp;
  531. dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
  532. if (!dma_desc)
  533. return -1;
  534. desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
  535. if (unlikely(!desc)) {
  536. dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
  537. return 0;
  538. }
  539. get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
  540. get_pad_info((u32 *)&org_buf_ptr, &org_buf_len, desc);
  541. if (unlikely(!org_buf_ptr)) {
  542. dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
  543. goto free_desc;
  544. }
  545. pkt_sz &= KNAV_DMA_DESC_PKT_LEN_MASK;
  546. accum_sz = buf_len;
  547. dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE);
  548. /* Build a new sk_buff for the primary buffer */
  549. skb = build_skb(org_buf_ptr, org_buf_len);
  550. if (unlikely(!skb)) {
  551. dev_err(netcp->ndev_dev, "build_skb() failed\n");
  552. goto free_desc;
  553. }
  554. /* update data, tail and len */
  555. skb_reserve(skb, NETCP_SOP_OFFSET);
  556. __skb_put(skb, buf_len);
  557. /* Fill in the page fragment list */
  558. while (dma_desc) {
  559. struct page *page;
  560. ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
  561. if (unlikely(!ndesc)) {
  562. dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
  563. goto free_desc;
  564. }
  565. get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
  566. get_pad_info((u32 *)&page, &tmp, ndesc);
  567. if (likely(dma_buff && buf_len && page)) {
  568. dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
  569. DMA_FROM_DEVICE);
  570. } else {
  571. dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%p), len(%d), page(%p)\n",
  572. (void *)dma_buff, buf_len, page);
  573. goto free_desc;
  574. }
  575. skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
  576. offset_in_page(dma_buff), buf_len, PAGE_SIZE);
  577. accum_sz += buf_len;
  578. /* Free the descriptor */
  579. knav_pool_desc_put(netcp->rx_pool, ndesc);
  580. }
  581. /* Free the primary descriptor */
  582. knav_pool_desc_put(netcp->rx_pool, desc);
  583. /* check for packet len and warn */
  584. if (unlikely(pkt_sz != accum_sz))
  585. dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
  586. pkt_sz, accum_sz);
  587. /* Remove ethernet FCS from the packet */
  588. __pskb_trim(skb, skb->len - ETH_FCS_LEN);
  589. /* Call each of the RX hooks */
  590. p_info.skb = skb;
  591. p_info.rxtstamp_complete = false;
  592. list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
  593. int ret;
  594. ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
  595. &p_info);
  596. if (unlikely(ret)) {
  597. dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
  598. rx_hook->order, ret);
  599. netcp->ndev->stats.rx_errors++;
  600. dev_kfree_skb(skb);
  601. return 0;
  602. }
  603. }
  604. netcp->ndev->stats.rx_packets++;
  605. netcp->ndev->stats.rx_bytes += skb->len;
  606. /* push skb up the stack */
  607. skb->protocol = eth_type_trans(skb, netcp->ndev);
  608. netif_receive_skb(skb);
  609. return 0;
  610. free_desc:
  611. netcp_free_rx_desc_chain(netcp, desc);
  612. netcp->ndev->stats.rx_errors++;
  613. return 0;
  614. }
  615. static int netcp_process_rx_packets(struct netcp_intf *netcp,
  616. unsigned int budget)
  617. {
  618. int i;
  619. for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++)
  620. ;
  621. return i;
  622. }
  623. /* Release descriptors and attached buffers from Rx FDQ */
  624. static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
  625. {
  626. struct knav_dma_desc *desc;
  627. unsigned int buf_len, dma_sz;
  628. dma_addr_t dma;
  629. void *buf_ptr;
  630. u32 tmp;
  631. /* Allocate descriptor */
  632. while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
  633. desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
  634. if (unlikely(!desc)) {
  635. dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
  636. continue;
  637. }
  638. get_org_pkt_info(&dma, &buf_len, desc);
  639. get_pad_info((u32 *)&buf_ptr, &tmp, desc);
  640. if (unlikely(!dma)) {
  641. dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
  642. knav_pool_desc_put(netcp->rx_pool, desc);
  643. continue;
  644. }
  645. if (unlikely(!buf_ptr)) {
  646. dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
  647. knav_pool_desc_put(netcp->rx_pool, desc);
  648. continue;
  649. }
  650. if (fdq == 0) {
  651. dma_unmap_single(netcp->dev, dma, buf_len,
  652. DMA_FROM_DEVICE);
  653. netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
  654. } else {
  655. dma_unmap_page(netcp->dev, dma, buf_len,
  656. DMA_FROM_DEVICE);
  657. __free_page(buf_ptr);
  658. }
  659. knav_pool_desc_put(netcp->rx_pool, desc);
  660. }
  661. }
  662. static void netcp_rxpool_free(struct netcp_intf *netcp)
  663. {
  664. int i;
  665. for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
  666. !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
  667. netcp_free_rx_buf(netcp, i);
  668. if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
  669. dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
  670. netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));
  671. knav_pool_destroy(netcp->rx_pool);
  672. netcp->rx_pool = NULL;
  673. }
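/*
 * Rx buffer layout (added commentary): FDQ 0 supplies the primary buffer
 * for every packet, a linear fragment sized for NETCP_PACKET_SIZE plus the
 * SOP offset and allocated with netdev_alloc_frag() so that build_skb()
 * can be used on completion.  The remaining FDQs supply whole pages that
 * become skb page fragments when a packet overflows the primary buffer.
 * The host-side pointer (and, for the primary buffer, its allocated
 * length) are kept in the descriptor pad words so the buffers can be
 * recovered and freed later.
 */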
  674. static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
  675. {
  676. struct knav_dma_desc *hwdesc;
  677. unsigned int buf_len, dma_sz;
  678. u32 desc_info, pkt_info;
  679. struct page *page;
  680. dma_addr_t dma;
  681. void *bufptr;
  682. u32 pad[2];
  683. /* Allocate descriptor */
  684. hwdesc = knav_pool_desc_get(netcp->rx_pool);
  685. if (IS_ERR_OR_NULL(hwdesc)) {
  686. dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
  687. return -ENOMEM;
  688. }
  689. if (likely(fdq == 0)) {
  690. unsigned int primary_buf_len;
  691. /* Allocate a primary receive queue entry */
  692. buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
  693. primary_buf_len = SKB_DATA_ALIGN(buf_len) +
  694. SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  695. bufptr = netdev_alloc_frag(primary_buf_len);
  696. pad[1] = primary_buf_len;
  697. if (unlikely(!bufptr)) {
  698. dev_warn_ratelimited(netcp->ndev_dev,
  699. "Primary RX buffer alloc failed\n");
  700. goto fail;
  701. }
  702. dma = dma_map_single(netcp->dev, bufptr, buf_len,
  703. DMA_TO_DEVICE);
  704. if (unlikely(dma_mapping_error(netcp->dev, dma)))
  705. goto fail;
  706. pad[0] = (u32)bufptr;
  707. } else {
  708. /* Allocate a secondary receive queue entry */
  709. page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
  710. if (unlikely(!page)) {
  711. dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
  712. goto fail;
  713. }
  714. buf_len = PAGE_SIZE;
  715. dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
  716. pad[0] = (u32)page;
  717. pad[1] = 0;
  718. }
  719. desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
  720. desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
  721. pkt_info = KNAV_DMA_DESC_HAS_EPIB;
  722. pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
  723. pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
  724. KNAV_DMA_DESC_RETQ_SHIFT;
  725. set_org_pkt_info(dma, buf_len, hwdesc);
  726. set_pad_info(pad[0], pad[1], hwdesc);
  727. set_desc_info(desc_info, pkt_info, hwdesc);
  728. /* Push to FDQs */
  729. knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
  730. &dma_sz);
  731. knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
  732. return 0;
  733. fail:
  734. knav_pool_desc_put(netcp->rx_pool, hwdesc);
  735. return -ENOMEM;
  736. }
  737. /* Refill Rx FDQ with descriptors & attached buffers */
  738. static void netcp_rxpool_refill(struct netcp_intf *netcp)
  739. {
  740. u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
  741. int i, ret = 0;
  742. /* Calculate the FDQ deficit and refill */
  743. for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
  744. fdq_deficit[i] = netcp->rx_queue_depths[i] -
  745. knav_queue_get_count(netcp->rx_fdq[i]);
  746. while (fdq_deficit[i]-- && !ret)
  747. ret = netcp_allocate_rx_buf(netcp, i);
  748. } /* end for fdqs */
  749. }
  750. /* NAPI poll */
  751. static int netcp_rx_poll(struct napi_struct *napi, int budget)
  752. {
  753. struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
  754. rx_napi);
  755. unsigned int packets;
  756. packets = netcp_process_rx_packets(netcp, budget);
  757. netcp_rxpool_refill(netcp);
  758. if (packets < budget) {
  759. napi_complete(&netcp->rx_napi);
  760. knav_queue_enable_notify(netcp->rx_queue);
  761. }
  762. return packets;
  763. }
  764. static void netcp_rx_notify(void *arg)
  765. {
  766. struct netcp_intf *netcp = arg;
  767. knav_queue_disable_notify(netcp->rx_queue);
  768. napi_schedule(&netcp->rx_napi);
  769. }
  770. static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
  771. struct knav_dma_desc *desc,
  772. unsigned int desc_sz)
  773. {
  774. struct knav_dma_desc *ndesc = desc;
  775. dma_addr_t dma_desc, dma_buf;
  776. unsigned int buf_len;
  777. while (ndesc) {
  778. get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);
  779. if (dma_buf && buf_len)
  780. dma_unmap_single(netcp->dev, dma_buf, buf_len,
  781. DMA_TO_DEVICE);
  782. else
  783. dev_warn(netcp->ndev_dev, "bad Tx desc buf(%p), len(%d)\n",
  784. (void *)dma_buf, buf_len);
  785. knav_pool_desc_put(netcp->tx_pool, ndesc);
  786. ndesc = NULL;
  787. if (dma_desc) {
  788. ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
  789. desc_sz);
  790. if (!ndesc)
  791. dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
  792. }
  793. }
  794. }
  795. static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
  796. unsigned int budget)
  797. {
  798. struct knav_dma_desc *desc;
  799. struct sk_buff *skb;
  800. unsigned int dma_sz;
  801. dma_addr_t dma;
  802. int pkts = 0;
  803. u32 tmp;
  804. while (budget--) {
  805. dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
  806. if (!dma)
  807. break;
  808. desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
  809. if (unlikely(!desc)) {
  810. dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
  811. netcp->ndev->stats.tx_errors++;
  812. continue;
  813. }
  814. get_pad_info((u32 *)&skb, &tmp, desc);
  815. netcp_free_tx_desc_chain(netcp, desc, dma_sz);
  816. if (!skb) {
  817. dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
  818. netcp->ndev->stats.tx_errors++;
  819. continue;
  820. }
  821. if (netif_subqueue_stopped(netcp->ndev, skb) &&
  822. netif_running(netcp->ndev) &&
  823. (knav_pool_count(netcp->tx_pool) >
  824. netcp->tx_resume_threshold)) {
  825. u16 subqueue = skb_get_queue_mapping(skb);
  826. netif_wake_subqueue(netcp->ndev, subqueue);
  827. }
  828. netcp->ndev->stats.tx_packets++;
  829. netcp->ndev->stats.tx_bytes += skb->len;
  830. dev_kfree_skb(skb);
  831. pkts++;
  832. }
  833. return pkts;
  834. }
  835. static int netcp_tx_poll(struct napi_struct *napi, int budget)
  836. {
  837. int packets;
  838. struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
  839. tx_napi);
  840. packets = netcp_process_tx_compl_packets(netcp, budget);
  841. if (packets < budget) {
  842. napi_complete(&netcp->tx_napi);
  843. knav_queue_enable_notify(netcp->tx_compl_q);
  844. }
  845. return packets;
  846. }
  847. static void netcp_tx_notify(void *arg)
  848. {
  849. struct netcp_intf *netcp = arg;
  850. knav_queue_disable_notify(netcp->tx_compl_q);
  851. napi_schedule(&netcp->tx_napi);
  852. }
  853. static struct knav_dma_desc*
  854. netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
  855. {
  856. struct knav_dma_desc *desc, *ndesc, *pdesc;
  857. unsigned int pkt_len = skb_headlen(skb);
  858. struct device *dev = netcp->dev;
  859. dma_addr_t dma_addr;
  860. unsigned int dma_sz;
  861. int i;
  862. /* Map the linear buffer */
  863. dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
  864. if (unlikely(dma_mapping_error(dev, dma_addr))) {
  865. dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
  866. return NULL;
  867. }
  868. desc = knav_pool_desc_get(netcp->tx_pool);
  869. if (IS_ERR_OR_NULL(desc)) {
  870. dev_err(netcp->ndev_dev, "out of TX desc\n");
  871. dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
  872. return NULL;
  873. }
  874. set_pkt_info(dma_addr, pkt_len, 0, desc);
  875. if (skb_is_nonlinear(skb)) {
  876. prefetchw(skb_shinfo(skb));
  877. } else {
  878. desc->next_desc = 0;
  879. goto upd_pkt_len;
  880. }
  881. pdesc = desc;
  882. /* Handle the case where skb is fragmented in pages */
  883. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  884. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  885. struct page *page = skb_frag_page(frag);
  886. u32 page_offset = frag->page_offset;
  887. u32 buf_len = skb_frag_size(frag);
  888. dma_addr_t desc_dma;
  889. u32 pkt_info;
  890. dma_addr = dma_map_page(dev, page, page_offset, buf_len,
  891. DMA_TO_DEVICE);
   892. if (unlikely(dma_mapping_error(dev, dma_addr))) {
  893. dev_err(netcp->ndev_dev, "Failed to map skb page\n");
  894. goto free_descs;
  895. }
  896. ndesc = knav_pool_desc_get(netcp->tx_pool);
  897. if (IS_ERR_OR_NULL(ndesc)) {
  898. dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
  899. dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
  900. goto free_descs;
  901. }
  902. desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool,
  903. (void *)ndesc);
  904. pkt_info =
  905. (netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
  906. KNAV_DMA_DESC_RETQ_SHIFT;
  907. set_pkt_info(dma_addr, buf_len, 0, ndesc);
  908. set_words(&desc_dma, 1, &pdesc->next_desc);
  909. pkt_len += buf_len;
  910. if (pdesc != desc)
  911. knav_pool_desc_map(netcp->tx_pool, pdesc,
  912. sizeof(*pdesc), &desc_dma, &dma_sz);
  913. pdesc = ndesc;
  914. }
  915. if (pdesc != desc)
  916. knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
  917. &dma_addr, &dma_sz);
  918. /* frag list based linkage is not supported for now. */
  919. if (skb_shinfo(skb)->frag_list) {
  920. dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
  921. goto free_descs;
  922. }
  923. upd_pkt_len:
  924. WARN_ON(pkt_len != skb->len);
  925. pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
  926. set_words(&pkt_len, 1, &desc->desc_info);
  927. return desc;
  928. free_descs:
  929. netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
  930. return NULL;
  931. }
  932. static int netcp_tx_submit_skb(struct netcp_intf *netcp,
  933. struct sk_buff *skb,
  934. struct knav_dma_desc *desc)
  935. {
  936. struct netcp_tx_pipe *tx_pipe = NULL;
  937. struct netcp_hook_list *tx_hook;
  938. struct netcp_packet p_info;
  939. unsigned int dma_sz;
  940. dma_addr_t dma;
  941. u32 tmp = 0;
  942. int ret = 0;
  943. p_info.netcp = netcp;
  944. p_info.skb = skb;
  945. p_info.tx_pipe = NULL;
  946. p_info.psdata_len = 0;
  947. p_info.ts_context = NULL;
  948. p_info.txtstamp_complete = NULL;
  949. p_info.epib = desc->epib;
  950. p_info.psdata = desc->psdata;
  951. memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(u32));
  952. /* Find out where to inject the packet for transmission */
  953. list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
  954. ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
  955. &p_info);
  956. if (unlikely(ret != 0)) {
  957. dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
  958. tx_hook->order, ret);
  959. ret = (ret < 0) ? ret : NETDEV_TX_OK;
  960. goto out;
  961. }
  962. }
  963. /* Make sure some TX hook claimed the packet */
  964. tx_pipe = p_info.tx_pipe;
  965. if (!tx_pipe) {
  966. dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
  967. ret = -ENXIO;
  968. goto out;
  969. }
  970. /* update descriptor */
  971. if (p_info.psdata_len) {
  972. u32 *psdata = p_info.psdata;
  973. memmove(p_info.psdata, p_info.psdata + p_info.psdata_len,
  974. p_info.psdata_len);
  975. set_words(psdata, p_info.psdata_len, psdata);
  976. tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
  977. KNAV_DMA_DESC_PSLEN_SHIFT;
  978. }
  979. tmp |= KNAV_DMA_DESC_HAS_EPIB |
  980. ((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
  981. KNAV_DMA_DESC_RETQ_SHIFT);
  982. if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) {
  983. tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) <<
  984. KNAV_DMA_DESC_PSFLAG_SHIFT);
  985. }
  986. set_words(&tmp, 1, &desc->packet_info);
  987. set_words((u32 *)&skb, 1, &desc->pad[0]);
  988. if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
  989. tmp = tx_pipe->switch_to_port;
  990. set_words((u32 *)&tmp, 1, &desc->tag_info);
  991. }
  992. /* submit packet descriptor */
  993. ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
  994. &dma_sz);
  995. if (unlikely(ret)) {
  996. dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
  997. ret = -ENOMEM;
  998. goto out;
  999. }
  1000. skb_tx_timestamp(skb);
  1001. knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);
  1002. out:
  1003. return ret;
  1004. }
  1005. /* Submit the packet */
  1006. static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
  1007. {
  1008. struct netcp_intf *netcp = netdev_priv(ndev);
  1009. int subqueue = skb_get_queue_mapping(skb);
  1010. struct knav_dma_desc *desc;
  1011. int desc_count, ret = 0;
  1012. if (unlikely(skb->len <= 0)) {
  1013. dev_kfree_skb(skb);
  1014. return NETDEV_TX_OK;
  1015. }
  1016. if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
  1017. ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
  1018. if (ret < 0) {
  1019. /* If we get here, the skb has already been dropped */
  1020. dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
  1021. ret);
  1022. ndev->stats.tx_dropped++;
  1023. return ret;
  1024. }
  1025. skb->len = NETCP_MIN_PACKET_SIZE;
  1026. }
  1027. desc = netcp_tx_map_skb(skb, netcp);
  1028. if (unlikely(!desc)) {
  1029. netif_stop_subqueue(ndev, subqueue);
  1030. ret = -ENOBUFS;
  1031. goto drop;
  1032. }
  1033. ret = netcp_tx_submit_skb(netcp, skb, desc);
  1034. if (ret)
  1035. goto drop;
  1036. ndev->trans_start = jiffies;
  1037. /* Check Tx pool count & stop subqueue if needed */
  1038. desc_count = knav_pool_count(netcp->tx_pool);
  1039. if (desc_count < netcp->tx_pause_threshold) {
  1040. dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
  1041. netif_stop_subqueue(ndev, subqueue);
  1042. }
  1043. return NETDEV_TX_OK;
  1044. drop:
  1045. ndev->stats.tx_dropped++;
  1046. if (desc)
  1047. netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
  1048. dev_kfree_skb(skb);
  1049. return ret;
  1050. }
  1051. int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe)
  1052. {
  1053. if (tx_pipe->dma_channel) {
  1054. knav_dma_close_channel(tx_pipe->dma_channel);
  1055. tx_pipe->dma_channel = NULL;
  1056. }
  1057. return 0;
  1058. }
  1059. EXPORT_SYMBOL_GPL(netcp_txpipe_close);
  1060. int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
  1061. {
  1062. struct device *dev = tx_pipe->netcp_device->device;
  1063. struct knav_dma_cfg config;
  1064. int ret = 0;
  1065. u8 name[16];
  1066. memset(&config, 0, sizeof(config));
  1067. config.direction = DMA_MEM_TO_DEV;
  1068. config.u.tx.filt_einfo = false;
  1069. config.u.tx.filt_pswords = false;
  1070. config.u.tx.priority = DMA_PRIO_MED_L;
  1071. tx_pipe->dma_channel = knav_dma_open_channel(dev,
  1072. tx_pipe->dma_chan_name, &config);
  1073. if (IS_ERR_OR_NULL(tx_pipe->dma_channel)) {
  1074. dev_err(dev, "failed opening tx chan(%s)\n",
  1075. tx_pipe->dma_chan_name);
  1076. goto err;
  1077. }
  1078. snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev));
  1079. tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
  1080. KNAV_QUEUE_SHARED);
  1081. if (IS_ERR(tx_pipe->dma_queue)) {
   1082. ret = PTR_ERR(tx_pipe->dma_queue);
   1083. dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
   1084. name, ret);
  1085. goto err;
  1086. }
  1087. dev_dbg(dev, "opened tx pipe %s\n", name);
  1088. return 0;
  1089. err:
  1090. if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
  1091. knav_dma_close_channel(tx_pipe->dma_channel);
  1092. tx_pipe->dma_channel = NULL;
  1093. return ret;
  1094. }
  1095. EXPORT_SYMBOL_GPL(netcp_txpipe_open);
  1096. int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
  1097. struct netcp_device *netcp_device,
  1098. const char *dma_chan_name, unsigned int dma_queue_id)
  1099. {
  1100. memset(tx_pipe, 0, sizeof(*tx_pipe));
  1101. tx_pipe->netcp_device = netcp_device;
  1102. tx_pipe->dma_chan_name = dma_chan_name;
  1103. tx_pipe->dma_queue_id = dma_queue_id;
  1104. return 0;
  1105. }
  1106. EXPORT_SYMBOL_GPL(netcp_txpipe_init);
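/*
 * Illustrative sketch only (not part of this driver): the usual Tx pipe
 * life cycle as driven by a module.  The channel name and queue id are
 * placeholders supplied by the caller; in practice they come from the
 * module's device tree properties.
 */
static int example_setup_tx_pipe(struct netcp_tx_pipe *tx_pipe,
				 struct netcp_device *netcp_device,
				 const char *dma_chan_name,
				 unsigned int dma_queue_id)
{
	int ret;

	/* record the DMA channel name and the transmit queue id */
	ret = netcp_txpipe_init(tx_pipe, netcp_device, dma_chan_name,
				dma_queue_id);
	if (ret)
		return ret;

	/* open the DMA channel and the transmit queue */
	ret = netcp_txpipe_open(tx_pipe);
	if (ret)
		return ret;

	/* packets are then steered here by a Tx hook setting p_info->tx_pipe;
	 * teardown is a matching netcp_txpipe_close(tx_pipe).
	 */
	return 0;
}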
  1107. static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp,
  1108. const u8 *addr,
  1109. enum netcp_addr_type type)
  1110. {
  1111. struct netcp_addr *naddr;
  1112. list_for_each_entry(naddr, &netcp->addr_list, node) {
  1113. if (naddr->type != type)
  1114. continue;
  1115. if (addr && memcmp(addr, naddr->addr, ETH_ALEN))
  1116. continue;
  1117. return naddr;
  1118. }
  1119. return NULL;
  1120. }
  1121. static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
  1122. const u8 *addr,
  1123. enum netcp_addr_type type)
  1124. {
  1125. struct netcp_addr *naddr;
  1126. naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC);
  1127. if (!naddr)
  1128. return NULL;
  1129. naddr->type = type;
  1130. naddr->flags = 0;
  1131. naddr->netcp = netcp;
  1132. if (addr)
  1133. ether_addr_copy(naddr->addr, addr);
  1134. else
  1135. eth_zero_addr(naddr->addr);
  1136. list_add_tail(&naddr->node, &netcp->addr_list);
  1137. return naddr;
  1138. }
  1139. static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
  1140. {
  1141. list_del(&naddr->node);
  1142. devm_kfree(netcp->dev, naddr);
  1143. }
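/*
 * Added commentary: the interface address list is kept in sync with the
 * net_device by a simple mark and sweep.  netcp_set_rx_mode() clears all
 * flags, then marks every address currently on the device (ADDR_VALID for
 * entries already known, ADDR_NEW for ones just added), and finally the
 * sweep helpers below invoke each module's del_addr()/add_addr() for
 * entries that were removed or added.
 */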
  1144. static void netcp_addr_clear_mark(struct netcp_intf *netcp)
  1145. {
  1146. struct netcp_addr *naddr;
  1147. list_for_each_entry(naddr, &netcp->addr_list, node)
  1148. naddr->flags = 0;
  1149. }
  1150. static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr,
  1151. enum netcp_addr_type type)
  1152. {
  1153. struct netcp_addr *naddr;
  1154. naddr = netcp_addr_find(netcp, addr, type);
  1155. if (naddr) {
  1156. naddr->flags |= ADDR_VALID;
  1157. return;
  1158. }
  1159. naddr = netcp_addr_add(netcp, addr, type);
  1160. if (!WARN_ON(!naddr))
  1161. naddr->flags |= ADDR_NEW;
  1162. }
  1163. static void netcp_addr_sweep_del(struct netcp_intf *netcp)
  1164. {
  1165. struct netcp_addr *naddr, *tmp;
  1166. struct netcp_intf_modpriv *priv;
  1167. struct netcp_module *module;
  1168. int error;
  1169. list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
  1170. if (naddr->flags & (ADDR_VALID | ADDR_NEW))
  1171. continue;
  1172. dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
  1173. naddr->addr, naddr->type);
  1174. for_each_module(netcp, priv) {
  1175. module = priv->netcp_module;
  1176. if (!module->del_addr)
  1177. continue;
  1178. error = module->del_addr(priv->module_priv,
  1179. naddr);
  1180. WARN_ON(error);
  1181. }
  1182. netcp_addr_del(netcp, naddr);
  1183. }
  1184. }
  1185. static void netcp_addr_sweep_add(struct netcp_intf *netcp)
  1186. {
  1187. struct netcp_addr *naddr, *tmp;
  1188. struct netcp_intf_modpriv *priv;
  1189. struct netcp_module *module;
  1190. int error;
  1191. list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
  1192. if (!(naddr->flags & ADDR_NEW))
  1193. continue;
  1194. dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
  1195. naddr->addr, naddr->type);
  1196. for_each_module(netcp, priv) {
  1197. module = priv->netcp_module;
  1198. if (!module->add_addr)
  1199. continue;
  1200. error = module->add_addr(priv->module_priv, naddr);
  1201. WARN_ON(error);
  1202. }
  1203. }
  1204. }
  1205. static void netcp_set_rx_mode(struct net_device *ndev)
  1206. {
  1207. struct netcp_intf *netcp = netdev_priv(ndev);
  1208. struct netdev_hw_addr *ndev_addr;
  1209. bool promisc;
  1210. promisc = (ndev->flags & IFF_PROMISC ||
  1211. ndev->flags & IFF_ALLMULTI ||
  1212. netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);
  1213. spin_lock(&netcp->lock);
  1214. /* first clear all marks */
  1215. netcp_addr_clear_mark(netcp);
  1216. /* next add new entries, mark existing ones */
  1217. netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST);
  1218. for_each_dev_addr(ndev, ndev_addr)
  1219. netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV);
  1220. netdev_for_each_uc_addr(ndev_addr, ndev)
  1221. netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST);
  1222. netdev_for_each_mc_addr(ndev_addr, ndev)
  1223. netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST);
  1224. if (promisc)
  1225. netcp_addr_add_mark(netcp, NULL, ADDR_ANY);
  1226. /* finally sweep and callout into modules */
  1227. netcp_addr_sweep_del(netcp);
  1228. netcp_addr_sweep_add(netcp);
  1229. spin_unlock(&netcp->lock);
  1230. }
  1231. static void netcp_free_navigator_resources(struct netcp_intf *netcp)
  1232. {
  1233. int i;
  1234. if (netcp->rx_channel) {
  1235. knav_dma_close_channel(netcp->rx_channel);
  1236. netcp->rx_channel = NULL;
  1237. }
  1238. if (!IS_ERR_OR_NULL(netcp->rx_pool))
  1239. netcp_rxpool_free(netcp);
  1240. if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
  1241. knav_queue_close(netcp->rx_queue);
  1242. netcp->rx_queue = NULL;
  1243. }
  1244. for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
  1245. !IS_ERR_OR_NULL(netcp->rx_fdq[i]) ; ++i) {
  1246. knav_queue_close(netcp->rx_fdq[i]);
  1247. netcp->rx_fdq[i] = NULL;
  1248. }
  1249. if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
  1250. knav_queue_close(netcp->tx_compl_q);
  1251. netcp->tx_compl_q = NULL;
  1252. }
  1253. if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
  1254. knav_pool_destroy(netcp->tx_pool);
  1255. netcp->tx_pool = NULL;
  1256. }
  1257. }
  1258. static int netcp_setup_navigator_resources(struct net_device *ndev)
  1259. {
  1260. struct netcp_intf *netcp = netdev_priv(ndev);
  1261. struct knav_queue_notify_config notify_cfg;
  1262. struct knav_dma_cfg config;
  1263. u32 last_fdq = 0;
  1264. u8 name[16];
  1265. int ret;
  1266. int i;
  1267. /* Create Rx/Tx descriptor pools */
  1268. snprintf(name, sizeof(name), "rx-pool-%s", ndev->name);
  1269. netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
  1270. netcp->rx_pool_region_id);
  1271. if (IS_ERR_OR_NULL(netcp->rx_pool)) {
  1272. dev_err(netcp->ndev_dev, "Couldn't create rx pool\n");
  1273. ret = PTR_ERR(netcp->rx_pool);
  1274. goto fail;
  1275. }
  1276. snprintf(name, sizeof(name), "tx-pool-%s", ndev->name);
  1277. netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size,
  1278. netcp->tx_pool_region_id);
  1279. if (IS_ERR_OR_NULL(netcp->tx_pool)) {
  1280. dev_err(netcp->ndev_dev, "Couldn't create tx pool\n");
  1281. ret = PTR_ERR(netcp->tx_pool);
  1282. goto fail;
  1283. }
  1284. /* open Tx completion queue */
  1285. snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
  1286. netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
  1287. if (IS_ERR_OR_NULL(netcp->tx_compl_q)) {
  1288. ret = PTR_ERR(netcp->tx_compl_q);
  1289. goto fail;
  1290. }
  1291. netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q);
  1292. /* Set notification for Tx completion */
  1293. notify_cfg.fn = netcp_tx_notify;
  1294. notify_cfg.fn_arg = netcp;
  1295. ret = knav_queue_device_control(netcp->tx_compl_q,
  1296. KNAV_QUEUE_SET_NOTIFIER,
  1297. (unsigned long)&notify_cfg);
  1298. if (ret)
  1299. goto fail;
  1300. knav_queue_disable_notify(netcp->tx_compl_q);
  1301. /* open Rx completion queue */
  1302. snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
  1303. netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
  1304. if (IS_ERR_OR_NULL(netcp->rx_queue)) {
  1305. ret = PTR_ERR(netcp->rx_queue);
  1306. goto fail;
  1307. }
  1308. netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue);
  1309. /* Set notification for Rx completion */
  1310. notify_cfg.fn = netcp_rx_notify;
  1311. notify_cfg.fn_arg = netcp;
  1312. ret = knav_queue_device_control(netcp->rx_queue,
  1313. KNAV_QUEUE_SET_NOTIFIER,
  1314. (unsigned long)&notify_cfg);
  1315. if (ret)
  1316. goto fail;
  1317. knav_queue_disable_notify(netcp->rx_queue);
  1318. /* open Rx FDQs */
  1319. for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
  1320. ++i) {
  1321. snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
  1322. netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
  1323. if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
  1324. ret = PTR_ERR(netcp->rx_fdq[i]);
  1325. goto fail;
  1326. }
  1327. }
  1328. memset(&config, 0, sizeof(config));
  1329. config.direction = DMA_DEV_TO_MEM;
  1330. config.u.rx.einfo_present = true;
  1331. config.u.rx.psinfo_present = true;
  1332. config.u.rx.err_mode = DMA_DROP;
  1333. config.u.rx.desc_type = DMA_DESC_HOST;
  1334. config.u.rx.psinfo_at_sop = false;
  1335. config.u.rx.sop_offset = NETCP_SOP_OFFSET;
  1336. config.u.rx.dst_q = netcp->rx_queue_id;
  1337. config.u.rx.thresh = DMA_THRESH_NONE;
  1338. for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
  1339. if (netcp->rx_fdq[i])
  1340. last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
  1341. config.u.rx.fdq[i] = last_fdq;
  1342. }
  1343. netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
  1344. netcp->dma_chan_name, &config);
  1345. if (IS_ERR_OR_NULL(netcp->rx_channel)) {
   1346. dev_err(netcp->ndev_dev, "failed opening rx chan(%s)\n",
  1347. netcp->dma_chan_name);
  1348. goto fail;
  1349. }
  1350. dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel);
  1351. return 0;
  1352. fail:
  1353. netcp_free_navigator_resources(netcp);
  1354. return ret;
  1355. }

/* Open the device */
static int netcp_ndo_open(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret;

	netif_carrier_off(ndev);
	ret = netcp_setup_navigator_resources(ndev);
	if (ret) {
		dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n");
		goto fail;
	}

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->open) {
			ret = module->open(intf_modpriv->module_priv, ndev);
			if (ret != 0) {
				dev_err(netcp->ndev_dev, "module open failed\n");
				goto fail_open;
			}
		}
	}
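
	/* Every module opened successfully: enable NAPI, arm the completion
	 * queue notifiers, refill the Rx descriptor pool and start the Tx
	 * queues.
	 */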
	napi_enable(&netcp->rx_napi);
	napi_enable(&netcp->tx_napi);
	knav_queue_enable_notify(netcp->tx_compl_q);
	knav_queue_enable_notify(netcp->rx_queue);
	netcp_rxpool_refill(netcp);
	netif_tx_wake_all_queues(ndev);
	dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
	return 0;

fail_open:
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close)
			module->close(intf_modpriv->module_priv, ndev);
	}

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}

/* Close the device */
static int netcp_ndo_stop(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int err = 0;

	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netcp_addr_clear_mark(netcp);
	netcp_addr_sweep_del(netcp);
	knav_queue_disable_notify(netcp->rx_queue);
	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_disable(&netcp->rx_napi);
	napi_disable(&netcp->tx_napi);
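
	/* With notifications off and NAPI stopped, let each module close its
	 * part of the interface before the descriptors are reclaimed.
	 */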
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close) {
			err = module->close(intf_modpriv->module_priv, ndev);
			if (err != 0)
				dev_err(netcp->ndev_dev, "Close failed\n");
		}
	}

	/* Recycle Rx descriptors from completion queue */
	netcp_empty_rx_queue(netcp);

	/* Recycle Tx descriptors from completion queue */
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);

	if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
		dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
			netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));

	netcp_free_navigator_resources(netcp);
	dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
	return 0;
}

static int netcp_ndo_ioctl(struct net_device *ndev,
			   struct ifreq *req, int cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret = -1, err = -EOPNOTSUPP;

	if (!netif_running(ndev))
		return -EINVAL;
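
	/* Offer the ioctl to each module in turn: a hard error (anything
	 * other than -EOPNOTSUPP) aborts immediately, a single success makes
	 * the whole call succeed, otherwise the last error is returned.
	 */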
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (!module->ioctl)
			continue;

		err = module->ioctl(intf_modpriv->module_priv, req, cmd);
		if ((err < 0) && (err != -EOPNOTSUPP)) {
			ret = err;
			goto out;
		}
		if (err == 0)
			ret = err;
	}

out:
	return (ret == 0) ? 0 : err;
}

static int netcp_ndo_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct netcp_intf *netcp = netdev_priv(ndev);

	/* MTU < 68 is an error for IPv4 traffic */
	if ((new_mtu < 68) ||
	    (new_mtu > (NETCP_MAX_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN))) {
		dev_err(netcp->ndev_dev, "Invalid mtu size = %d\n", new_mtu);
		return -EINVAL;
	}

	ndev->mtu = new_mtu;
	return 0;
}

static void netcp_ndo_tx_timeout(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	unsigned int descs = knav_pool_count(netcp->tx_pool);

	dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
	ndev->trans_start = jiffies;
	netif_tx_wake_all_queues(ndev);
}

static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	unsigned long flags;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);

	spin_lock_irqsave(&netcp->lock, flags);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if ((module->add_vid) && (vid != 0)) {
			err = module->add_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&netcp->lock, flags);

	return err;
}

static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	unsigned long flags;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);

	spin_lock_irqsave(&netcp->lock, flags);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->del_vid) {
			err = module->del_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&netcp->lock, flags);

	return err;
}

static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
			      void *accel_priv,
			      select_queue_fallback_t fallback)
{
	return 0;
}

static int netcp_setup_tc(struct net_device *dev, u8 num_tc)
{
	int i;

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	/* Sanity-check the number of traffic classes requested */
	if ((dev->real_num_tx_queues <= 1) ||
	    (dev->real_num_tx_queues < num_tc))
		return -EINVAL;

	/* Configure traffic class to queue mappings */
	if (num_tc) {
		netdev_set_num_tc(dev, num_tc);
		for (i = 0; i < num_tc; i++)
			netdev_set_tc_queue(dev, i, 1, i);
	} else {
		netdev_reset_tc(dev);
	}

	return 0;
}

static const struct net_device_ops netcp_netdev_ops = {
	.ndo_open		= netcp_ndo_open,
	.ndo_stop		= netcp_ndo_stop,
	.ndo_start_xmit		= netcp_ndo_start_xmit,
	.ndo_set_rx_mode	= netcp_set_rx_mode,
	.ndo_do_ioctl		= netcp_ndo_ioctl,
	.ndo_change_mtu		= netcp_ndo_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= netcp_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= netcp_rx_kill_vid,
	.ndo_tx_timeout		= netcp_ndo_tx_timeout,
	.ndo_select_queue	= netcp_select_queue,
	.ndo_setup_tc		= netcp_setup_tc,
};

static int netcp_create_interface(struct netcp_device *netcp_device,
				  struct device_node *node_interface)
{
	struct device *dev = netcp_device->device;
	struct device_node *node = dev->of_node;
	struct netcp_intf *netcp;
	struct net_device *ndev;
	resource_size_t size;
	struct resource res;
	void __iomem *efuse = NULL;
	u32 efuse_mac = 0;
	const void *mac_addr;
	u8 efuse_mac_addr[6];
	u32 temp[2];
	int ret = 0;

	ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1);
	if (!ndev) {
		dev_err(dev, "Error allocating netdev\n");
		return -ENOMEM;
	}

	ndev->features |= NETIF_F_SG;
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->hw_features = ndev->features;
	ndev->vlan_features |= NETIF_F_SG;

	netcp = netdev_priv(ndev);
	spin_lock_init(&netcp->lock);
	INIT_LIST_HEAD(&netcp->module_head);
	INIT_LIST_HEAD(&netcp->txhook_list_head);
	INIT_LIST_HEAD(&netcp->rxhook_list_head);
	INIT_LIST_HEAD(&netcp->addr_list);

	netcp->netcp_device = netcp_device;
	netcp->dev = netcp_device->device;
	netcp->ndev = ndev;
	netcp->ndev_dev = &ndev->dev;
	netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG);
	netcp->tx_pause_threshold = MAX_SKB_FRAGS;
	netcp->tx_resume_threshold = netcp->tx_pause_threshold;
	netcp->node_interface = node_interface;
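
	/* The MAC address comes from the e-fuse register window when the
	 * interface node carries an "efuse-mac" property; otherwise it is
	 * taken from the DT MAC properties, with a random address as the
	 * final fallback.
	 */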
	ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac);
	if (efuse_mac) {
		if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
			dev_err(dev, "could not find efuse-mac reg resource\n");
			ret = -ENODEV;
			goto quit;
		}
		size = resource_size(&res);

		if (!devm_request_mem_region(dev, res.start, size,
					     dev_name(dev))) {
			dev_err(dev, "could not reserve resource\n");
			ret = -ENOMEM;
			goto quit;
		}

		efuse = devm_ioremap_nocache(dev, res.start, size);
		if (!efuse) {
			dev_err(dev, "could not map resource\n");
			devm_release_mem_region(dev, res.start, size);
			ret = -ENOMEM;
			goto quit;
		}

		emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
		if (is_valid_ether_addr(efuse_mac_addr))
			ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
		else
			random_ether_addr(ndev->dev_addr);

		devm_iounmap(dev, efuse);
		devm_release_mem_region(dev, res.start, size);
	} else {
		mac_addr = of_get_mac_address(node_interface);
		if (mac_addr)
			ether_addr_copy(ndev->dev_addr, mac_addr);
		else
			random_ether_addr(ndev->dev_addr);
	}

	ret = of_property_read_string(node_interface, "rx-channel",
				      &netcp->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-channel\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "rx-queue",
				   &netcp->rx_queue_id);
	if (ret < 0) {
		dev_warn(dev, "missing \"rx-queue\" parameter\n");
		netcp->rx_queue_id = KNAV_QUEUE_QPEND;
	}

	ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
					 netcp->rx_queue_depths,
					 KNAV_DMA_FDQ_PER_CHAN);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
		netcp->rx_queue_depths[0] = 128;
	}

	ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->rx_pool_size = temp[0];
	netcp->rx_pool_region_id = temp[1];

	ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->tx_pool_size = temp[0];
	netcp->tx_pool_region_id = temp[1];

	if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
		dev_err(dev, "tx-pool size too small, must be at least (%ld)\n",
			MAX_SKB_FRAGS);
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "tx-completion-queue",
				   &netcp->tx_compl_qid);
	if (ret < 0) {
		dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
		netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
	}

	/* NAPI register */
	netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT);
	netif_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT);

	/* Register the network device */
	ndev->dev_id = 0;
	ndev->watchdog_timeo = NETCP_TX_TIMEOUT;
	ndev->netdev_ops = &netcp_netdev_ops;
	SET_NETDEV_DEV(ndev, dev);

	list_add_tail(&netcp->interface_list, &netcp_device->interface_head);
	return 0;

quit:
	free_netdev(ndev);
	return ret;
}

static void netcp_delete_interface(struct netcp_device *netcp_device,
				   struct net_device *ndev)
{
	struct netcp_intf_modpriv *intf_modpriv, *tmp;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_module *module;

	dev_dbg(netcp_device->device, "Removing interface \"%s\"\n",
		ndev->name);

	/* Notify each of the modules that the interface is going away */
	list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head,
				 intf_list) {
		module = intf_modpriv->netcp_module;
		dev_dbg(netcp_device->device, "Releasing module \"%s\"\n",
			module->name);
		if (module->release)
			module->release(intf_modpriv->module_priv);
		list_del(&intf_modpriv->intf_list);
		kfree(intf_modpriv);
	}
	WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
	     ndev->name);

	list_del(&netcp->interface_list);

	of_node_put(netcp->node_interface);
	unregister_netdev(ndev);
	netif_napi_del(&netcp->rx_napi);
	free_netdev(ndev);
}

static int netcp_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device_node *child, *interfaces;
	struct netcp_device *netcp_device;
	struct device *dev = &pdev->dev;
	int ret;

	if (!node) {
		dev_err(dev, "could not find device info\n");
		return -ENODEV;
	}

	/* Allocate a new NETCP device instance */
	netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL);
	if (!netcp_device)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable NETCP power-domain\n");
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* Initialize the NETCP device instance */
	INIT_LIST_HEAD(&netcp_device->interface_head);
	INIT_LIST_HEAD(&netcp_device->modpriv_head);
	netcp_device->device = dev;
	platform_set_drvdata(pdev, netcp_device);

	/* create interfaces */
	interfaces = of_get_child_by_name(node, "netcp-interfaces");
	if (!interfaces) {
		dev_err(dev, "could not find netcp-interfaces node\n");
		ret = -ENODEV;
		goto probe_quit;
	}
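
	/* Create one network interface per child of "netcp-interfaces";
	 * any failure tears down the interfaces created so far.
	 */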
	for_each_available_child_of_node(interfaces, child) {
		ret = netcp_create_interface(netcp_device, child);
		if (ret) {
			dev_err(dev, "could not create interface(%s)\n",
				child->name);
			goto probe_quit_interface;
		}
	}

	/* Add the device instance to the list */
	list_add_tail(&netcp_device->device_list, &netcp_devices);
	return 0;

probe_quit_interface:
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

probe_quit:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return ret;
}

static int netcp_remove(struct platform_device *pdev)
{
	struct netcp_device *netcp_device = platform_get_drvdata(pdev);
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct netcp_inst_modpriv *inst_modpriv, *tmp;
	struct netcp_module *module;

	list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
				 inst_list) {
		module = inst_modpriv->netcp_module;
		dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
		module->remove(netcp_device, inst_modpriv->module_priv);
		list_del(&inst_modpriv->inst_list);
		kfree(inst_modpriv);
	}

	/* now that all modules are removed, clean up the interfaces */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

	WARN(!list_empty(&netcp_device->interface_head),
	     "%s interface list not empty!\n", pdev->name);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}

static const struct of_device_id of_match[] = {
	{ .compatible = "ti,netcp-1.0", },
	{},
};
MODULE_DEVICE_TABLE(of, of_match);

static struct platform_driver netcp_driver = {
	.driver = {
		.name		= "netcp-1.0",
		.of_match_table	= of_match,
	},
	.probe = netcp_probe,
	.remove = netcp_remove,
};
module_platform_driver(netcp_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");