xilinx_can.c

/* Xilinx CAN device driver
 *
 * Copyright (C) 2012 - 2014 Xilinx, Inc.
 * Copyright (C) 2009 PetaLogix. All rights reserved.
 * Copyright (C) 2017 Sandvik Mining and Construction Oy
 *
 * Description:
 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/led.h>

#define DRIVER_NAME	"xilinx_can"

/* CAN registers set */
enum xcan_reg {
	XCAN_SRR_OFFSET = 0x00, /* Software reset */
	XCAN_MSR_OFFSET = 0x04, /* Mode select */
	XCAN_BRPR_OFFSET = 0x08, /* Baud rate prescaler */
	XCAN_BTR_OFFSET = 0x0C, /* Bit timing */
	XCAN_ECR_OFFSET = 0x10, /* Error counter */
	XCAN_ESR_OFFSET = 0x14, /* Error status */
	XCAN_SR_OFFSET = 0x18, /* Status */
	XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */
	XCAN_IER_OFFSET = 0x20, /* Interrupt enable */
	XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */
	XCAN_TXFIFO_ID_OFFSET = 0x30, /* TX FIFO ID */
	XCAN_TXFIFO_DLC_OFFSET = 0x34, /* TX FIFO DLC */
	XCAN_TXFIFO_DW1_OFFSET = 0x38, /* TX FIFO Data Word 1 */
	XCAN_TXFIFO_DW2_OFFSET = 0x3C, /* TX FIFO Data Word 2 */
	XCAN_RXFIFO_ID_OFFSET = 0x50, /* RX FIFO ID */
	XCAN_RXFIFO_DLC_OFFSET = 0x54, /* RX FIFO DLC */
	XCAN_RXFIFO_DW1_OFFSET = 0x58, /* RX FIFO Data Word 1 */
	XCAN_RXFIFO_DW2_OFFSET = 0x5C, /* RX FIFO Data Word 2 */
};

/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
#define XCAN_SRR_CEN_MASK	0x00000002 /* CAN enable */
#define XCAN_SRR_RESET_MASK	0x00000001 /* Soft Reset the CAN core */
#define XCAN_MSR_LBACK_MASK	0x00000002 /* Loop back mode select */
#define XCAN_MSR_SLEEP_MASK	0x00000001 /* Sleep mode select */
#define XCAN_BRPR_BRP_MASK	0x000000FF /* Baud rate prescaler */
#define XCAN_BTR_SJW_MASK	0x00000180 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK	0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK	0x0000000F /* Time segment 1 */
#define XCAN_ECR_REC_MASK	0x0000FF00 /* Receive error counter */
#define XCAN_ECR_TEC_MASK	0x000000FF /* Transmit error counter */
#define XCAN_ESR_ACKER_MASK	0x00000010 /* ACK error */
#define XCAN_ESR_BERR_MASK	0x00000008 /* Bit error */
#define XCAN_ESR_STER_MASK	0x00000004 /* Stuff error */
#define XCAN_ESR_FMER_MASK	0x00000002 /* Form error */
#define XCAN_ESR_CRCER_MASK	0x00000001 /* CRC error */
#define XCAN_SR_TXFLL_MASK	0x00000400 /* TX FIFO is full */
#define XCAN_SR_ESTAT_MASK	0x00000180 /* Error status */
#define XCAN_SR_ERRWRN_MASK	0x00000040 /* Error warning */
#define XCAN_SR_NORMAL_MASK	0x00000008 /* Normal mode */
#define XCAN_SR_LBACK_MASK	0x00000002 /* Loop back mode */
#define XCAN_SR_CONFIG_MASK	0x00000001 /* Configuration mode */
#define XCAN_IXR_TXFEMP_MASK	0x00004000 /* TX FIFO Empty */
#define XCAN_IXR_WKUP_MASK	0x00000800 /* Wake up interrupt */
#define XCAN_IXR_SLP_MASK	0x00000400 /* Sleep interrupt */
#define XCAN_IXR_BSOFF_MASK	0x00000200 /* Bus off interrupt */
#define XCAN_IXR_ERROR_MASK	0x00000100 /* Error interrupt */
#define XCAN_IXR_RXNEMP_MASK	0x00000080 /* RX FIFO NotEmpty intr */
#define XCAN_IXR_RXOFLW_MASK	0x00000040 /* RX FIFO Overflow intr */
#define XCAN_IXR_RXOK_MASK	0x00000010 /* Message received intr */
#define XCAN_IXR_TXFLL_MASK	0x00000004 /* Tx FIFO Full intr */
#define XCAN_IXR_TXOK_MASK	0x00000002 /* TX successful intr */
#define XCAN_IXR_ARBLST_MASK	0x00000001 /* Arbitration lost intr */
#define XCAN_IDR_ID1_MASK	0xFFE00000 /* Standard msg identifier */
#define XCAN_IDR_SRR_MASK	0x00100000 /* Substitute remote TXreq */
#define XCAN_IDR_IDE_MASK	0x00080000 /* Identifier extension */
#define XCAN_IDR_ID2_MASK	0x0007FFFE /* Extended message ident */
#define XCAN_IDR_RTR_MASK	0x00000001 /* Remote TX request */
#define XCAN_DLCR_DLC_MASK	0xF0000000 /* Data length code */

#define XCAN_INTR_ALL		(XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
				 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
				 XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
				 XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK)

/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT	7  /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT	4  /* Time segment 2 */
#define XCAN_IDR_ID1_SHIFT	21 /* Standard Messg Identifier */
#define XCAN_IDR_ID2_SHIFT	1  /* Extended Message Identifier */
#define XCAN_DLCR_DLC_SHIFT	28 /* Data length code */
#define XCAN_ESR_REC_SHIFT	8  /* Rx Error Count */
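
/* Illustrative note (hypothetical value, not from the original source): the
 * error counter register packs both counters into one word. For example, a
 * raw ECR reading of 0x00001234 decodes as
 *   REC = (0x00001234 & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT = 0x12
 *   TEC =  0x00001234 & XCAN_ECR_TEC_MASK                        = 0x34
 * which is how xcan_get_berr_counter() and xcan_set_error_state() below
 * recover the receive and transmit error counts.
 */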

/* CAN frame length constants */
#define XCAN_FRAME_MAX_DATA_LEN	8
#define XCAN_TIMEOUT		(1 * HZ)

/**
 * struct xcan_priv - This definition defines the CAN driver instance
 * @can: CAN private data structure.
 * @tx_lock: Lock for synchronizing TX interrupt handling
 * @tx_head: Tx CAN packets ready to send on the queue
 * @tx_tail: Tx CAN packets successfully sent on the queue
 * @tx_max: Maximum number packets the driver can send
 * @napi: NAPI structure
 * @read_reg: For reading data from CAN registers
 * @write_reg: For writing data to CAN registers
 * @dev: Network device data structure
 * @reg_base: Ioremapped address to registers
 * @irq_flags: For request_irq()
 * @bus_clk: Pointer to struct clk
 * @can_clk: Pointer to struct clk
 */
struct xcan_priv {
	struct can_priv can;
	spinlock_t tx_lock;
	unsigned int tx_head;
	unsigned int tx_tail;
	unsigned int tx_max;
	struct napi_struct napi;
	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
			  u32 val);
	struct net_device *dev;
	void __iomem *reg_base;
	unsigned long irq_flags;
	struct clk *bus_clk;
	struct clk *can_clk;
};

/* CAN Bittiming constants as per Xilinx CAN specs */
static const struct can_bittiming_const xcan_bittiming_const = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

#define XCAN_CAP_WATERMARK	0x0001

struct xcan_devtype_data {
	unsigned int caps;
};

/**
 * xcan_write_reg_le - Write a value to the device register little endian
 * @priv: Driver private data structure
 * @reg: Register offset
 * @val: Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32(val, priv->reg_base + reg);
}

/**
 * xcan_read_reg_le - Read a value from the device register little endian
 * @priv: Driver private data structure
 * @reg: Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32(priv->reg_base + reg);
}

/**
 * xcan_write_reg_be - Write a value to the device register big endian
 * @priv: Driver private data structure
 * @reg: Register offset
 * @val: Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32be(val, priv->reg_base + reg);
}

/**
 * xcan_read_reg_be - Read a value from the device register big endian
 * @priv: Driver private data structure
 * @reg: Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32be(priv->reg_base + reg);
}

/**
 * set_reset_mode - Resets the CAN device mode
 * @ndev: Pointer to net_device structure
 *
 * This is the driver reset mode routine. The driver
 * enters into configuration mode.
 *
 * Return: 0 on success and failure value on error
 */
static int set_reset_mode(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long timeout;

	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);

	timeout = jiffies + XCAN_TIMEOUT;
	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
		if (time_after(jiffies, timeout)) {
			netdev_warn(ndev, "timed out for config mode\n");
			return -ETIMEDOUT;
		}
		usleep_range(500, 10000);
	}

	/* reset clears FIFOs */
	priv->tx_head = 0;
	priv->tx_tail = 0;

	return 0;
}

/**
 * xcan_set_bittiming - CAN set bit timing routine
 * @ndev: Pointer to net_device structure
 *
 * This is the driver set bittiming routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_set_bittiming(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct can_bittiming *bt = &priv->can.bittiming;
	u32 btr0, btr1;
	u32 is_config_mode;

	/* Check whether Xilinx CAN is in configuration mode.
	 * It cannot set bit timing if Xilinx CAN is not in configuration mode.
	 */
	is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
			 XCAN_SR_CONFIG_MASK;
	if (!is_config_mode) {
		netdev_alert(ndev,
			     "BUG! Cannot set bittiming - CAN is not in config mode\n");
		return -EPERM;
	}

	/* Setting Baud Rate prescaler value in BRPR Register */
	btr0 = (bt->brp - 1);

	/* Setting Time Segment 1 in BTR Register */
	btr1 = (bt->prop_seg + bt->phase_seg1 - 1);

	/* Setting Time Segment 2 in BTR Register */
	btr1 |= (bt->phase_seg2 - 1) << XCAN_BTR_TS2_SHIFT;

	/* Setting Synchronous jump width in BTR Register */
	btr1 |= (bt->sjw - 1) << XCAN_BTR_SJW_SHIFT;

	priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
	priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);

	netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
		   priv->read_reg(priv, XCAN_BRPR_OFFSET),
		   priv->read_reg(priv, XCAN_BTR_OFFSET));

	return 0;
}
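
/* Worked example with hypothetical timing values (not taken from the
 * original source): with brp = 4, prop_seg + phase_seg1 = 11, phase_seg2 = 4
 * and sjw = 1, xcan_set_bittiming() above programs
 *   BRPR = brp - 1                                             = 3
 *   BTR  = (sjw - 1) << 7 | (phase_seg2 - 1) << 4 | (tseg1 - 1)
 *        = 0 | 0x30 | 0x0A                                     = 0x3A
 * i.e. every field is written as "value minus one", matching the register
 * layout described by the XCAN_BTR_* masks above.
 */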

/**
 * xcan_chip_start - This is the driver's start routine
 * @ndev: Pointer to net_device structure
 *
 * This is the driver's start routine.
 * Based on the state of the CAN device it puts
 * the CAN device into a proper mode.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_chip_start(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 reg_msr, reg_sr_mask;
	int err;
	unsigned long timeout;

	/* Check if it is in reset mode */
	err = set_reset_mode(ndev);
	if (err < 0)
		return err;

	err = xcan_set_bittiming(ndev);
	if (err < 0)
		return err;

	/* Enable interrupts */
	priv->write_reg(priv, XCAN_IER_OFFSET, XCAN_INTR_ALL);

	/* Check whether it is loopback mode or normal mode */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
		reg_msr = XCAN_MSR_LBACK_MASK;
		reg_sr_mask = XCAN_SR_LBACK_MASK;
	} else {
		reg_msr = 0x0;
		reg_sr_mask = XCAN_SR_NORMAL_MASK;
	}

	priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);

	timeout = jiffies + XCAN_TIMEOUT;
	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & reg_sr_mask)) {
		if (time_after(jiffies, timeout)) {
			netdev_warn(ndev,
				    "timed out for correct mode\n");
			return -ETIMEDOUT;
		}
	}
	netdev_dbg(ndev, "status:0x%08x\n",
		   priv->read_reg(priv, XCAN_SR_OFFSET));

	priv->can.state = CAN_STATE_ERROR_ACTIVE;
	return 0;
}

/**
 * xcan_do_set_mode - This sets the mode of the driver
 * @ndev: Pointer to net_device structure
 * @mode: Tells the mode of the driver
 *
 * This checks the driver state and calls the
 * corresponding mode-setting routine.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
{
	int ret;

	switch (mode) {
	case CAN_MODE_START:
		ret = xcan_chip_start(ndev);
		if (ret < 0) {
			netdev_err(ndev, "xcan_chip_start failed!\n");
			return ret;
		}
		netif_wake_queue(ndev);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/**
 * xcan_start_xmit - Starts the transmission
 * @skb: sk_buff pointer that contains data to be Txed
 * @ndev: Pointer to net_device structure
 *
 * This function is invoked from upper layers to initiate transmission. This
 * function uses the next available free txbuff and populates its fields to
 * start the transmission.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf = (struct can_frame *)skb->data;
	u32 id, dlc, data[2] = {0, 0};
	unsigned long flags;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	/* Check if the TX buffer is full */
	if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
		     XCAN_SR_TXFLL_MASK)) {
		netif_stop_queue(ndev);
		netdev_err(ndev, "BUG!, TX FIFO full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	/* Watch carefully on the bit sequence */
	if (cf->can_id & CAN_EFF_FLAG) {
		/* Extended CAN ID format */
		id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
			XCAN_IDR_ID2_MASK;
		id |= (((cf->can_id & CAN_EFF_MASK) >>
			(CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
			XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;

		/* The substitute remote TX request bit should be "1"
		 * for extended frames as in the Xilinx CAN datasheet
		 */
		id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Extended frames remote TX request */
			id |= XCAN_IDR_RTR_MASK;
	} else {
		/* Standard CAN ID format */
		id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
			XCAN_IDR_ID1_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Standard frames remote TX request */
			id |= XCAN_IDR_SRR_MASK;
	}

	dlc = cf->can_dlc << XCAN_DLCR_DLC_SHIFT;

	if (cf->can_dlc > 0)
		data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
	if (cf->can_dlc > 4)
		data[1] = be32_to_cpup((__be32 *)(cf->data + 4));

	can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);

	spin_lock_irqsave(&priv->tx_lock, flags);

	priv->tx_head++;

	/* Write the Frame to Xilinx CAN TX FIFO */
	priv->write_reg(priv, XCAN_TXFIFO_ID_OFFSET, id);
	/* If the CAN frame is RTR frame this write triggers transmission */
	priv->write_reg(priv, XCAN_TXFIFO_DLC_OFFSET, dlc);
	if (!(cf->can_id & CAN_RTR_FLAG)) {
		priv->write_reg(priv, XCAN_TXFIFO_DW1_OFFSET, data[0]);
		/* If the CAN frame is Standard/Extended frame this
		 * write triggers transmission
		 */
		priv->write_reg(priv, XCAN_TXFIFO_DW2_OFFSET, data[1]);
		stats->tx_bytes += cf->can_dlc;
	}

	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
	if (priv->tx_max > 1)
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);

	/* Check if the TX buffer is full */
	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
		netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return NETDEV_TX_OK;
}
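
/* Worked example of the ID and DLC packing done in xcan_start_xmit() above
 * (hypothetical identifiers, not from the original source):
 *
 *   Standard frame, can_id = 0x123, can_dlc = 8:
 *     id  = (0x123 << XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK = 0x24600000
 *     dlc = 8 << XCAN_DLCR_DLC_SHIFT                          = 0x80000000
 *
 *   Extended frame, 29-bit identifier 0x12345678:
 *     ID2 = (0x12345678 << 1) & XCAN_IDR_ID2_MASK             = 0x0000ACF0
 *     ID1 = ((0x12345678 >> 18) << 21) & XCAN_IDR_ID1_MASK    = 0x91A00000
 *     id  = ID1 | ID2 | XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK = 0x91B8ACF0
 *
 * xcan_rx() below performs the inverse mapping when draining the RX FIFO.
 */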

/**
 * xcan_rx - Is called from CAN isr to complete the received
 *	     frame processing
 * @ndev: Pointer to net_device structure
 *
 * This function is invoked from the CAN isr(poll) to process the Rx frames. It
 * does minimal processing and invokes "netif_receive_skb" to complete further
 * processing.
 * Return: 1 on success and 0 on failure.
 */
static int xcan_rx(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0};

	skb = alloc_can_skb(ndev, &cf);
	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Read a frame from Xilinx zynq CANPS */
	id_xcan = priv->read_reg(priv, XCAN_RXFIFO_ID_OFFSET);
	dlc = priv->read_reg(priv, XCAN_RXFIFO_DLC_OFFSET) >>
			XCAN_DLCR_DLC_SHIFT;

	/* Change Xilinx CAN data length format to socketCAN data format */
	cf->can_dlc = get_can_dlc(dlc);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
				XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
				XCAN_IDR_ID1_SHIFT;
		if (id_xcan & XCAN_IDR_SRR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* DW1/DW2 must always be read to remove message from RXFIFO */
	data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET);
	data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET);

	if (!(cf->can_id & CAN_RTR_FLAG)) {
		/* Change Xilinx CAN data format to socketCAN data format */
		if (cf->can_dlc > 0)
			*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
		if (cf->can_dlc > 4)
			*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
	}

	stats->rx_bytes += cf->can_dlc;
	stats->rx_packets++;
	netif_receive_skb(skb);

	return 1;
}

/**
 * xcan_current_error_state - Get current error state from HW
 * @ndev: Pointer to net_device structure
 *
 * Checks the current CAN error state from the HW. Note that this
 * only checks for ERROR_PASSIVE and ERROR_WARNING.
 *
 * Return:
 * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
 * otherwise.
 */
static enum can_state xcan_current_error_state(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);

	if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
		return CAN_STATE_ERROR_PASSIVE;
	else if (status & XCAN_SR_ERRWRN_MASK)
		return CAN_STATE_ERROR_WARNING;
	else
		return CAN_STATE_ERROR_ACTIVE;
}
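
/* Illustrative decoding with hypothetical status values (not from the
 * original source): a status register reading of 0x00000188 has both
 * XCAN_SR_ESTAT_MASK bits set, so xcan_current_error_state() reports
 * ERROR_PASSIVE; a reading of 0x00000048 has only XCAN_SR_ERRWRN_MASK set
 * and is reported as ERROR_WARNING; anything matching neither pattern maps
 * to ERROR_ACTIVE.
 */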

/**
 * xcan_set_error_state - Set new CAN error state
 * @ndev: Pointer to net_device structure
 * @new_state: The new CAN state to be set
 * @cf: Error frame to be populated or NULL
 *
 * Set new CAN error state for the device, updating statistics and
 * populating the error frame if given.
 */
static void xcan_set_error_state(struct net_device *ndev,
				 enum can_state new_state,
				 struct can_frame *cf)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
	u32 txerr = ecr & XCAN_ECR_TEC_MASK;
	u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;

	priv->can.state = new_state;

	if (cf) {
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[6] = txerr;
		cf->data[7] = rxerr;
	}

	switch (new_state) {
	case CAN_STATE_ERROR_PASSIVE:
		priv->can.can_stats.error_passive++;
		if (cf)
			cf->data[1] = (rxerr > 127) ?
					CAN_ERR_CRTL_RX_PASSIVE :
					CAN_ERR_CRTL_TX_PASSIVE;
		break;
	case CAN_STATE_ERROR_WARNING:
		priv->can.can_stats.error_warning++;
		if (cf)
			cf->data[1] |= (txerr > rxerr) ?
					CAN_ERR_CRTL_TX_WARNING :
					CAN_ERR_CRTL_RX_WARNING;
		break;
	case CAN_STATE_ERROR_ACTIVE:
		if (cf)
			cf->data[1] |= CAN_ERR_CRTL_ACTIVE;
		break;
	default:
		/* non-ERROR states are handled elsewhere */
		WARN_ON(1);
		break;
	}
}

/**
 * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
 * @ndev: Pointer to net_device structure
 *
 * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
 * the performed RX/TX has caused it to drop to a lesser state and set
 * the interface state accordingly.
 */
static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	enum can_state old_state = priv->can.state;
	enum can_state new_state;

	/* changing error state due to successful frame RX/TX can only
	 * occur from these states
	 */
	if (old_state != CAN_STATE_ERROR_WARNING &&
	    old_state != CAN_STATE_ERROR_PASSIVE)
		return;

	new_state = xcan_current_error_state(ndev);

	if (new_state != old_state) {
		struct sk_buff *skb;
		struct can_frame *cf;

		skb = alloc_can_err_skb(ndev, &cf);

		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);

		if (skb) {
			struct net_device_stats *stats = &ndev->stats;

			stats->rx_packets++;
			stats->rx_bytes += cf->can_dlc;
			netif_rx(skb);
		}
	}
}

/**
 * xcan_err_interrupt - error frame Isr
 * @ndev: net_device pointer
 * @isr: interrupt status register value
 *
 * This is the CAN error interrupt and it will
 * check the type of error and forward the error
 * frame to upper layers.
 */
static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 err_status;

	skb = alloc_can_err_skb(ndev, &cf);

	err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
	priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);

	if (isr & XCAN_IXR_BSOFF_MASK) {
		priv->can.state = CAN_STATE_BUS_OFF;
		priv->can.can_stats.bus_off++;
		/* Leave device in Config Mode in bus-off state */
		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
		can_bus_off(ndev);
		if (skb)
			cf->can_id |= CAN_ERR_BUSOFF;
	} else {
		enum can_state new_state = xcan_current_error_state(ndev);

		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
	}

	/* Check for Arbitration lost interrupt */
	if (isr & XCAN_IXR_ARBLST_MASK) {
		priv->can.can_stats.arbitration_lost++;
		if (skb) {
			cf->can_id |= CAN_ERR_LOSTARB;
			cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
		}
	}

	/* Check for RX FIFO Overflow interrupt */
	if (isr & XCAN_IXR_RXOFLW_MASK) {
		stats->rx_over_errors++;
		stats->rx_errors++;
		if (skb) {
			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
		}
	}

	/* Check for error interrupt */
	if (isr & XCAN_IXR_ERROR_MASK) {
		if (skb)
			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

		/* Check for Ack error interrupt */
		if (err_status & XCAN_ESR_ACKER_MASK) {
			stats->tx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_ACK;
				cf->data[3] = CAN_ERR_PROT_LOC_ACK;
			}
		}

		/* Check for Bit error interrupt */
		if (err_status & XCAN_ESR_BERR_MASK) {
			stats->tx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_PROT;
				cf->data[2] = CAN_ERR_PROT_BIT;
			}
		}

		/* Check for Stuff error interrupt */
		if (err_status & XCAN_ESR_STER_MASK) {
			stats->rx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_PROT;
				cf->data[2] = CAN_ERR_PROT_STUFF;
			}
		}

		/* Check for Form error interrupt */
		if (err_status & XCAN_ESR_FMER_MASK) {
			stats->rx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_PROT;
				cf->data[2] = CAN_ERR_PROT_FORM;
			}
		}

		/* Check for CRC error interrupt */
		if (err_status & XCAN_ESR_CRCER_MASK) {
			stats->rx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_PROT;
				cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
			}
		}
		priv->can.can_stats.bus_error++;
	}

	if (skb) {
		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;
		netif_rx(skb);
	}

	netdev_dbg(ndev, "%s: error status register:0x%x\n",
		   __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
}

/**
 * xcan_state_interrupt - It will check the state of the CAN device
 * @ndev: net_device pointer
 * @isr: interrupt status register value
 *
 * This checks the state of the CAN device
 * and puts the device into appropriate state.
 */
static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Check for Sleep interrupt if set put CAN device in sleep state */
	if (isr & XCAN_IXR_SLP_MASK)
		priv->can.state = CAN_STATE_SLEEPING;

	/* Check for Wake up interrupt if set put CAN device in Active state */
	if (isr & XCAN_IXR_WKUP_MASK)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
}

/**
 * xcan_rx_poll - Poll routine for rx packets (NAPI)
 * @napi: napi structure pointer
 * @quota: Max number of rx packets to be processed.
 *
 * This is the poll routine for rx part.
 * It will process at most quota number of packets.
 *
 * Return: number of packets received
 */
static int xcan_rx_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 isr, ier;
	int work_done = 0;

	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
		work_done += xcan_rx(ndev);
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
		isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	}

	if (work_done) {
		can_led_event(ndev, CAN_LED_EVENT_RX);
		xcan_update_error_state_after_rxtx(ndev);
	}

	if (work_done < quota) {
		napi_complete(napi);
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier |= XCAN_IXR_RXNEMP_MASK;
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
	}
	return work_done;
}
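
/* Explanatory note (added commentary, not part of the original source):
 * xcan_rx_poll() re-enables the RXNEMP bit in IER only once it finishes
 * within its quota. This pairs with xcan_interrupt() below, which masks
 * RXNEMP in IER before calling napi_schedule(), so RX interrupts stay
 * disabled for the whole time the poll loop is draining the RX FIFO.
 */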

/**
 * xcan_tx_interrupt - Tx Done Isr
 * @ndev: net_device pointer
 * @isr: Interrupt status register value
 */
static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int frames_in_fifo;
	int frames_sent = 1; /* TXOK => at least 1 frame was sent */
	unsigned long flags;
	int retries = 0;

	/* Synchronize with xmit as we need to know the exact number
	 * of frames in the FIFO to stay in sync due to the TXFEMP
	 * handling.
	 * This also prevents a race between netif_wake_queue() and
	 * netif_stop_queue().
	 */
	spin_lock_irqsave(&priv->tx_lock, flags);

	frames_in_fifo = priv->tx_head - priv->tx_tail;

	if (WARN_ON_ONCE(frames_in_fifo == 0)) {
		/* clear TXOK anyway to avoid getting back here */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return;
	}

	/* Check if 2 frames were sent (TXOK only means that at least 1
	 * frame was sent).
	 */
	if (frames_in_fifo > 1) {
		WARN_ON(frames_in_fifo > priv->tx_max);

		/* Synchronize TXOK and isr so that after the loop:
		 * (1) isr variable is up-to-date at least up to TXOK clear
		 *     time. This avoids us clearing a TXOK of a second frame
		 *     but not noticing that the FIFO is now empty and thus
		 *     marking only a single frame as sent.
		 * (2) No TXOK is left. Having one could mean leaving a
		 *     stray TXOK as we might process the associated frame
		 *     via TXFEMP handling as we read TXFEMP *after* TXOK
		 *     clear to satisfy (1).
		 */
		while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
			priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
			isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
		}

		if (isr & XCAN_IXR_TXFEMP_MASK) {
			/* nothing in FIFO anymore */
			frames_sent = frames_in_fifo;
		}
	} else {
		/* single frame in fifo, just clear TXOK */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
	}

	while (frames_sent--) {
		can_get_echo_skb(ndev, priv->tx_tail %
					priv->tx_max);
		priv->tx_tail++;
		stats->tx_packets++;
	}

	netif_wake_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	can_led_event(ndev, CAN_LED_EVENT_TX);
	xcan_update_error_state_after_rxtx(ndev);
}
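
/* Example scenario (illustrative values, not from the original source): with
 * tx_head = 5 and tx_tail = 3, xcan_tx_interrupt() sees two frames in the
 * FIFO. If, after draining every pending TXOK, the ISR also shows
 * XCAN_IXR_TXFEMP_MASK, the FIFO is empty and both echo skbs are completed
 * (frames_sent = 2); otherwise only one frame is accounted for and the
 * remaining TXOK raises another interrupt later.
 */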

/**
 * xcan_interrupt - CAN Isr
 * @irq: irq number
 * @dev_id: device id pointer
 *
 * This is the xilinx CAN Isr. It checks for the type of interrupt
 * and invokes the corresponding ISR.
 *
 * Return:
 * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
 */
static irqreturn_t xcan_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 isr, ier;
	u32 isr_errors;

	/* Get the interrupt status from Xilinx CAN */
	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	if (!isr)
		return IRQ_NONE;

	/* Check for the type of interrupt and process it */
	if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
				XCAN_IXR_WKUP_MASK));
		xcan_state_interrupt(ndev, isr);
	}

	/* Check for Tx interrupt and process it */
	if (isr & XCAN_IXR_TXOK_MASK)
		xcan_tx_interrupt(ndev, isr);

	/* Check for the type of error interrupt and process it */
	isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
			    XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK);
	if (isr_errors) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
		xcan_err_interrupt(ndev, isr);
	}

	/* Check for the type of receive interrupt and process it */
	if (isr & XCAN_IXR_RXNEMP_MASK) {
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier &= ~XCAN_IXR_RXNEMP_MASK;
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
		napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}

/**
 * xcan_chip_stop - Driver stop routine
 * @ndev: Pointer to net_device structure
 *
 * This is the driver's stop routine. It will disable the
 * interrupts and put the device into configuration mode.
 */
static void xcan_chip_stop(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ier;

	/* Disable interrupts and leave the CAN in configuration mode */
	ier = priv->read_reg(priv, XCAN_IER_OFFSET);
	ier &= ~XCAN_INTR_ALL;
	priv->write_reg(priv, XCAN_IER_OFFSET, ier);
	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
	priv->can.state = CAN_STATE_STOPPED;
}

/**
 * xcan_open - Driver open routine
 * @ndev: Pointer to net_device structure
 *
 * This is the driver open routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_open(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
			  ndev->name, ndev);
	if (ret < 0) {
		netdev_err(ndev, "irq allocation for CAN failed\n");
		goto err;
	}

	ret = clk_prepare_enable(priv->can_clk);
	if (ret) {
		netdev_err(ndev, "unable to enable device clock\n");
		goto err_irq;
	}

	ret = clk_prepare_enable(priv->bus_clk);
	if (ret) {
		netdev_err(ndev, "unable to enable bus clock\n");
		goto err_can_clk;
	}

	/* Set chip into reset mode */
	ret = set_reset_mode(ndev);
	if (ret < 0) {
		netdev_err(ndev, "mode resetting failed!\n");
		goto err_bus_clk;
	}

	/* Common open */
	ret = open_candev(ndev);
	if (ret)
		goto err_bus_clk;

	ret = xcan_chip_start(ndev);
	if (ret < 0) {
		netdev_err(ndev, "xcan_chip_start failed!\n");
		goto err_candev;
	}

	can_led_event(ndev, CAN_LED_EVENT_OPEN);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	return 0;

err_candev:
	close_candev(ndev);
err_bus_clk:
	clk_disable_unprepare(priv->bus_clk);
err_can_clk:
	clk_disable_unprepare(priv->can_clk);
err_irq:
	free_irq(ndev->irq, ndev);
err:
	return ret;
}

/**
 * xcan_close - Driver close routine
 * @ndev: Pointer to net_device structure
 *
 * Return: 0 always
 */
static int xcan_close(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	xcan_chip_stop(ndev);
	clk_disable_unprepare(priv->bus_clk);
	clk_disable_unprepare(priv->can_clk);
	free_irq(ndev->irq, ndev);
	close_candev(ndev);

	can_led_event(ndev, CAN_LED_EVENT_STOP);

	return 0;
}

/**
 * xcan_get_berr_counter - error counter routine
 * @ndev: Pointer to net_device structure
 * @bec: Pointer to can_berr_counter structure
 *
 * This is the driver error counter routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_get_berr_counter(const struct net_device *ndev,
				 struct can_berr_counter *bec)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = clk_prepare_enable(priv->can_clk);
	if (ret)
		goto err;

	ret = clk_prepare_enable(priv->bus_clk);
	if (ret)
		goto err_clk;

	bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
	bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
			XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);

	clk_disable_unprepare(priv->bus_clk);
	clk_disable_unprepare(priv->can_clk);

	return 0;

err_clk:
	clk_disable_unprepare(priv->can_clk);
err:
	return ret;
}

static const struct net_device_ops xcan_netdev_ops = {
	.ndo_open	= xcan_open,
	.ndo_stop	= xcan_close,
	.ndo_start_xmit	= xcan_start_xmit,
	.ndo_change_mtu	= can_change_mtu,
};

/**
 * xcan_suspend - Suspend method for the driver
 * @dev: Address of the platform_device structure
 *
 * Put the driver into low power mode.
 * Return: 0 always
 */
static int __maybe_unused xcan_suspend(struct device *dev)
{
	struct platform_device *pdev = dev_get_drvdata(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xcan_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
	}

	priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
	priv->can.state = CAN_STATE_SLEEPING;

	clk_disable(priv->bus_clk);
	clk_disable(priv->can_clk);

	return 0;
}

/**
 * xcan_resume - Resume from suspend
 * @dev: Address of the platform_device structure
 *
 * Resume operation after suspend.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_resume(struct device *dev)
{
	struct platform_device *pdev = dev_get_drvdata(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = clk_enable(priv->bus_clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		return ret;
	}
	ret = clk_enable(priv->can_clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		clk_disable_unprepare(priv->bus_clk);
		return ret;
	}

	priv->write_reg(priv, XCAN_MSR_OFFSET, 0);
	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	if (netif_running(ndev)) {
		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}

static SIMPLE_DEV_PM_OPS(xcan_dev_pm_ops, xcan_suspend, xcan_resume);

static const struct xcan_devtype_data xcan_zynq_data = {
	.caps = XCAN_CAP_WATERMARK,
};

/* Match table for OF platform binding */
static const struct of_device_id xcan_of_match[] = {
	{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
	{ .compatible = "xlnx,axi-can-1.00.a", },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);
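
/* Illustrative device tree fragment (a sketch with placeholder and made-up
 * values, not taken from any board file): xcan_probe() below expects a node
 * along these lines, with "pclk" used on Zynq and "s_axi_aclk" on the AXI
 * CAN IP:
 *
 *	can0: can@XXXXXXXX {
 *		compatible = "xlnx,zynq-can-1.0";
 *		reg = <...>;
 *		interrupts = <...>;
 *		clocks = <&clk_can>, <&clk_bus>;
 *		clock-names = "can_clk", "pclk";
 *		tx-fifo-depth = <0x40>;
 *		rx-fifo-depth = <0x40>;
 *	};
 *
 * Consult the dt-bindings documentation for the authoritative property list.
 */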

/**
 * xcan_probe - Platform registration call
 * @pdev: Handle to the platform device structure
 *
 * This function does all the memory allocation and registration for the CAN
 * device.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_probe(struct platform_device *pdev)
{
	struct resource *res; /* IO mem resources */
	struct net_device *ndev;
	struct xcan_priv *priv;
	const struct of_device_id *of_id;
	int caps = 0;
	void __iomem *addr;
	int ret, rx_max, tx_max, tx_fifo_depth;

	/* Get the virtual base address for the device */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto err;
	}

	ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
				   &tx_fifo_depth);
	if (ret < 0)
		goto err;

	ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", &rx_max);
	if (ret < 0)
		goto err;

	of_id = of_match_device(xcan_of_match, &pdev->dev);
	if (of_id) {
		const struct xcan_devtype_data *devtype_data = of_id->data;

		if (devtype_data)
			caps = devtype_data->caps;
	}

	/* There is no way to directly figure out how many frames have been
	 * sent when the TXOK interrupt is processed. If watermark programming
	 * is supported, we can have 2 frames in the FIFO and use TXFEMP
	 * to determine if 1 or 2 frames have been sent.
	 * Theoretically we should be able to use TXFWMEMP to determine up
	 * to 3 frames, but it seems that after putting a second frame in the
	 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
	 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
	 * sent), which is not a sensible state - possibly TXFWMEMP is not
	 * completely synchronized with the rest of the bits?
	 */
	if (caps & XCAN_CAP_WATERMARK)
		tx_max = min(tx_fifo_depth, 2);
	else
		tx_max = 1;

	/* Create a CAN device instance */
	ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->dev = ndev;
	priv->can.bittiming_const = &xcan_bittiming_const;
	priv->can.do_set_mode = xcan_do_set_mode;
	priv->can.do_get_berr_counter = xcan_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
					CAN_CTRLMODE_BERR_REPORTING;
	priv->reg_base = addr;
	priv->tx_max = tx_max;
	spin_lock_init(&priv->tx_lock);

	/* Get IRQ for the device */
	ndev->irq = platform_get_irq(pdev, 0);
	ndev->flags |= IFF_ECHO;	/* We support local echo */

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->netdev_ops = &xcan_netdev_ops;

	/* Getting the CAN can_clk info */
	priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
	if (IS_ERR(priv->can_clk)) {
		dev_err(&pdev->dev, "Device clock not found.\n");
		ret = PTR_ERR(priv->can_clk);
		goto err_free;
	}

	/* Check for type of CAN device */
	if (of_device_is_compatible(pdev->dev.of_node,
				    "xlnx,zynq-can-1.0")) {
		priv->bus_clk = devm_clk_get(&pdev->dev, "pclk");
		if (IS_ERR(priv->bus_clk)) {
			dev_err(&pdev->dev, "bus clock not found\n");
			ret = PTR_ERR(priv->bus_clk);
			goto err_free;
		}
	} else {
		priv->bus_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
		if (IS_ERR(priv->bus_clk)) {
			dev_err(&pdev->dev, "bus clock not found\n");
			ret = PTR_ERR(priv->bus_clk);
			goto err_free;
		}
	}

	ret = clk_prepare_enable(priv->can_clk);
	if (ret) {
		dev_err(&pdev->dev, "unable to enable device clock\n");
		goto err_free;
	}

	ret = clk_prepare_enable(priv->bus_clk);
	if (ret) {
		dev_err(&pdev->dev, "unable to enable bus clock\n");
		goto err_unprepare_disable_dev;
	}

	priv->write_reg = xcan_write_reg_le;
	priv->read_reg = xcan_read_reg_le;

	if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
		priv->write_reg = xcan_write_reg_be;
		priv->read_reg = xcan_read_reg_be;
	}
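
	/* Added commentary (not part of the original source): the core is
	 * expected to sit in configuration mode at this point, so a
	 * correctly-ordered read of SR returns exactly XCAN_SR_CONFIG_MASK
	 * (0x1). If the little-endian accessors do not see that value, the
	 * registers are assumed to be wired big-endian and the accessors are
	 * swapped above.
	 */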

	priv->can.clock.freq = clk_get_rate(priv->can_clk);

	netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);

	ret = register_candev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register (err=%d)\n", ret);
		goto err_unprepare_disable_busclk;
	}

	devm_can_led_init(ndev);

	clk_disable_unprepare(priv->bus_clk);
	clk_disable_unprepare(priv->can_clk);

	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n",
		   priv->reg_base, ndev->irq, priv->can.clock.freq,
		   tx_fifo_depth, priv->tx_max);

	return 0;

err_unprepare_disable_busclk:
	clk_disable_unprepare(priv->bus_clk);
err_unprepare_disable_dev:
	clk_disable_unprepare(priv->can_clk);
err_free:
	free_candev(ndev);
err:
	return ret;
}

/**
 * xcan_remove - Unregister the device after releasing the resources
 * @pdev: Handle to the platform device structure
 *
 * This function frees all the resources allocated to the device.
 * Return: 0 always
 */
static int xcan_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xcan_priv *priv = netdev_priv(ndev);

	if (set_reset_mode(ndev) < 0)
		netdev_err(ndev, "mode resetting failed!\n");

	unregister_candev(ndev);
	netif_napi_del(&priv->napi);
	free_candev(ndev);

	return 0;
}

static struct platform_driver xcan_driver = {
	.probe = xcan_probe,
	.remove	= xcan_remove,
	.driver	= {
		.name = DRIVER_NAME,
		.pm = &xcan_dev_pm_ops,
		.of_match_table	= xcan_of_match,
	},
};

module_platform_driver(xcan_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx CAN interface");