sata_highbank.c

/*
 * Calxeda Highbank AHCI SATA platform driver
 * Copyright 2012 Calxeda, Inc.
 *
 * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>

#include "ahci.h"

#define CPHY_MAP(dev, addr) ((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f))
#define CPHY_ADDR(addr) (((addr) & 0x1ff) << 2)
#define SERDES_CR_CTL 0x80a0
#define SERDES_CR_ADDR 0x80a1
#define SERDES_CR_DATA 0x80a2
#define CR_BUSY 0x0001
#define CR_START 0x0001
#define CR_WR_RDN 0x0002
#define CPHY_TX_INPUT_STS 0x2001
#define CPHY_RX_INPUT_STS 0x2002
#define CPHY_SATA_TX_OVERRIDE 0x8000
#define CPHY_SATA_RX_OVERRIDE 0x4000
#define CPHY_TX_OVERRIDE 0x2004
#define CPHY_RX_OVERRIDE 0x2005
#define SPHY_LANE 0x100
#define SPHY_HALF_RATE 0x0001
#define CPHY_SATA_DPLL_MODE 0x0700
#define CPHY_SATA_DPLL_SHIFT 8
#define CPHY_SATA_DPLL_RESET (1 << 11)
#define CPHY_SATA_TX_ATTEN 0x1c00
#define CPHY_SATA_TX_ATTEN_SHIFT 10
#define CPHY_PHY_COUNT 6
#define CPHY_LANE_COUNT 4
#define CPHY_PORT_COUNT (CPHY_PHY_COUNT * CPHY_LANE_COUNT)

static DEFINE_SPINLOCK(cphy_lock);

/* Each of the 6 phys can have up to 4 sata ports attached to it. Map 0-based
 * sata ports to their phys and then to their lanes within the phys
 */
struct phy_lane_info {
        void __iomem *phy_base;
        u8 lane_mapping;
        u8 phy_devs;
        u8 tx_atten;
};
static struct phy_lane_info port_data[CPHY_PORT_COUNT];

static DEFINE_SPINLOCK(sgpio_lock);
#define SCLOCK 0
#define SLOAD 1
#define SDATA 2
#define SGPIO_PINS 3
#define SGPIO_PORTS 8

struct ecx_plat_data {
        u32 n_ports;
        /* number of extra clocks that the SGPIO PIC controller expects */
        u32 pre_clocks;
        u32 post_clocks;
        unsigned sgpio_gpio[SGPIO_PINS];
        u32 sgpio_pattern;
        u32 port_to_sgpio[SGPIO_PORTS];
};

#define SGPIO_SIGNALS 3
#define ECX_ACTIVITY_BITS 0x300000
#define ECX_ACTIVITY_SHIFT 0
#define ECX_LOCATE_BITS 0x80000
#define ECX_LOCATE_SHIFT 1
#define ECX_FAULT_BITS 0x400000
#define ECX_FAULT_SHIFT 2

static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
                                  u32 shift)
{
        return 1 << (3 * pdata->port_to_sgpio[port] + shift);
}

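/*
 * Fold the activity/locate/fault bits of an EM LED message into the
 * driver's cached SGPIO output pattern for the given port.
 */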
static void ecx_parse_sgpio(struct ecx_plat_data *pdata, u32 port, u32 state)
{
        if (state & ECX_ACTIVITY_BITS)
                pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
                                                        ECX_ACTIVITY_SHIFT);
        else
                pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
                                                         ECX_ACTIVITY_SHIFT);
        if (state & ECX_LOCATE_BITS)
                pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
                                                        ECX_LOCATE_SHIFT);
        else
                pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
                                                         ECX_LOCATE_SHIFT);
        if (state & ECX_FAULT_BITS)
                pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
                                                        ECX_FAULT_SHIFT);
        else
                pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
                                                         ECX_FAULT_SHIFT);
}

/*
 * Tell the LED controller that the signal has changed by raising the clock
 * line for 50 uS and then lowering it for 50 uS.
 */
static void ecx_led_cycle_clock(struct ecx_plat_data *pdata)
{
        gpio_set_value(pdata->sgpio_gpio[SCLOCK], 1);
        udelay(50);
        gpio_set_value(pdata->sgpio_gpio[SCLOCK], 0);
        udelay(50);
}

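/*
 * Shift a new LED state out over the SGPIO clock/load/data lines: emit any
 * pre-clocks, pulse the load line, bit-bang one activity/locate/fault
 * triplet per port, then emit any post-clocks.
 */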
static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
                                        ssize_t size)
{
        struct ahci_host_priv *hpriv = ap->host->private_data;
        struct ecx_plat_data *pdata = hpriv->plat_data;
        struct ahci_port_priv *pp = ap->private_data;
        unsigned long flags;
        int pmp, i;
        struct ahci_em_priv *emp;
        u32 sgpio_out;

        /* get the slot number from the message */
        pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
        if (pmp < EM_MAX_SLOTS)
                emp = &pp->em_priv[pmp];
        else
                return -EINVAL;

        if (!(hpriv->em_msg_type & EM_MSG_TYPE_LED))
                return size;

        spin_lock_irqsave(&sgpio_lock, flags);
        ecx_parse_sgpio(pdata, ap->port_no, state);
        sgpio_out = pdata->sgpio_pattern;
        for (i = 0; i < pdata->pre_clocks; i++)
                ecx_led_cycle_clock(pdata);

        gpio_set_value(pdata->sgpio_gpio[SLOAD], 1);
        ecx_led_cycle_clock(pdata);
        gpio_set_value(pdata->sgpio_gpio[SLOAD], 0);

        /*
         * bit-bang out the SGPIO pattern, by consuming a bit and then
         * clocking it out.
         */
        for (i = 0; i < (SGPIO_SIGNALS * pdata->n_ports); i++) {
                gpio_set_value(pdata->sgpio_gpio[SDATA], sgpio_out & 1);
                sgpio_out >>= 1;
                ecx_led_cycle_clock(pdata);
        }
        for (i = 0; i < pdata->post_clocks; i++)
                ecx_led_cycle_clock(pdata);

        /* save off new led state for port/slot */
        emp->led_state = state;

        spin_unlock_irqrestore(&sgpio_lock, flags);
        return size;
}

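/*
 * Claim the SGPIO GPIO lines and read the LED ordering and pre/post clock
 * counts from the device tree, then enable LED enclosure management
 * messages for this host.
 */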
static void highbank_set_em_messages(struct device *dev,
                                     struct ahci_host_priv *hpriv,
                                     struct ata_port_info *pi)
{
        struct device_node *np = dev->of_node;
        struct ecx_plat_data *pdata = hpriv->plat_data;
        int i;
        int err;

        for (i = 0; i < SGPIO_PINS; i++) {
                err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i);
                if (IS_ERR_VALUE(err))
                        return;
                pdata->sgpio_gpio[i] = err;
                err = gpio_request(pdata->sgpio_gpio[i], "CX SGPIO");
                if (err) {
                        pr_err("sata_highbank gpio_request %d failed: %d\n",
                               i, err);
                        return;
                }
                gpio_direction_output(pdata->sgpio_gpio[i], 1);
        }
        of_property_read_u32_array(np, "calxeda,led-order",
                                   pdata->port_to_sgpio,
                                   pdata->n_ports);
        if (of_property_read_u32(np, "calxeda,pre-clocks", &pdata->pre_clocks))
                pdata->pre_clocks = 0;
        if (of_property_read_u32(np, "calxeda,post-clocks",
                                 &pdata->post_clocks))
                pdata->post_clocks = 0;

        /* store em_loc */
        hpriv->em_loc = 0;
        hpriv->em_buf_sz = 4;
        hpriv->em_msg_type = EM_MSG_TYPE_LED;
        pi->flags |= ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY;
}

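/*
 * Combo phy registers are reached indirectly: the CPHY_MAP() value for the
 * device/address pair is written to offset 0x800, then the data is accessed
 * through the window selected by CPHY_ADDR().
 */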
static u32 __combo_phy_reg_read(u8 sata_port, u32 addr)
{
        u32 data;
        u8 dev = port_data[sata_port].phy_devs;

        spin_lock(&cphy_lock);
        writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
        data = readl(port_data[sata_port].phy_base + CPHY_ADDR(addr));
        spin_unlock(&cphy_lock);
        return data;
}

static void __combo_phy_reg_write(u8 sata_port, u32 addr, u32 data)
{
        u8 dev = port_data[sata_port].phy_devs;

        spin_lock(&cphy_lock);
        writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
        writel(data, port_data[sata_port].phy_base + CPHY_ADDR(addr));
        spin_unlock(&cphy_lock);
}

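/*
 * The SERDES control registers add a second level of indirection: wait for
 * the busy bit in SERDES_CR_CTL to clear, then issue the transfer through
 * the CR address/data/control registers.
 */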
static void combo_phy_wait_for_ready(u8 sata_port)
{
        while (__combo_phy_reg_read(sata_port, SERDES_CR_CTL) & CR_BUSY)
                udelay(5);
}

static u32 combo_phy_read(u8 sata_port, u32 addr)
{
        combo_phy_wait_for_ready(sata_port);
        __combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
        __combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_START);
        combo_phy_wait_for_ready(sata_port);
        return __combo_phy_reg_read(sata_port, SERDES_CR_DATA);
}

static void combo_phy_write(u8 sata_port, u32 addr, u32 data)
{
        combo_phy_wait_for_ready(sata_port);
        __combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
        __combo_phy_reg_write(sata_port, SERDES_CR_DATA, data);
        __combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_WR_RDN | CR_START);
}

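/* Clear the SATA RX override for this port's lane. */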
static void highbank_cphy_disable_overrides(u8 sata_port)
{
        u8 lane = port_data[sata_port].lane_mapping;
        u32 tmp;

        if (unlikely(port_data[sata_port].phy_base == NULL))
                return;
        tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
        tmp &= ~CPHY_SATA_RX_OVERRIDE;
        combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
}

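/*
 * Apply the board-specific TX attenuation override; values with bit 3 set
 * leave the hardware default in place.
 */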
static void cphy_override_tx_attenuation(u8 sata_port, u32 val)
{
        u8 lane = port_data[sata_port].lane_mapping;
        u32 tmp;

        if (val & 0x8)
                return;

        tmp = combo_phy_read(sata_port, CPHY_TX_INPUT_STS + lane * SPHY_LANE);
        tmp &= ~CPHY_SATA_TX_OVERRIDE;
        combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);

        tmp |= CPHY_SATA_TX_OVERRIDE;
        combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);

        tmp |= (val << CPHY_SATA_TX_ATTEN_SHIFT) & CPHY_SATA_TX_ATTEN;
        combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
}

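/*
 * Force the RX override for the lane and program the requested DPLL mode,
 * pulsing the DPLL reset bit so the new mode takes effect.
 */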
static void cphy_override_rx_mode(u8 sata_port, u32 val)
{
        u8 lane = port_data[sata_port].lane_mapping;
        u32 tmp;

        tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
        tmp &= ~CPHY_SATA_RX_OVERRIDE;
        combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

        tmp |= CPHY_SATA_RX_OVERRIDE;
        combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

        tmp &= ~CPHY_SATA_DPLL_MODE;
        tmp |= val << CPHY_SATA_DPLL_SHIFT;
        combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

        tmp |= CPHY_SATA_DPLL_RESET;
        combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

        tmp &= ~CPHY_SATA_DPLL_RESET;
        combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

        msleep(15);
}

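/*
 * Wait (bounded) for the lane to leave half-rate mode, then apply the RX
 * mode and per-port TX attenuation overrides.
 */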
static void highbank_cphy_override_lane(u8 sata_port)
{
        u8 lane = port_data[sata_port].lane_mapping;
        u32 tmp, k = 0;

        if (unlikely(port_data[sata_port].phy_base == NULL))
                return;
        do {
                tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS +
                                     lane * SPHY_LANE);
        } while ((tmp & SPHY_HALF_RATE) && (k++ < 1000));
        cphy_override_rx_mode(sata_port, 3);
        cphy_override_tx_attenuation(sata_port, port_data[sata_port].tx_atten);
}

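/*
 * Walk the "calxeda,port-phys" phandles to map each SATA port to its combo
 * phy and lane, ioremap each phy node once, and pick up the per-port
 * "calxeda,tx-atten" values.
 */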
static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
{
        struct device_node *sata_node = dev->of_node;
        int phy_count = 0, phy, port = 0, i;
        void __iomem *cphy_base[CPHY_PHY_COUNT] = {};
        struct device_node *phy_nodes[CPHY_PHY_COUNT] = {};
        u32 tx_atten[CPHY_PORT_COUNT] = {};

        memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);

        do {
                u32 tmp;
                struct of_phandle_args phy_data;

                if (of_parse_phandle_with_args(sata_node,
                                "calxeda,port-phys", "#phy-cells",
                                port, &phy_data))
                        break;
                for (phy = 0; phy < phy_count; phy++) {
                        if (phy_nodes[phy] == phy_data.np)
                                break;
                }
                if (phy_nodes[phy] == NULL) {
                        phy_nodes[phy] = phy_data.np;
                        cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
                        if (cphy_base[phy] == NULL) {
                                return 0;
                        }
                        phy_count += 1;
                }
                port_data[port].lane_mapping = phy_data.args[0];
                of_property_read_u32(phy_nodes[phy], "phydev", &tmp);
                port_data[port].phy_devs = tmp;
                port_data[port].phy_base = cphy_base[phy];
                of_node_put(phy_data.np);
                port += 1;
        } while (port < CPHY_PORT_COUNT);

        of_property_read_u32_array(sata_node, "calxeda,tx-atten",
                                   tx_atten, port);
        for (i = 0; i < port; i++)
                port_data[i].tx_atten = (u8) tx_atten[i];
        return 0;
}

/*
 * The Calxeda SATA phy intermittently fails to bring up a link with Gen3
 * drives. Retrying the phy hard reset can work around the issue, but the
 * drive may fail again. In less than 150 out of 15000 test runs, it took
 * more than 10 tries for the link to be established (but never more than 35).
 * Triple the maximum observed retry count to provide plenty of margin for
 * rare events and to guarantee that the link is established.
 *
 * Also, the default 2 second time-out on a failed drive is too long in
 * this situation. The uboot implementation of the same driver function
 * uses a much shorter time-out period and never experiences a time out
 * issue. Reducing the time-out to 500ms improves the responsiveness.
 * The other timing constants were kept the same as the stock AHCI driver.
 * This change was also tested 15000 times on 24 drives and none of them
 * experienced a time out.
 */
static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
                                   unsigned long deadline)
{
        static const unsigned long timing[] = { 5, 100, 500};
        struct ata_port *ap = link->ap;
        struct ahci_port_priv *pp = ap->private_data;
        struct ahci_host_priv *hpriv = ap->host->private_data;
        u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
        struct ata_taskfile tf;
        bool online;
        u32 sstatus;
        int rc;
        int retry = 100;

        ahci_stop_engine(ap);

        /* clear D2H reception area to properly wait for D2H FIS */
        ata_tf_init(link->device, &tf);
        tf.command = ATA_BUSY;
        ata_tf_to_fis(&tf, 0, 0, d2h_fis);

        do {
                highbank_cphy_disable_overrides(link->ap->port_no);
                rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
                highbank_cphy_override_lane(link->ap->port_no);

                /* If the status is 1, we are connected, but the link did not
                 * come up. So retry resetting the link again.
                 */
                if (sata_scr_read(link, SCR_STATUS, &sstatus))
                        break;
                if (!(sstatus & 0x3))
                        break;
        } while (!online && retry--);

        hpriv->start_engine(ap);

        if (online)
                *class = ahci_dev_classify(ap);

        return rc;
}

static struct ata_port_operations ahci_highbank_ops = {
        .inherits               = &ahci_ops,
        .hardreset              = ahci_highbank_hardreset,
        .transmit_led_message   = ecx_transmit_led_message,
};

static const struct ata_port_info ahci_highbank_port_info = {
        .flags          = AHCI_FLAG_COMMON,
        .pio_mask       = ATA_PIO4,
        .udma_mask      = ATA_UDMA6,
        .port_ops       = &ahci_highbank_ops,
};

static struct scsi_host_template ahci_highbank_platform_sht = {
        AHCI_SHT("sata_highbank"),
};

static const struct of_device_id ahci_of_match[] = {
        { .compatible = "calxeda,hb-ahci" },
        {},
};
MODULE_DEVICE_TABLE(of, ahci_of_match);

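/* Standard AHCI platform probe plus Highbank combo phy and SGPIO LED setup. */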
static int ahci_highbank_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct ahci_host_priv *hpriv;
        struct ecx_plat_data *pdata;
        struct ata_host *host;
        struct resource *mem;
        int irq;
        int i;
        int rc;
        u32 n_ports;
        struct ata_port_info pi = ahci_highbank_port_info;
        const struct ata_port_info *ppi[] = { &pi, NULL };

        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!mem) {
                dev_err(dev, "no mmio space\n");
                return -EINVAL;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq <= 0) {
                dev_err(dev, "no irq\n");
                return -EINVAL;
        }

        hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
        if (!hpriv) {
                dev_err(dev, "can't alloc ahci_host_priv\n");
                return -ENOMEM;
        }

        pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata) {
                dev_err(dev, "can't alloc ecx_plat_data\n");
                return -ENOMEM;
        }

        hpriv->irq = irq;
        hpriv->flags |= (unsigned long)pi.private_data;

        hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
        if (!hpriv->mmio) {
                dev_err(dev, "can't map %pR\n", mem);
                return -ENOMEM;
        }

        rc = highbank_initialize_phys(dev, hpriv->mmio);
        if (rc)
                return rc;

        ahci_save_initial_config(dev, hpriv);

        /* prepare host */
        if (hpriv->cap & HOST_CAP_NCQ)
                pi.flags |= ATA_FLAG_NCQ;

        if (hpriv->cap & HOST_CAP_PMP)
                pi.flags |= ATA_FLAG_PMP;

        if (hpriv->cap & HOST_CAP_64)
                dma_set_coherent_mask(dev, DMA_BIT_MASK(64));

        /* CAP.NP sometimes indicate the index of the last enabled
         * port, at other times, that of the last possible port, so
         * determining the maximum port number requires looking at
         * both CAP.NP and port_map.
         */
        n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

        pdata->n_ports = n_ports;
        hpriv->plat_data = pdata;
        highbank_set_em_messages(dev, hpriv, &pi);

        host = ata_host_alloc_pinfo(dev, ppi, n_ports);
        if (!host) {
                rc = -ENOMEM;
                goto err0;
        }

        host->private_data = hpriv;

        if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
                host->flags |= ATA_HOST_PARALLEL_SCAN;

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];

                ata_port_desc(ap, "mmio %pR", mem);
                ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);

                /* set enclosure management message type */
                if (ap->flags & ATA_FLAG_EM)
                        ap->em_message_type = hpriv->em_msg_type;

                /* disabled/not-implemented port */
                if (!(hpriv->port_map & (1 << i)))
                        ap->ops = &ata_dummy_port_ops;
        }

        rc = ahci_reset_controller(host);
        if (rc)
                goto err0;

        ahci_init_controller(host);
        ahci_print_info(host, "platform");

        rc = ahci_host_activate(host, &ahci_highbank_platform_sht);
        if (rc)
                goto err0;

        return 0;
err0:
        return rc;
}

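/* Suspend/resume mirror the generic AHCI host suspend/resume sequence. */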
#ifdef CONFIG_PM_SLEEP
static int ahci_highbank_suspend(struct device *dev)
{
        struct ata_host *host = dev_get_drvdata(dev);
        struct ahci_host_priv *hpriv = host->private_data;
        void __iomem *mmio = hpriv->mmio;
        u32 ctl;
        int rc;

        if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
                dev_err(dev, "firmware update required for suspend/resume\n");
                return -EIO;
        }

        /*
         * AHCI spec rev1.1 section 8.3.3:
         * Software must disable interrupts prior to requesting a
         * transition of the HBA to D3 state.
         */
        ctl = readl(mmio + HOST_CTL);
        ctl &= ~HOST_IRQ_EN;
        writel(ctl, mmio + HOST_CTL);
        readl(mmio + HOST_CTL); /* flush */

        rc = ata_host_suspend(host, PMSG_SUSPEND);
        if (rc)
                return rc;

        return 0;
}

static int ahci_highbank_resume(struct device *dev)
{
        struct ata_host *host = dev_get_drvdata(dev);
        int rc;

        if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
                rc = ahci_reset_controller(host);
                if (rc)
                        return rc;

                ahci_init_controller(host);
        }

        ata_host_resume(host);

        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
                         ahci_highbank_suspend, ahci_highbank_resume);

static struct platform_driver ahci_highbank_driver = {
        .remove = ata_platform_remove_one,
        .driver = {
                .name = "highbank-ahci",
                .of_match_table = ahci_of_match,
                .pm = &ahci_highbank_pm_ops,
        },
        .probe = ahci_highbank_probe,
};

module_platform_driver(ahci_highbank_driver);

MODULE_DESCRIPTION("Calxeda Highbank AHCI SATA platform driver");
MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("sata:highbank");