spi-nor.c

  1. /*
  2. * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
  3. * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
  4. *
  5. * Copyright (C) 2005, Intec Automation Inc.
  6. * Copyright (C) 2014, Freescale Semiconductor, Inc.
  7. *
  8. * This code is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. */
  12. #include <linux/err.h>
  13. #include <linux/errno.h>
  14. #include <linux/module.h>
  15. #include <linux/device.h>
  16. #include <linux/mutex.h>
  17. #include <linux/math64.h>
  18. #include <linux/sizes.h>
  19. #include <linux/mtd/mtd.h>
  20. #include <linux/of_platform.h>
  21. #include <linux/spi/flash.h>
  22. #include <linux/mtd/spi-nor.h>
  23. /* Define the maximum time to poll the status register before we give up. */
  24. /*
  25. * For everything but full-chip erase; probably could be much smaller, but kept
  26. * around for safety for now
  27. */
  28. #define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
  29. /*
  30. * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
  31. * for larger flash
  32. */
  33. #define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
  34. #define SPI_NOR_MAX_ID_LEN 6
  35. struct flash_info {
  36. char *name;
  37. /*
  38. * This array stores the ID bytes.
  39. * The first three bytes are the JEDEC ID.
  40. * JEDEC ID zero means "no ID" (mostly older chips).
  41. */
  42. u8 id[SPI_NOR_MAX_ID_LEN];
  43. u8 id_len;
  44. /* The size listed here is what works with SPINOR_OP_SE, which isn't
  45. * necessarily called a "sector" by the vendor.
  46. */
  47. unsigned sector_size;
  48. u16 n_sectors;
  49. u16 page_size;
  50. u16 addr_width;
  51. u16 flags;
  52. #define SECT_4K 0x01 /* SPINOR_OP_BE_4K works uniformly */
  53. #define SPI_NOR_NO_ERASE 0x02 /* No erase command needed */
  54. #define SST_WRITE 0x04 /* use SST byte programming */
  55. #define SPI_NOR_NO_FR 0x08 /* Can't do fastread */
  56. #define SECT_4K_PMC 0x10 /* SPINOR_OP_BE_4K_PMC works uniformly */
  57. #define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */
  58. #define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */
  59. #define USE_FSR 0x80 /* use flag status register */
  60. };
  61. #define JEDEC_MFR(info) ((info)->id[0])
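/* The first ID byte is the JEDEC manufacturer ID, e.g. 0x1f Atmel, 0x20
 * Micron/ST, 0xbf SST, 0xc2 Macronix, 0xef Winbond, 0x01 Spansion (compare
 * the spi_nor_ids[] table below). */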
  62. static const struct flash_info *spi_nor_match_id(const char *name);
  63. /*
  64. * Read the status register, returning its value.
  65. * Returns the status register value on success,
  66. * or a negative errno if an error occurred.
  67. */
  68. static int read_sr(struct spi_nor *nor)
  69. {
  70. int ret;
  71. u8 val;
  72. ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1);
  73. if (ret < 0) {
  74. pr_err("error %d reading SR\n", (int) ret);
  75. return ret;
  76. }
  77. return val;
  78. }
  79. /*
  80. * Read the flag status register, returning its value.
  81. * Returns the flag status register value on success,
  82. * or a negative errno if an error occurred.
  83. */
  84. static int read_fsr(struct spi_nor *nor)
  85. {
  86. int ret;
  87. u8 val;
  88. ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1);
  89. if (ret < 0) {
  90. pr_err("error %d reading FSR\n", ret);
  91. return ret;
  92. }
  93. return val;
  94. }
  95. /*
  96. * Read the configuration register, returning its value.
  97. * Returns the configuration register value on success,
  98. * or a negative errno if an error occurred.
  99. */
  100. static int read_cr(struct spi_nor *nor)
  101. {
  102. int ret;
  103. u8 val;
  104. ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1);
  105. if (ret < 0) {
  106. dev_err(nor->dev, "error %d reading CR\n", ret);
  107. return ret;
  108. }
  109. return val;
  110. }
  111. /*
  112. * Dummy cycle calculation for the different types of read.
  113. * It can be used to support more commands with
  114. * different dummy cycle requirements.
  115. */
  116. static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor)
  117. {
  118. switch (nor->flash_read) {
  119. case SPI_NOR_FAST:
  120. case SPI_NOR_DUAL:
  121. case SPI_NOR_QUAD:
  122. return 8;
  123. case SPI_NOR_NORMAL:
  124. return 0;
  125. }
  126. return 0;
  127. }
  128. /*
  129. * Write the status register with one byte.
  130. * Returns negative if error occurred.
  131. */
  132. static inline int write_sr(struct spi_nor *nor, u8 val)
  133. {
  134. nor->cmd_buf[0] = val;
  135. return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1);
  136. }
  137. /*
  138. * Set write enable latch with Write Enable command.
  139. * Returns negative if error occurred.
  140. */
  141. static inline int write_enable(struct spi_nor *nor)
  142. {
  143. return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
  144. }
  145. /*
  146. * Send the write disable instruction to the chip.
  147. */
  148. static inline int write_disable(struct spi_nor *nor)
  149. {
  150. return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
  151. }
  152. static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
  153. {
  154. return mtd->priv;
  155. }
  156. /* Enable/disable 4-byte addressing mode. */
  157. static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info,
  158. int enable)
  159. {
  160. int status;
  161. bool need_wren = false;
  162. u8 cmd;
  163. switch (JEDEC_MFR(info)) {
  164. case SNOR_MFR_MICRON:
  165. /* Some Micron need WREN command; all will accept it */
  166. need_wren = true;
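/* fall through */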
  167. case SNOR_MFR_MACRONIX:
  168. case SNOR_MFR_WINBOND:
  169. if (need_wren)
  170. write_enable(nor);
  171. cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
  172. status = nor->write_reg(nor, cmd, NULL, 0);
  173. if (need_wren)
  174. write_disable(nor);
  175. return status;
  176. default:
  177. /* Spansion style */
  178. nor->cmd_buf[0] = enable << 7;
  179. return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1);
  180. }
  181. }
  182. static inline int spi_nor_sr_ready(struct spi_nor *nor)
  183. {
  184. int sr = read_sr(nor);
  185. if (sr < 0)
  186. return sr;
  187. else
  188. return !(sr & SR_WIP);
  189. }
  190. static inline int spi_nor_fsr_ready(struct spi_nor *nor)
  191. {
  192. int fsr = read_fsr(nor);
  193. if (fsr < 0)
  194. return fsr;
  195. else
  196. return fsr & FSR_READY;
  197. }
  198. static int spi_nor_ready(struct spi_nor *nor)
  199. {
  200. int sr, fsr;
  201. sr = spi_nor_sr_ready(nor);
  202. if (sr < 0)
  203. return sr;
  204. fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
  205. if (fsr < 0)
  206. return fsr;
  207. return sr && fsr;
  208. }
  209. /*
  210. * Service routine that polls the status register until the flash is ready
  211. * or the timeout expires. Returns 0 on success, negative errno on error.
  212. */
  213. static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
  214. unsigned long timeout_jiffies)
  215. {
  216. unsigned long deadline;
  217. int timeout = 0, ret;
  218. deadline = jiffies + timeout_jiffies;
  219. while (!timeout) {
  220. if (time_after_eq(jiffies, deadline))
  221. timeout = 1;
  222. ret = spi_nor_ready(nor);
  223. if (ret < 0)
  224. return ret;
  225. if (ret)
  226. return 0;
  227. cond_resched();
  228. }
  229. dev_err(nor->dev, "flash operation timed out\n");
  230. return -ETIMEDOUT;
  231. }
  232. static int spi_nor_wait_till_ready(struct spi_nor *nor)
  233. {
  234. return spi_nor_wait_till_ready_with_timeout(nor,
  235. DEFAULT_READY_WAIT_JIFFIES);
  236. }
  237. /*
  238. * Erase the whole flash memory
  239. *
  240. * Returns 0 if successful, non-zero otherwise.
  241. */
  242. static int erase_chip(struct spi_nor *nor)
  243. {
  244. dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
  245. return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
  246. }
  247. static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
  248. {
  249. int ret = 0;
  250. mutex_lock(&nor->lock);
  251. if (nor->prepare) {
  252. ret = nor->prepare(nor, ops);
  253. if (ret) {
  254. dev_err(nor->dev, "failed in the preparation.\n");
  255. mutex_unlock(&nor->lock);
  256. return ret;
  257. }
  258. }
  259. return ret;
  260. }
  261. static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
  262. {
  263. if (nor->unprepare)
  264. nor->unprepare(nor, ops);
  265. mutex_unlock(&nor->lock);
  266. }
  267. /*
  268. * Erase an address range on the NOR chip. The address range may span
  269. * one or more erase sectors. Return an error if there is a problem erasing.
  270. */
  271. static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
  272. {
  273. struct spi_nor *nor = mtd_to_spi_nor(mtd);
  274. u32 addr, len;
  275. uint32_t rem;
  276. int ret;
  277. dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
  278. (long long)instr->len);
  279. div_u64_rem(instr->len, mtd->erasesize, &rem);
  280. if (rem)
  281. return -EINVAL;
  282. addr = instr->addr;
  283. len = instr->len;
  284. ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
  285. if (ret)
  286. return ret;
  287. /* whole-chip erase? */
  288. if (len == mtd->size) {
  289. unsigned long timeout;
  290. write_enable(nor);
  291. if (erase_chip(nor)) {
  292. ret = -EIO;
  293. goto erase_err;
  294. }
  295. /*
  296. * Scale the timeout linearly with the size of the flash, with
  297. * a minimum calibrated to an old 2MB flash. We could try to
  298. * pull these from CFI/SFDP, but these values should be good
  299. * enough for now.
  300. */
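/* Example: a 32 MiB flash gets max(40 s, 40 s * 32 MiB / 2 MiB) = 640 s. */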
  301. timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
  302. CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
  303. (unsigned long)(mtd->size / SZ_2M));
  304. ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
  305. if (ret)
  306. goto erase_err;
  307. /* REVISIT in some cases we could speed up erasing large regions
  308. * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
  309. * to use "small sector erase", but that's not always optimal.
  310. */
  311. /* "sector"-at-a-time erase */
  312. } else {
  313. while (len) {
  314. write_enable(nor);
  315. if (nor->erase(nor, addr)) {
  316. ret = -EIO;
  317. goto erase_err;
  318. }
  319. addr += mtd->erasesize;
  320. len -= mtd->erasesize;
  321. ret = spi_nor_wait_till_ready(nor);
  322. if (ret)
  323. goto erase_err;
  324. }
  325. }
  326. write_disable(nor);
  327. spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
  328. instr->state = MTD_ERASE_DONE;
  329. mtd_erase_callback(instr);
  330. return ret;
  331. erase_err:
  332. spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
  333. instr->state = MTD_ERASE_FAILED;
  334. return ret;
  335. }
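/*
 * Decode the BP2..BP0 block-protection bits in the status register into the
 * locked address range. The protected region always ends at the top of the
 * flash and has length mtd->size >> pow, where pow is the inverted BP value.
 * For example, on an 8 MiB part BP2..BP0 = 101 gives pow = 2, i.e. the upper
 * 2 MiB is locked (see the table in the stm_lock() comment below).
 */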
  336. static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
  337. uint64_t *len)
  338. {
  339. struct mtd_info *mtd = &nor->mtd;
  340. u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
  341. int shift = ffs(mask) - 1;
  342. int pow;
  343. if (!(sr & mask)) {
  344. /* No protection */
  345. *ofs = 0;
  346. *len = 0;
  347. } else {
  348. pow = ((sr & mask) ^ mask) >> shift;
  349. *len = mtd->size >> pow;
  350. *ofs = mtd->size - *len;
  351. }
  352. }
  353. /*
  354. * Return 1 if the entire region is locked, 0 otherwise
  355. */
  356. static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
  357. u8 sr)
  358. {
  359. loff_t lock_offs;
  360. uint64_t lock_len;
  361. stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
  362. return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
  363. }
  364. /*
  365. * Lock a region of the flash. Compatible with ST Micro and similar flash.
  366. * Supports only the block protection bits BP{0,1,2} in the status register
  367. * (SR). Does not support these features found in newer SR bitfields:
  368. * - TB: top/bottom protect - only handle TB=0 (top protect)
  369. * - SEC: sector/block protect - only handle SEC=0 (block protect)
  370. * - CMP: complement protect - only support CMP=0 (range is not complemented)
  371. *
  372. * Sample table portion for 8MB flash (Winbond w25q64fw):
  373. *
  374. * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
  375. * --------------------------------------------------------------------------
  376. * X | X | 0 | 0 | 0 | NONE | NONE
  377. * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64
  378. * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32
  379. * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16
  380. * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8
  381. * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
  382. * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
  383. * X | X | 1 | 1 | 1 | 8 MB | ALL
  384. *
  385. * Returns negative on errors, 0 on success.
  386. */
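/* For example, on the 8 MiB w25q64fw above, stm_lock(nor, 6 MiB, 2 MiB)
 * computes pow = ilog2(8 MiB) - ilog2(2 MiB) = 2, so the BP field becomes
 * 0b111 - 0b010 = 0b101 -- the "Upper 1/4" (2 MB) row of the table.
 */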
  387. static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
  388. {
  389. struct mtd_info *mtd = &nor->mtd;
  390. u8 status_old, status_new;
  391. u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
  392. u8 shift = ffs(mask) - 1, pow, val;
  393. status_old = read_sr(nor);
  394. /* SPI NOR always locks to the end */
  395. if (ofs + len != mtd->size) {
  396. /* Does combined region extend to end? */
  397. if (!stm_is_locked_sr(nor, ofs + len, mtd->size - ofs - len,
  398. status_old))
  399. return -EINVAL;
  400. len = mtd->size - ofs;
  401. }
  402. /*
  403. * Need smallest pow such that:
  404. *
  405. * 1 / (2^pow) <= (len / size)
  406. *
  407. * so (assuming power-of-2 size) we do:
  408. *
  409. * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
  410. */
  411. pow = ilog2(mtd->size) - ilog2(len);
  412. val = mask - (pow << shift);
  413. if (val & ~mask)
  414. return -EINVAL;
  415. /* Don't "lock" with no region! */
  416. if (!(val & mask))
  417. return -EINVAL;
  418. status_new = (status_old & ~mask) | val;
  419. /* Only modify protection if it will not unlock other areas */
  420. if ((status_new & mask) <= (status_old & mask))
  421. return -EINVAL;
  422. write_enable(nor);
  423. return write_sr(nor, status_new);
  424. }
  425. /*
  426. * Unlock a region of the flash. See stm_lock() for more info
  427. *
  428. * Returns negative on errors, 0 on success.
  429. */
  430. static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
  431. {
  432. struct mtd_info *mtd = &nor->mtd;
  433. uint8_t status_old, status_new;
  434. u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
  435. u8 shift = ffs(mask) - 1, pow, val;
  436. status_old = read_sr(nor);
  437. /* Cannot unlock; would unlock larger region than requested */
  438. if (stm_is_locked_sr(nor, ofs - mtd->erasesize, mtd->erasesize,
  439. status_old))
  440. return -EINVAL;
  441. /*
  442. * Need largest pow such that:
  443. *
  444. * 1 / (2^pow) >= (len / size)
  445. *
  446. * so (assuming power-of-2 size) we do:
  447. *
  448. * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
  449. */
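/* For example, on a fully locked 8 MiB part, stm_unlock(nor, 0, 6 MiB) must
 * keep the top 2 MiB protected: pow = ilog2(8 MiB) - order_base_2(2 MiB) = 2,
 * so the new BP field is 0b111 - 0b010 = 0b101 ("Upper 1/4" in the table
 * above stm_lock()).
 */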
  450. pow = ilog2(mtd->size) - order_base_2(mtd->size - (ofs + len));
  451. if (ofs + len == mtd->size) {
  452. val = 0; /* fully unlocked */
  453. } else {
  454. val = mask - (pow << shift);
  455. /* Some power-of-two sizes are not supported */
  456. if (val & ~mask)
  457. return -EINVAL;
  458. }
  459. status_new = (status_old & ~mask) | val;
  460. /* Only modify protection if it will not lock other areas */
  461. if ((status_new & mask) >= (status_old & mask))
  462. return -EINVAL;
  463. write_enable(nor);
  464. return write_sr(nor, status_new);
  465. }
  466. /*
  467. * Check if a region of the flash is (completely) locked. See stm_lock() for
  468. * more info.
  469. *
  470. * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
  471. * negative on errors.
  472. */
  473. static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
  474. {
  475. int status;
  476. status = read_sr(nor);
  477. if (status < 0)
  478. return status;
  479. return stm_is_locked_sr(nor, ofs, len, status);
  480. }
  481. static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
  482. {
  483. struct spi_nor *nor = mtd_to_spi_nor(mtd);
  484. int ret;
  485. ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
  486. if (ret)
  487. return ret;
  488. ret = nor->flash_lock(nor, ofs, len);
  489. spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
  490. return ret;
  491. }
  492. static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
  493. {
  494. struct spi_nor *nor = mtd_to_spi_nor(mtd);
  495. int ret;
  496. ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
  497. if (ret)
  498. return ret;
  499. ret = nor->flash_unlock(nor, ofs, len);
  500. spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
  501. return ret;
  502. }
  503. static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
  504. {
  505. struct spi_nor *nor = mtd_to_spi_nor(mtd);
  506. int ret;
  507. ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
  508. if (ret)
  509. return ret;
  510. ret = nor->flash_is_locked(nor, ofs, len);
  511. spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
  512. return ret;
  513. }
  514. /* Used when the "_ext_id" is two bytes at most */
  515. #define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
  516. .id = { \
  517. ((_jedec_id) >> 16) & 0xff, \
  518. ((_jedec_id) >> 8) & 0xff, \
  519. (_jedec_id) & 0xff, \
  520. ((_ext_id) >> 8) & 0xff, \
  521. (_ext_id) & 0xff, \
  522. }, \
  523. .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \
  524. .sector_size = (_sector_size), \
  525. .n_sectors = (_n_sectors), \
  526. .page_size = 256, \
  527. .flags = (_flags),
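/* For example, the w25q128 entry below, INFO(0xef4018, 0, 64 * 1024, 256,
 * SECT_4K), expands to .id = { 0xef, 0x40, 0x18 }, .id_len = 3 (a plain
 * 3-byte JEDEC ID), and 256 x 64 KiB = 16 MiB of flash.
 */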
  528. #define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
  529. .id = { \
  530. ((_jedec_id) >> 16) & 0xff, \
  531. ((_jedec_id) >> 8) & 0xff, \
  532. (_jedec_id) & 0xff, \
  533. ((_ext_id) >> 16) & 0xff, \
  534. ((_ext_id) >> 8) & 0xff, \
  535. (_ext_id) & 0xff, \
  536. }, \
  537. .id_len = 6, \
  538. .sector_size = (_sector_size), \
  539. .n_sectors = (_n_sectors), \
  540. .page_size = 256, \
  541. .flags = (_flags),
  542. #define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
  543. .sector_size = (_sector_size), \
  544. .n_sectors = (_n_sectors), \
  545. .page_size = (_page_size), \
  546. .addr_width = (_addr_width), \
  547. .flags = (_flags),
  548. /* NOTE: double check command sets and memory organization when you add
  549. * more NOR chips. This current list focuses on newer chips, which
  550. * have been converging on command sets that include a JEDEC ID.
  551. *
  552. * All newly added entries should describe *hardware* and should use SECT_4K
  553. * (or SECT_4K_PMC) if hardware supports erasing 4 KiB sectors. For usage
  554. * scenarios excluding small sectors there is a config option that can be
  555. * disabled: CONFIG_MTD_SPI_NOR_USE_4K_SECTORS.
  556. * For historical (and compatibility) reasons (before we got the above config
  557. * option) some old entries may be missing the 4K flag.
  558. */
  559. static const struct flash_info spi_nor_ids[] = {
  560. /* Atmel -- some are (confusingly) marketed as "DataFlash" */
  561. { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
  562. { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
  563. { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
  564. { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
  565. { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
  566. { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
  567. { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
  568. { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
  569. { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
  570. { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
  571. /* EON -- en25xxx */
  572. { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
  573. { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
  574. { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
  575. { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
  576. { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
  577. { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
  578. { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
  579. { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
  580. /* ESMT */
  581. { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
  582. /* Everspin */
  583. { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  584. { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  585. /* Fujitsu */
  586. { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
  587. /* GigaDevice */
  588. { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
  589. { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
  590. { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, SECT_4K) },
  591. /* Intel/Numonyx -- xxxs33b */
  592. { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
  593. { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
  594. { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
  595. /* ISSI */
  596. { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
  597. { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
  598. SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  599. { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
  600. SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  601. { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
  602. SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  603. /* Macronix */
  604. { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
  605. { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
  606. { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
  607. { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
  608. { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
  609. { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
  610. { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
  611. { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
  612. { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
  613. { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
  614. { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
  615. { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
  616. { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
  617. { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) },
  618. { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
  619. /* Micron */
  620. { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
  621. { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
  622. { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
  623. { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
  624. { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
  625. { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
  626. { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
  627. { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
  628. { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
  629. { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
  630. /* PMC */
  631. { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
  632. { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
  633. { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
  634. /* Spansion -- single (large) sector size only, at least
  635. * for the chips listed here (without boot sectors).
  636. */
  637. { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  638. { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  639. { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
  640. { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  641. { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  642. { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
  643. { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
  644. { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
  645. { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
  646. { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  647. { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  648. { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
  649. { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
  650. { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
  651. { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
  652. { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
  653. { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  654. { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  655. { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  656. { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
  657. { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
  658. { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
  659. { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
  660. /* SST -- large erase sizes are "overlays", "sectors" are 4K */
  661. { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
  662. { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
  663. { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
  664. { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
  665. { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
  666. { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
  667. { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
  668. { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
  669. { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) },
  670. { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) },
  671. { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
  672. { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
  673. /* ST Microelectronics -- newer production may have feature updates */
  674. { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
  675. { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
  676. { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
  677. { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
  678. { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
  679. { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
  680. { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
  681. { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
  682. { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
  683. { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
  684. { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
  685. { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
  686. { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
  687. { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
  688. { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
  689. { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
  690. { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
  691. { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
  692. { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
  693. { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
  694. { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
  695. { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
  696. { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
  697. { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
  698. { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
  699. { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
  700. { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
  701. { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
  702. { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
  703. { "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },
  704. /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
  705. { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
  706. { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
  707. { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
  708. { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
  709. { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
  710. { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
  711. { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
  712. { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
  713. { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  714. { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
  715. { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
  716. { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  717. { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  718. { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
  719. { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
  720. { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
  721. { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
  722. /* Catalyst / On Semiconductor -- non-JEDEC */
  723. { "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  724. { "cat25c03", CAT25_INFO( 32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  725. { "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  726. { "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  727. { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  728. { },
  729. };
  730. static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
  731. {
  732. int tmp;
  733. u8 id[SPI_NOR_MAX_ID_LEN];
  734. const struct flash_info *info;
  735. tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
  736. if (tmp < 0) {
  737. dev_dbg(nor->dev, " error %d reading JEDEC ID\n", tmp);
  738. return ERR_PTR(tmp);
  739. }
  740. for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
  741. info = &spi_nor_ids[tmp];
  742. if (info->id_len) {
  743. if (!memcmp(info->id, id, info->id_len))
  744. return &spi_nor_ids[tmp];
  745. }
  746. }
  747. dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
  748. id[0], id[1], id[2]);
  749. return ERR_PTR(-ENODEV);
  750. }
  751. static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
  752. size_t *retlen, u_char *buf)
  753. {
  754. struct spi_nor *nor = mtd_to_spi_nor(mtd);
  755. int ret;
  756. dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
  757. ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
  758. if (ret)
  759. return ret;
  760. ret = nor->read(nor, from, len, retlen, buf);
  761. spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
  762. return ret;
  763. }
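/*
 * Write data using the SST auto-address-increment (AAI) word-program scheme:
 * a leading byte at an odd address (if any) is written with byte-program
 * (SPINOR_OP_BP), the bulk is written two bytes at a time with
 * SPINOR_OP_AAI_WP, and a trailing odd byte is again written with byte-program.
 */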
  764. static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
  765. size_t *retlen, const u_char *buf)
  766. {
  767. struct spi_nor *nor = mtd_to_spi_nor(mtd);
  768. size_t actual;
  769. int ret;
  770. dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
  771. ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
  772. if (ret)
  773. return ret;
  774. write_enable(nor);
  775. nor->sst_write_second = false;
  776. actual = to % 2;
  777. /* Start write from odd address. */
  778. if (actual) {
  779. nor->program_opcode = SPINOR_OP_BP;
  780. /* write one byte. */
  781. nor->write(nor, to, 1, retlen, buf);
  782. ret = spi_nor_wait_till_ready(nor);
  783. if (ret)
  784. goto time_out;
  785. }
  786. to += actual;
  787. /* Write out most of the data here. */
  788. for (; actual < len - 1; actual += 2) {
  789. nor->program_opcode = SPINOR_OP_AAI_WP;
  790. /* write two bytes. */
  791. nor->write(nor, to, 2, retlen, buf + actual);
  792. ret = spi_nor_wait_till_ready(nor);
  793. if (ret)
  794. goto time_out;
  795. to += 2;
  796. nor->sst_write_second = true;
  797. }
  798. nor->sst_write_second = false;
  799. write_disable(nor);
  800. ret = spi_nor_wait_till_ready(nor);
  801. if (ret)
  802. goto time_out;
  803. /* Write out trailing byte if it exists. */
  804. if (actual != len) {
  805. write_enable(nor);
  806. nor->program_opcode = SPINOR_OP_BP;
  807. nor->write(nor, to, 1, retlen, buf + actual);
  808. ret = spi_nor_wait_till_ready(nor);
  809. if (ret)
  810. goto time_out;
  811. write_disable(nor);
  812. }
  813. time_out:
  814. spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
  815. return ret;
  816. }
  817. /*
  818. * Write an address range to the nor chip. Data must be written in
  819. * page-size (nor->page_size) chunks. The address range may be any size provided
  820. * it is within the physical boundaries.
  821. */
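/* For example, with 256-byte pages, a 600-byte write to offset 0x30 is split
 * into 208 bytes (up to the end of the first page), then 256 bytes, then 136
 * bytes. */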
  822. static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
  823. size_t *retlen, const u_char *buf)
  824. {
  825. struct spi_nor *nor = mtd_to_spi_nor(mtd);
  826. u32 page_offset, page_size, i;
  827. int ret;
  828. dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
  829. ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
  830. if (ret)
  831. return ret;
  832. write_enable(nor);
  833. page_offset = to & (nor->page_size - 1);
  834. /* do all the bytes fit onto one page? */
  835. if (page_offset + len <= nor->page_size) {
  836. nor->write(nor, to, len, retlen, buf);
  837. } else {
  838. /* the size of data remaining on the first page */
  839. page_size = nor->page_size - page_offset;
  840. nor->write(nor, to, page_size, retlen, buf);
  841. /* write everything in nor->page_size chunks */
  842. for (i = page_size; i < len; i += page_size) {
  843. page_size = len - i;
  844. if (page_size > nor->page_size)
  845. page_size = nor->page_size;
  846. ret = spi_nor_wait_till_ready(nor);
  847. if (ret)
  848. goto write_err;
  849. write_enable(nor);
  850. nor->write(nor, to + i, page_size, retlen, buf + i);
  851. }
  852. }
  853. ret = spi_nor_wait_till_ready(nor);
  854. write_err:
  855. spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
  856. return ret;
  857. }
  858. static int macronix_quad_enable(struct spi_nor *nor)
  859. {
  860. int ret, val;
  861. val = read_sr(nor);
  862. write_enable(nor);
  863. write_sr(nor, val | SR_QUAD_EN_MX);
  864. if (spi_nor_wait_till_ready(nor))
  865. return 1;
  866. ret = read_sr(nor);
  867. if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
  868. dev_err(nor->dev, "Macronix Quad bit not set\n");
  869. return -EINVAL;
  870. }
  871. return 0;
  872. }
  873. /*
  874. * Write the status register and configuration register with 2 bytes.
  875. * The first byte will be written to the status register, while the
  876. * second byte will be written to the configuration register.
  877. * Returns negative if an error occurred.
  878. */
  879. static int write_sr_cr(struct spi_nor *nor, u16 val)
  880. {
  881. nor->cmd_buf[0] = val & 0xff;
  882. nor->cmd_buf[1] = (val >> 8);
  883. return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 2);
  884. }
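/* spansion_quad_enable() shifts CR_QUAD_EN_SPAN into the upper byte so that
 * write_sr_cr() lands it in the configuration register. */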
  885. static int spansion_quad_enable(struct spi_nor *nor)
  886. {
  887. int ret;
  888. int quad_en = CR_QUAD_EN_SPAN << 8;
  889. write_enable(nor);
  890. ret = write_sr_cr(nor, quad_en);
  891. if (ret < 0) {
  892. dev_err(nor->dev,
  893. "error while writing configuration register\n");
  894. return -EINVAL;
  895. }
  896. ret = spi_nor_wait_till_ready(nor);
  897. if (ret) {
  898. dev_err(nor->dev,
  899. "timeout while writing configuration register\n");
  900. return ret;
  901. }
  902. /* read back and check it */
  903. ret = read_cr(nor);
  904. if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
  905. dev_err(nor->dev, "Spansion Quad bit not set\n");
  906. return -EINVAL;
  907. }
  908. return 0;
  909. }
  910. static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
  911. {
  912. int status;
  913. switch (JEDEC_MFR(info)) {
  914. case SNOR_MFR_MACRONIX:
  915. status = macronix_quad_enable(nor);
  916. if (status) {
  917. dev_err(nor->dev, "Macronix quad-read not enabled\n");
  918. return -EINVAL;
  919. }
  920. return status;
  921. case SNOR_MFR_MICRON:
  922. return 0;
  923. default:
  924. status = spansion_quad_enable(nor);
  925. if (status) {
  926. dev_err(nor->dev, "Spansion quad-read not enabled\n");
  927. return -EINVAL;
  928. }
  929. return status;
  930. }
  931. }
  932. static int spi_nor_check(struct spi_nor *nor)
  933. {
  934. if (!nor->dev || !nor->read || !nor->write ||
  935. !nor->read_reg || !nor->write_reg || !nor->erase) {
  936. pr_err("spi-nor: please fill all the necessary fields!\n");
  937. return -EINVAL;
  938. }
  939. return 0;
  940. }
  941. int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
  942. {
  943. const struct flash_info *info = NULL;
  944. struct device *dev = nor->dev;
  945. struct mtd_info *mtd = &nor->mtd;
  946. struct device_node *np = nor->flash_node;
  947. int ret;
  948. int i;
  949. ret = spi_nor_check(nor);
  950. if (ret)
  951. return ret;
  952. if (name)
  953. info = spi_nor_match_id(name);
  954. /* Try to auto-detect if chip name wasn't specified or not found */
  955. if (!info)
  956. info = spi_nor_read_id(nor);
  957. if (IS_ERR_OR_NULL(info))
  958. return -ENOENT;
  959. /*
  960. * If caller has specified name of flash model that can normally be
  961. * detected using JEDEC, let's verify it.
  962. */
  963. if (name && info->id_len) {
  964. const struct flash_info *jinfo;
  965. jinfo = spi_nor_read_id(nor);
  966. if (IS_ERR(jinfo)) {
  967. return PTR_ERR(jinfo);
  968. } else if (jinfo != info) {
  969. /*
  970. * JEDEC knows better, so overwrite platform ID. We
  971. * can't trust partitions any longer, but we'll let
  972. * mtd apply them anyway, since some partitions may be
  973. * marked read-only, and we don't want to lose that
  974. * information, even if it's not 100% accurate.
  975. */
  976. dev_warn(dev, "found %s, expected %s\n",
  977. jinfo->name, info->name);
  978. info = jinfo;
  979. }
  980. }
  981. mutex_init(&nor->lock);
  982. /*
  983. * Atmel, SST, Intel/Numonyx, and other serial NOR chips tend to power up
  984. * with the software protection bits set.
  985. */
  986. if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
  987. JEDEC_MFR(info) == SNOR_MFR_INTEL ||
  988. JEDEC_MFR(info) == SNOR_MFR_SST) {
  989. write_enable(nor);
  990. write_sr(nor, 0);
  991. }
  992. if (!mtd->name)
  993. mtd->name = dev_name(dev);
  994. mtd->priv = nor;
  995. mtd->type = MTD_NORFLASH;
  996. mtd->writesize = 1;
  997. mtd->flags = MTD_CAP_NORFLASH;
  998. mtd->size = info->sector_size * info->n_sectors;
  999. mtd->_erase = spi_nor_erase;
  1000. mtd->_read = spi_nor_read;
  1001. /* NOR protection support for STmicro/Micron chips and similar */
  1002. if (JEDEC_MFR(info) == SNOR_MFR_MICRON) {
  1003. nor->flash_lock = stm_lock;
  1004. nor->flash_unlock = stm_unlock;
  1005. nor->flash_is_locked = stm_is_locked;
  1006. }
  1007. if (nor->flash_lock && nor->flash_unlock && nor->flash_is_locked) {
  1008. mtd->_lock = spi_nor_lock;
  1009. mtd->_unlock = spi_nor_unlock;
  1010. mtd->_is_locked = spi_nor_is_locked;
  1011. }
  1012. /* sst nor chips use AAI word program */
  1013. if (info->flags & SST_WRITE)
  1014. mtd->_write = sst_write;
  1015. else
  1016. mtd->_write = spi_nor_write;
  1017. if (info->flags & USE_FSR)
  1018. nor->flags |= SNOR_F_USE_FSR;
  1019. #ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
  1020. /* prefer "small sector" erase if possible */
  1021. if (info->flags & SECT_4K) {
  1022. nor->erase_opcode = SPINOR_OP_BE_4K;
  1023. mtd->erasesize = 4096;
  1024. } else if (info->flags & SECT_4K_PMC) {
  1025. nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
  1026. mtd->erasesize = 4096;
  1027. } else
  1028. #endif
  1029. {
  1030. nor->erase_opcode = SPINOR_OP_SE;
  1031. mtd->erasesize = info->sector_size;
  1032. }
  1033. if (info->flags & SPI_NOR_NO_ERASE)
  1034. mtd->flags |= MTD_NO_ERASE;
  1035. mtd->dev.parent = dev;
  1036. nor->page_size = info->page_size;
  1037. mtd->writebufsize = nor->page_size;
  1038. if (np) {
  1039. /* If we were instantiated by DT, use it */
  1040. if (of_property_read_bool(np, "m25p,fast-read"))
  1041. nor->flash_read = SPI_NOR_FAST;
  1042. else
  1043. nor->flash_read = SPI_NOR_NORMAL;
  1044. } else {
  1045. /* If we weren't instantiated by DT, default to fast-read */
  1046. nor->flash_read = SPI_NOR_FAST;
  1047. }
  1048. /* Some devices cannot do fast-read, no matter what DT tells us */
  1049. if (info->flags & SPI_NOR_NO_FR)
  1050. nor->flash_read = SPI_NOR_NORMAL;
  1051. /* Quad/Dual-read mode takes precedence over fast/normal */
  1052. if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) {
  1053. ret = set_quad_mode(nor, info);
  1054. if (ret) {
  1055. dev_err(dev, "quad mode not supported\n");
  1056. return ret;
  1057. }
  1058. nor->flash_read = SPI_NOR_QUAD;
  1059. } else if (mode == SPI_NOR_DUAL && info->flags & SPI_NOR_DUAL_READ) {
  1060. nor->flash_read = SPI_NOR_DUAL;
  1061. }
  1062. /* Default commands */
  1063. switch (nor->flash_read) {
  1064. case SPI_NOR_QUAD:
  1065. nor->read_opcode = SPINOR_OP_READ_1_1_4;
  1066. break;
  1067. case SPI_NOR_DUAL:
  1068. nor->read_opcode = SPINOR_OP_READ_1_1_2;
  1069. break;
  1070. case SPI_NOR_FAST:
  1071. nor->read_opcode = SPINOR_OP_READ_FAST;
  1072. break;
  1073. case SPI_NOR_NORMAL:
  1074. nor->read_opcode = SPINOR_OP_READ;
  1075. break;
  1076. default:
  1077. dev_err(dev, "No Read opcode defined\n");
  1078. return -EINVAL;
  1079. }
  1080. nor->program_opcode = SPINOR_OP_PP;
  1081. if (info->addr_width)
  1082. nor->addr_width = info->addr_width;
  1083. else if (mtd->size > 0x1000000) {
  1084. /* enable 4-byte addressing if the device exceeds 16MiB */
  1085. nor->addr_width = 4;
  1086. if (JEDEC_MFR(info) == SNOR_MFR_SPANSION) {
  1087. /* Dedicated 4-byte command set */
  1088. switch (nor->flash_read) {
  1089. case SPI_NOR_QUAD:
  1090. nor->read_opcode = SPINOR_OP_READ4_1_1_4;
  1091. break;
  1092. case SPI_NOR_DUAL:
  1093. nor->read_opcode = SPINOR_OP_READ4_1_1_2;
  1094. break;
  1095. case SPI_NOR_FAST:
  1096. nor->read_opcode = SPINOR_OP_READ4_FAST;
  1097. break;
  1098. case SPI_NOR_NORMAL:
  1099. nor->read_opcode = SPINOR_OP_READ4;
  1100. break;
  1101. }
  1102. nor->program_opcode = SPINOR_OP_PP_4B;
  1103. /* No small sector erase for 4-byte command set */
  1104. nor->erase_opcode = SPINOR_OP_SE_4B;
  1105. mtd->erasesize = info->sector_size;
  1106. } else
  1107. set_4byte(nor, info, 1);
  1108. } else {
  1109. nor->addr_width = 3;
  1110. }
  1111. nor->read_dummy = spi_nor_read_dummy_cycles(nor);
  1112. dev_info(dev, "%s (%lld Kbytes)\n", info->name,
  1113. (long long)mtd->size >> 10);
  1114. dev_dbg(dev,
  1115. "mtd .name = %s, .size = 0x%llx (%lldMiB), "
  1116. ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
  1117. mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
  1118. mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
  1119. if (mtd->numeraseregions)
  1120. for (i = 0; i < mtd->numeraseregions; i++)
  1121. dev_dbg(dev,
  1122. "mtd.eraseregions[%d] = { .offset = 0x%llx, "
  1123. ".erasesize = 0x%.8x (%uKiB), "
  1124. ".numblocks = %d }\n",
  1125. i, (long long)mtd->eraseregions[i].offset,
  1126. mtd->eraseregions[i].erasesize,
  1127. mtd->eraseregions[i].erasesize / 1024,
  1128. mtd->eraseregions[i].numblocks);
  1129. return 0;
  1130. }
  1131. EXPORT_SYMBOL_GPL(spi_nor_scan);
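/*
 * A minimal sketch of how a SPI controller driver might hook into this
 * framework. Illustrative only: the my_*() helpers are placeholders and the
 * exact callback prototypes are defined in <linux/mtd/spi-nor.h>.
 *
 *   struct spi_nor *nor = devm_kzalloc(dev, sizeof(*nor), GFP_KERNEL);
 *   int ret;
 *
 *   nor->dev = dev;
 *   nor->flash_node = dev->of_node;
 *   nor->read_reg = my_read_reg;   (issue an opcode, read back "len" bytes)
 *   nor->write_reg = my_write_reg; (issue an opcode followed by "len" bytes)
 *   nor->read = my_read;           (read "len" bytes from a flash offset)
 *   nor->write = my_write;         (program "len" bytes at a flash offset)
 *   nor->erase = my_erase;         (erase one sector at a flash offset)
 *
 *   ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
 *   if (ret)
 *           return ret;
 *   return mtd_device_register(&nor->mtd, NULL, 0);
 */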
  1132. static const struct flash_info *spi_nor_match_id(const char *name)
  1133. {
  1134. const struct flash_info *id = spi_nor_ids;
  1135. while (id->name) {
  1136. if (!strcmp(name, id->name))
  1137. return id;
  1138. id++;
  1139. }
  1140. return NULL;
  1141. }
  1142. MODULE_LICENSE("GPL");
  1143. MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
  1144. MODULE_AUTHOR("Mike Lavender");
  1145. MODULE_DESCRIPTION("framework for SPI NOR");