/* vmu-flash.c
 * Driver for SEGA Dreamcast Visual Memory Unit
 *
 * Copyright (c) Adrian McMenamin 2002 - 2009
 * Copyright (c) Paul Mundt 2001
 *
 * Licensed under version 2 of the
 * GNU General Public Licence
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/maple.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
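
/*
 * Each partition on the memory card is exposed as its own MTD device.
 * Block reads and writes travel over the Maple bus in phases (fractions
 * of a block), and each partition keeps a one-block read cache to soften
 * the cost of the byte-by-byte read path further down.
 */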

struct vmu_cache {
	unsigned char *buffer;		/* Cache */
	unsigned int block;		/* Which block was cached */
	unsigned long jiffies_atc;	/* When was it cached? */
	int valid;
};

struct mdev_part {
	struct maple_device *mdev;
	int partition;
};

struct vmupart {
	u16 user_blocks;
	u16 root_block;
	u16 numblocks;
	char *name;
	struct vmu_cache *pcache;
};

struct memcard {
	u16 tempA;
	u16 tempB;
	u32 partitions;
	u32 blocklen;
	u32 writecnt;
	u32 readcnt;
	u32 removeable;
	int partition;
	int read;
	unsigned char *blockread;
	struct vmupart *parts;
	struct mtd_info *mtd;
};

struct vmu_block {
	unsigned int num;	/* block number */
	unsigned int ofs;	/* block offset */
};
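
/*
 * Map a byte offset within a partition onto a (block, offset) pair.
 * Illustratively, with the usual 512-byte blocks an offset of 0x300
 * falls in block 1 at offset 0x100. Returns NULL if the offset lies
 * beyond the partition or the allocation fails.
 */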
static struct vmu_block *ofs_to_block(unsigned long src_ofs,
	struct mtd_info *mtd, int partition)
{
	struct vmu_block *vblock;
	struct maple_device *mdev;
	struct memcard *card;
	struct mdev_part *mpart;
	int num;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	card = maple_get_drvdata(mdev);

	if (src_ofs >= card->parts[partition].numblocks * card->blocklen)
		goto failed;

	num = src_ofs / card->blocklen;
	if (num > card->parts[partition].numblocks)
		goto failed;

	vblock = kmalloc(sizeof(struct vmu_block), GFP_KERNEL);
	if (!vblock)
		goto failed;

	vblock->num = num;
	vblock->ofs = src_ofs % card->blocklen;
	return vblock;

failed:
	return NULL;
}

/* Maple bus callback function for reads */
static void vmu_blockread(struct mapleq *mq)
{
	struct maple_device *mdev;
	struct memcard *card;

	mdev = mq->dev;
	card = maple_get_drvdata(mdev);
	/* copy the read in data */

	if (unlikely(!card->blockread))
		return;

	memcpy(card->blockread, mq->recvbuf->buf + 12,
		card->blocklen/card->readcnt);
}
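
/*
 * Phased-read arithmetic: each BREAD transfer carries blocklen/readcnt
 * bytes, so - illustratively - a 512-byte block advertised with
 * readcnt = 4 arrives as four 128-byte phases that are reassembled
 * into the caller's buffer and the partition cache below.
 */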

/* Interface with maple bus to read blocks
 * caching the results so that other parts
 * of the driver can access block reads */
static int maple_vmu_read_block(unsigned int num, unsigned char *buf,
	struct mtd_info *mtd)
{
	struct memcard *card;
	struct mdev_part *mpart;
	struct maple_device *mdev;
	int partition, error = 0, x, wait;
	unsigned char *blockread = NULL;
	struct vmu_cache *pcache;
	__be32 sendbuf;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);
	pcache = card->parts[partition].pcache;
	pcache->valid = 0;

	/* prepare the cache for this block */
	if (!pcache->buffer) {
		pcache->buffer = kmalloc(card->blocklen, GFP_KERNEL);
		if (!pcache->buffer) {
			dev_err(&mdev->dev, "VMU at (%d, %d) - read fails due"
				" to lack of memory\n", mdev->port,
				mdev->unit);
			error = -ENOMEM;
			goto outB;
		}
	}

	/*
	 * Reads may be phased - again the hardware spec
	 * supports this - though there may not be any devices
	 * in the wild that implement it, but we handle it here
	 */
	for (x = 0; x < card->readcnt; x++) {
		sendbuf = cpu_to_be32(partition << 24 | x << 16 | num);

		if (atomic_read(&mdev->busy) == 1) {
			wait_event_interruptible_timeout(mdev->maple_wait,
				atomic_read(&mdev->busy) == 0, HZ);
			if (atomic_read(&mdev->busy) == 1) {
				dev_notice(&mdev->dev, "VMU at (%d, %d)"
					" is busy\n", mdev->port, mdev->unit);
				error = -EAGAIN;
				goto outB;
			}
		}

		atomic_set(&mdev->busy, 1);
		blockread = kmalloc(card->blocklen/card->readcnt, GFP_KERNEL);
		if (!blockread) {
			error = -ENOMEM;
			atomic_set(&mdev->busy, 0);
			goto outB;
		}
		card->blockread = blockread;

		maple_getcond_callback(mdev, vmu_blockread, 0,
			MAPLE_FUNC_MEMCARD);
		error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
			MAPLE_COMMAND_BREAD, 2, &sendbuf);
		/* Very long timeouts seem to be needed when box is stressed */
		wait = wait_event_interruptible_timeout(mdev->maple_wait,
			(atomic_read(&mdev->busy) == 0 ||
			atomic_read(&mdev->busy) == 2), HZ * 3);
		/*
		 * MTD layer does not handle hotplugging well
		 * so have to return errors when VMU is unplugged
		 * in the middle of a read (busy == 2)
		 */
		if (error || atomic_read(&mdev->busy) == 2) {
			if (atomic_read(&mdev->busy) == 2)
				error = -ENXIO;
			atomic_set(&mdev->busy, 0);
			card->blockread = NULL;
			goto outA;
		}
		if (wait == 0 || wait == -ERESTARTSYS) {
			card->blockread = NULL;
			atomic_set(&mdev->busy, 0);
			error = -EIO;
			list_del_init(&(mdev->mq->list));
			kfree(mdev->mq->sendbuf);
			mdev->mq->sendbuf = NULL;
			if (wait == -ERESTARTSYS) {
				dev_warn(&mdev->dev, "VMU read on (%d, %d)"
					" interrupted on block 0x%X\n",
					mdev->port, mdev->unit, num);
			} else
				dev_notice(&mdev->dev, "VMU read on (%d, %d)"
					" timed out on block 0x%X\n",
					mdev->port, mdev->unit, num);
			goto outA;
		}

		memcpy(buf + (card->blocklen/card->readcnt) * x, blockread,
			card->blocklen/card->readcnt);
		memcpy(pcache->buffer + (card->blocklen/card->readcnt) * x,
			card->blockread, card->blocklen/card->readcnt);
		card->blockread = NULL;
		pcache->block = num;
		pcache->jiffies_atc = jiffies;
		pcache->valid = 1;
		kfree(blockread);
	}

	return error;

outA:
	kfree(blockread);
outB:
	return error;
}
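
/*
 * A write phase is sent as one location word (partition in the top byte,
 * phase number in the next, block number in the low half) followed by
 * phaselen bytes of data; the length handed to maple_add_packet() below,
 * phaselen / 4 + 2, appears to also count the function-code word that
 * the bus driver prepends.
 */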

/* communicate with maple bus for phased writing */
static int maple_vmu_write_block(unsigned int num, const unsigned char *buf,
	struct mtd_info *mtd)
{
	struct memcard *card;
	struct mdev_part *mpart;
	struct maple_device *mdev;
	int partition, error, locking, x, phaselen, wait;
	__be32 *sendbuf;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);

	phaselen = card->blocklen/card->writecnt;

	sendbuf = kmalloc(phaselen + 4, GFP_KERNEL);
	if (!sendbuf) {
		error = -ENOMEM;
		goto fail_nosendbuf;
	}
	for (x = 0; x < card->writecnt; x++) {
		sendbuf[0] = cpu_to_be32(partition << 24 | x << 16 | num);
		memcpy(&sendbuf[1], buf + phaselen * x, phaselen);
		/* wait until the device is not busy doing something else
		 * or 1 second - whichever is longer */
		if (atomic_read(&mdev->busy) == 1) {
			wait_event_interruptible_timeout(mdev->maple_wait,
				atomic_read(&mdev->busy) == 0, HZ);
			if (atomic_read(&mdev->busy) == 1) {
				error = -EBUSY;
				dev_notice(&mdev->dev, "VMU write at (%d, %d)"
					" failed - device is busy\n",
					mdev->port, mdev->unit);
				goto fail_nolock;
			}
		}

		atomic_set(&mdev->busy, 1);
		locking = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
			MAPLE_COMMAND_BWRITE, phaselen / 4 + 2, sendbuf);
		wait = wait_event_interruptible_timeout(mdev->maple_wait,
			atomic_read(&mdev->busy) == 0, HZ/10);
		if (locking) {
			error = -EIO;
			atomic_set(&mdev->busy, 0);
			goto fail_nolock;
		}
		if (atomic_read(&mdev->busy) == 2) {
			atomic_set(&mdev->busy, 0);
		} else if (wait == 0 || wait == -ERESTARTSYS) {
			error = -EIO;
			dev_warn(&mdev->dev, "Write at (%d, %d) of block"
				" 0x%X at phase %d failed: could not"
				" communicate with VMU\n", mdev->port,
				mdev->unit, num, x);
			atomic_set(&mdev->busy, 0);
			kfree(mdev->mq->sendbuf);
			mdev->mq->sendbuf = NULL;
			list_del_init(&(mdev->mq->list));
			goto fail_nolock;
		}
	}
	kfree(sendbuf);

	return card->blocklen;

fail_nolock:
	kfree(sendbuf);
fail_nosendbuf:
	dev_err(&mdev->dev, "VMU (%d, %d): write failed\n", mdev->port,
		mdev->unit);
	return error;
}

/* mtd function to simulate reading byte by byte */
static unsigned char vmu_flash_read_char(unsigned long ofs, int *retval,
	struct mtd_info *mtd)
{
	struct vmu_block *vblock;
	struct memcard *card;
	struct mdev_part *mpart;
	struct maple_device *mdev;
	unsigned char *buf, ret;
	int partition, error;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);
	*retval = 0;

	buf = kmalloc(card->blocklen, GFP_KERNEL);
	if (!buf) {
		*retval = 1;
		ret = -ENOMEM;
		goto finish;
	}

	vblock = ofs_to_block(ofs, mtd, partition);
	if (!vblock) {
		*retval = 3;
		ret = -ENOMEM;
		goto out_buf;
	}

	error = maple_vmu_read_block(vblock->num, buf, mtd);
	if (error) {
		ret = error;
		*retval = 2;
		goto out_vblock;
	}

	ret = buf[vblock->ofs];

out_vblock:
	kfree(vblock);
out_buf:
	kfree(buf);
finish:
	return ret;
}
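
/*
 * Read strategy: on a cache hit (same block, cached within the last
 * second) data is copied straight out of pcache->buffer; on a miss a
 * single byte is fetched through vmu_flash_read_char(), which as a side
 * effect refills the cache with the whole containing block.
 */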

/* mtd higher order function to read flash */
static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
	size_t *retlen, u_char *buf)
{
	struct maple_device *mdev;
	struct memcard *card;
	struct mdev_part *mpart;
	struct vmu_cache *pcache;
	struct vmu_block *vblock;
	int index = 0, retval, partition, leftover, numblocks;
	unsigned char cx;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);

	numblocks = card->parts[partition].numblocks;
	if (from + len > numblocks * card->blocklen)
		len = numblocks * card->blocklen - from;
	if (len == 0)
		return -EIO;
	/* Have we cached this bit already? */
	pcache = card->parts[partition].pcache;
	do {
		vblock = ofs_to_block(from + index, mtd, partition);
		if (!vblock)
			return -ENOMEM;
		/* Have we cached this and is the cache valid and timely? */
		if (pcache->valid &&
			time_before(jiffies, pcache->jiffies_atc + HZ) &&
			(pcache->block == vblock->num)) {
			/* we have cached it, so do necessary copying */
			leftover = card->blocklen - vblock->ofs;
			if (vblock->ofs + len - index < card->blocklen) {
				/* only a bit of this block to copy */
				memcpy(buf + index,
					pcache->buffer + vblock->ofs,
					len - index);
				index = len;
			} else {
				/* otherwise copy remainder of whole block */
				memcpy(buf + index, pcache->buffer +
					vblock->ofs, leftover);
				index += leftover;
			}
		} else {
			/*
			 * Not cached so read one byte -
			 * but cache the rest of the block
			 */
			cx = vmu_flash_read_char(from + index, &retval, mtd);
			if (retval) {
				*retlen = index;
				kfree(vblock);
				return cx;
			}
			memset(buf + index, cx, 1);
			index++;
		}
		kfree(vblock);
	} while (len > index);
	*retlen = index;

	return 0;
}
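
/*
 * Writes are read-modify-write at block granularity: the target block
 * is read in, the affected bytes are patched, the whole block is
 * written back and the partition's read cache is invalidated.
 */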
static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
	size_t *retlen, const u_char *buf)
{
	struct maple_device *mdev;
	struct memcard *card;
	struct mdev_part *mpart;
	int index = 0, partition, error = 0, numblocks;
	struct vmu_cache *pcache;
	struct vmu_block *vblock;
	unsigned char *buffer;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);

	numblocks = card->parts[partition].numblocks;
	if (to + len > numblocks * card->blocklen)
		len = numblocks * card->blocklen - to;
	if (len == 0) {
		error = -EIO;
		goto failed;
	}

	vblock = ofs_to_block(to, mtd, partition);
	if (!vblock) {
		error = -ENOMEM;
		goto failed;
	}

	buffer = kmalloc(card->blocklen, GFP_KERNEL);
	if (!buffer) {
		error = -ENOMEM;
		goto fail_buffer;
	}

	do {
		/* Read in the block we are to write to */
		error = maple_vmu_read_block(vblock->num, buffer, mtd);
		if (error)
			goto fail_io;

		do {
			buffer[vblock->ofs] = buf[index];
			vblock->ofs++;
			index++;
			if (index >= len)
				break;
		} while (vblock->ofs < card->blocklen);

		/* write out new buffer */
		error = maple_vmu_write_block(vblock->num, buffer, mtd);
		/* invalidate the cache */
		pcache = card->parts[partition].pcache;
		pcache->valid = 0;

		if (error != card->blocklen)
			goto fail_io;

		vblock->num++;
		vblock->ofs = 0;
	} while (len > index);

	kfree(buffer);
	*retlen = index;
	kfree(vblock);
	return 0;

fail_io:
	kfree(buffer);
fail_buffer:
	kfree(vblock);
failed:
	dev_err(&mdev->dev, "VMU write failing with error %d\n", error);
	return error;
}

static void vmu_flash_sync(struct mtd_info *mtd)
{
	/* Do nothing here */
}
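
/*
 * The GETMINFO reply is read as an array of 16-bit words: word 12
 * appears to hold the user block count and word 6 the root block
 * number. numblocks is root block + 1, the root block sitting at the
 * top of the partition.
 */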

/* Maple bus callback function to recursively query hardware details */
static void vmu_queryblocks(struct mapleq *mq)
{
	struct maple_device *mdev;
	unsigned short *res;
	struct memcard *card;
	__be32 partnum;
	struct vmu_cache *pcache;
	struct mdev_part *mpart;
	struct mtd_info *mtd_cur;
	struct vmupart *part_cur;
	int error;

	mdev = mq->dev;
	card = maple_get_drvdata(mdev);
	res = (unsigned short *) (mq->recvbuf->buf);
	card->tempA = res[12];
	card->tempB = res[6];

	dev_info(&mdev->dev, "VMU device at partition %d has %d user "
		"blocks with a root block at %d\n", card->partition,
		card->tempA, card->tempB);

	part_cur = &card->parts[card->partition];
	part_cur->user_blocks = card->tempA;
	part_cur->root_block = card->tempB;
	part_cur->numblocks = card->tempB + 1;
	part_cur->name = kmalloc(12, GFP_KERNEL);
	if (!part_cur->name)
		goto fail_name;

	sprintf(part_cur->name, "vmu%d.%d.%d",
		mdev->port, mdev->unit, card->partition);

	mtd_cur = &card->mtd[card->partition];
	mtd_cur->name = part_cur->name;
	mtd_cur->type = 8;
	mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE;
	mtd_cur->size = part_cur->numblocks * card->blocklen;
	mtd_cur->erasesize = card->blocklen;
	mtd_cur->_write = vmu_flash_write;
	mtd_cur->_read = vmu_flash_read;
	mtd_cur->_sync = vmu_flash_sync;
	mtd_cur->writesize = card->blocklen;

	mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL);
	if (!mpart)
		goto fail_mpart;

	mpart->mdev = mdev;
	mpart->partition = card->partition;
	mtd_cur->priv = mpart;
	mtd_cur->owner = THIS_MODULE;

	pcache = kzalloc(sizeof(struct vmu_cache), GFP_KERNEL);
	if (!pcache)
		goto fail_cache_create;
	part_cur->pcache = pcache;

	error = mtd_device_register(mtd_cur, NULL, 0);
	if (error)
		goto fail_mtd_register;

	maple_getcond_callback(mdev, NULL, 0,
		MAPLE_FUNC_MEMCARD);

	/*
	 * Set up a recursive call to the (probably theoretical)
	 * second or more partition
	 */
	if (++card->partition < card->partitions) {
		partnum = cpu_to_be32(card->partition << 24);
		maple_getcond_callback(mdev, vmu_queryblocks, 0,
			MAPLE_FUNC_MEMCARD);
		maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
			MAPLE_COMMAND_GETMINFO, 2, &partnum);
	}
	return;

fail_mtd_register:
	dev_err(&mdev->dev, "Could not register maple device at (%d, %d)"
		" error is 0x%X\n", mdev->port, mdev->unit, error);
	for (error = 0; error <= card->partition; error++) {
		kfree(((card->parts)[error]).pcache);
		((card->parts)[error]).pcache = NULL;
	}
fail_cache_create:
fail_mpart:
	for (error = 0; error <= card->partition; error++) {
		kfree(((card->mtd)[error]).priv);
		((card->mtd)[error]).priv = NULL;
	}
	maple_getcond_callback(mdev, NULL, 0,
		MAPLE_FUNC_MEMCARD);
	kfree(part_cur->name);
fail_name:
	return;
}
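
/*
 * basic_flash_data packs the card geometry, decoded below as:
 * bits 31-24 partitions - 1, bits 23-16 (block length / 32) - 1,
 * bits 15-12 write phases per block, bits 11-8 read phases per block
 * and bit 7 the removable flag. A hypothetical single-partition card
 * with 512-byte blocks, four write phases and one read phase would
 * thus report something like 0x000F4180.
 */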

/* Handles very basic info about the flash, queries for details */
static int vmu_connect(struct maple_device *mdev)
{
	unsigned long test_flash_data, basic_flash_data;
	int c, error;
	struct memcard *card;
	u32 partnum = 0;

	test_flash_data = be32_to_cpu(mdev->devinfo.function);
	/* Need to count how many bits are set - to find out which
	 * function_data element has details of the memory card
	 */
	c = hweight_long(test_flash_data);

	basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]);

	card = kmalloc(sizeof(struct memcard), GFP_KERNEL);
	if (!card) {
		error = -ENOMEM;
		goto fail_nomem;
	}

	card->partitions = (basic_flash_data >> 24 & 0xFF) + 1;
	card->blocklen = ((basic_flash_data >> 16 & 0xFF) + 1) << 5;
	card->writecnt = basic_flash_data >> 12 & 0xF;
	card->readcnt = basic_flash_data >> 8 & 0xF;
	card->removeable = basic_flash_data >> 7 & 1;

	card->partition = 0;

	/*
	 * Not sure there are actually any multi-partition devices in the
	 * real world, but the hardware supports them, so we will too
	 */
	card->parts = kmalloc(sizeof(struct vmupart) * card->partitions,
		GFP_KERNEL);
	if (!card->parts) {
		error = -ENOMEM;
		goto fail_partitions;
	}

	card->mtd = kmalloc(sizeof(struct mtd_info) * card->partitions,
		GFP_KERNEL);
	if (!card->mtd) {
		error = -ENOMEM;
		goto fail_mtd_info;
	}

	maple_set_drvdata(mdev, card);

	/*
	 * We want to trap meminfo not get cond
	 * so set interval to zero, but rely on maple bus
	 * driver to pass back the results of the meminfo
	 */
	maple_getcond_callback(mdev, vmu_queryblocks, 0,
		MAPLE_FUNC_MEMCARD);

	/* Make sure we are clear to go */
	if (atomic_read(&mdev->busy) == 1) {
		wait_event_interruptible_timeout(mdev->maple_wait,
			atomic_read(&mdev->busy) == 0, HZ);
		if (atomic_read(&mdev->busy) == 1) {
			dev_notice(&mdev->dev, "VMU at (%d, %d) is busy\n",
				mdev->port, mdev->unit);
			error = -EAGAIN;
			goto fail_device_busy;
		}
	}

	atomic_set(&mdev->busy, 1);

	/*
	 * Set up the minfo call: vmu_queryblocks will handle
	 * the information passed back
	 */
	error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
		MAPLE_COMMAND_GETMINFO, 2, &partnum);
	if (error) {
		dev_err(&mdev->dev, "Could not lock VMU at (%d, %d)"
			" error is 0x%X\n", mdev->port, mdev->unit, error);
		/* release the device and free card->mtd on this path too */
		atomic_set(&mdev->busy, 0);
		goto fail_device_busy;
	}

	return 0;

fail_device_busy:
	kfree(card->mtd);
fail_mtd_info:
	kfree(card->parts);
fail_partitions:
	kfree(card);
fail_nomem:
	return error;
}

static void vmu_disconnect(struct maple_device *mdev)
{
	struct memcard *card;
	struct mdev_part *mpart;
	int x;

	mdev->callback = NULL;
	card = maple_get_drvdata(mdev);
	for (x = 0; x < card->partitions; x++) {
		mpart = ((card->mtd)[x]).priv;
		mpart->mdev = NULL;
		mtd_device_unregister(&((card->mtd)[x]));
		kfree(((card->parts)[x]).name);
	}
	kfree(card->parts);
	kfree(card->mtd);
	kfree(card);
}

/* Callback to handle eccentricities of both mtd subsystem
 * and general flakiness of Dreamcast VMUs
 */
static int vmu_can_unload(struct maple_device *mdev)
{
	struct memcard *card;
	int x;
	struct mtd_info *mtd;

	card = maple_get_drvdata(mdev);
	for (x = 0; x < card->partitions; x++) {
		mtd = &((card->mtd)[x]);
		if (mtd->usecount > 0)
			return 0;
	}
	return 1;
}

#define ERRSTR "VMU at (%d, %d) file error -"

static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
{
	enum maple_file_errors error = ((int *)recvbuf)[1];

	switch (error) {

	case MAPLE_FILEERR_INVALID_PARTITION:
		dev_notice(&mdev->dev, ERRSTR " invalid partition number\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_PHASE_ERROR:
		dev_notice(&mdev->dev, ERRSTR " phase error\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_INVALID_BLOCK:
		dev_notice(&mdev->dev, ERRSTR " invalid block number\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_WRITE_ERROR:
		dev_notice(&mdev->dev, ERRSTR " write error\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_INVALID_WRITE_LENGTH:
		dev_notice(&mdev->dev, ERRSTR " invalid write length\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_BAD_CRC:
		dev_notice(&mdev->dev, ERRSTR " bad CRC\n",
			mdev->port, mdev->unit);
		break;

	default:
		dev_notice(&mdev->dev, ERRSTR " 0x%X\n",
			mdev->port, mdev->unit, error);
	}
}

static int probe_maple_vmu(struct device *dev)
{
	int error;
	struct maple_device *mdev = to_maple_dev(dev);
	struct maple_driver *mdrv = to_maple_driver(dev->driver);

	mdev->can_unload = vmu_can_unload;
	mdev->fileerr_handler = vmu_file_error;
	mdev->driver = mdrv;

	error = vmu_connect(mdev);
	if (error)
		return error;

	return 0;
}

static int remove_maple_vmu(struct device *dev)
{
	struct maple_device *mdev = to_maple_dev(dev);

	vmu_disconnect(mdev);
	return 0;
}

static struct maple_driver vmu_flash_driver = {
	.function =	MAPLE_FUNC_MEMCARD,
	.drv = {
		.name =		"Dreamcast_visual_memory",
		.probe =	probe_maple_vmu,
		.remove =	remove_maple_vmu,
	},
};

static int __init vmu_flash_map_init(void)
{
	return maple_driver_register(&vmu_flash_driver);
}

static void __exit vmu_flash_map_exit(void)
{
	maple_driver_unregister(&vmu_flash_driver);
}

module_init(vmu_flash_map_init);
module_exit(vmu_flash_map_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adrian McMenamin");
MODULE_DESCRIPTION("Flash mapping for Sega Dreamcast visual memory");