scm_blk.c

/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

debug_info_t *scm_debug;
static int scm_major;
static mempool_t *aidaw_pool;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static unsigned int nr_requests_per_io = 8;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");
module_param(nr_requests_per_io, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");
MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");
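
/*
 * scm_request structures (together with their aob_rq_header and a zeroed
 * page for the AOB) are pre-allocated at module load time and kept on the
 * inactive_requests list; extra aidaw pages come from a small mempool.
 * This is presumably meant to keep memory allocations out of the I/O path.
 */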
static void __scm_free_rq(struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);

	free_page((unsigned long) scmrq->aob);
	__scm_free_rq_cluster(scmrq);
	kfree(scmrq->request);
	kfree(aobrq);
}

static void scm_free_rqs(void)
{
	struct list_head *iter, *safe;
	struct scm_request *scmrq;

	spin_lock_irq(&list_lock);
	list_for_each_safe(iter, safe, &inactive_requests) {
		scmrq = list_entry(iter, struct scm_request, list);
		list_del(&scmrq->list);
		__scm_free_rq(scmrq);
	}
	spin_unlock_irq(&list_lock);

	mempool_destroy(aidaw_pool);
}

static int __scm_alloc_rq(void)
{
	struct aob_rq_header *aobrq;
	struct scm_request *scmrq;

	aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
	if (!aobrq)
		return -ENOMEM;

	scmrq = (void *) aobrq->data;
	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
	if (!scmrq->aob)
		goto free;

	scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
				 GFP_KERNEL);
	if (!scmrq->request)
		goto free;

	if (__scm_alloc_rq_cluster(scmrq))
		goto free;

	INIT_LIST_HEAD(&scmrq->list);
	spin_lock_irq(&list_lock);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irq(&list_lock);

	return 0;
free:
	__scm_free_rq(scmrq);
	return -ENOMEM;
}

static int scm_alloc_rqs(unsigned int nrqs)
{
	int ret = 0;

	aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
	if (!aidaw_pool)
		return -ENOMEM;

	while (nrqs-- && !ret)
		ret = __scm_alloc_rq();

	return ret;
}
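
/*
 * Fetch an unused scm_request from the inactive list (or NULL if none is
 * available); scm_request_done() below returns it, after giving any
 * separately allocated aidaw pages back to the mempool.
 */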
static struct scm_request *scm_request_fetch(void)
{
	struct scm_request *scmrq = NULL;

	spin_lock(&list_lock);
	if (list_empty(&inactive_requests))
		goto out;
	scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
	list_del(&scmrq->list);
out:
	spin_unlock(&list_lock);
	return scmrq;
}

static void scm_request_done(struct scm_request *scmrq)
{
	unsigned long flags;
	struct msb *msb;
	u64 aidaw;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
		msb = &scmrq->aob->msb[i];
		aidaw = msb->data_addr;

		if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
		    IS_ALIGNED(aidaw, PAGE_SIZE))
			mempool_free(virt_to_page(aidaw), aidaw_pool);
	}

	spin_lock_irqsave(&list_lock, flags);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irqrestore(&list_lock, flags);
}

static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
{
	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
}
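
/*
 * Each aidaw entry addresses one 4K page of data.  scm_aidaw_bytes()
 * computes how many bytes can still be described by the aidaw entries
 * that fit between the current position and the end of its page;
 * scm_aidaw_fetch() only allocates a fresh page from the mempool when
 * that remainder is not enough for the request at hand.
 */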
static inline struct aidaw *scm_aidaw_alloc(void)
{
	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);

	return page ? page_address(page) : NULL;
}

static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
{
	unsigned long _aidaw = (unsigned long) aidaw;
	unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;

	return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
}

struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
{
	struct aidaw *aidaw;

	if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
		return scmrq->next_aidaw;

	aidaw = scm_aidaw_alloc();
	if (aidaw)
		memset(aidaw, 0, PAGE_SIZE);
	return aidaw;
}
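
/*
 * Fill in the next msb of the AOB for the request at slot msb_count:
 * set block size, operation code and scm address, and build the aidaw
 * list from the request's bio segments (one aidaw per segment).
 */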
static int scm_request_prepare(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	int pos = scmrq->aob->request.msb_count;
	struct msb *msb = &scmrq->aob->msb[pos];
	struct request *req = scmrq->request[pos];
	struct req_iterator iter;
	struct aidaw *aidaw;
	struct bio_vec bv;

	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
	if (!aidaw)
		return -ENOMEM;

	msb->bs = MSB_BS_4K;
	scmrq->aob->request.msb_count++;
	msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
	msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
	msb->flags |= MSB_FLAG_IDA;
	msb->data_addr = (u64) aidaw;

	rq_for_each_segment(bv, req, iter) {
		WARN_ON(bv.bv_offset);
		msb->blk_count += bv.bv_len >> 12;
		aidaw->data_addr = (u64) page_address(bv.bv_page);
		aidaw++;
	}

	scmrq->next_aidaw = aidaw;
	return 0;
}

static inline void scm_request_set(struct scm_request *scmrq,
				   struct request *req)
{
	scmrq->request[scmrq->aob->request.msb_count] = req;
}

static inline void scm_request_init(struct scm_blk_dev *bdev,
				    struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);
	struct aob *aob = scmrq->aob;

	memset(scmrq->request, 0,
	       nr_requests_per_io * sizeof(scmrq->request[0]));
	memset(aob, 0, sizeof(*aob));
	aobrq->scmdev = bdev->scmdev;
	aob->request.cmd_code = ARQB_CMD_MOVE;
	aob->request.data = (u64) aobrq;
	scmrq->bdev = bdev;
	scmrq->retries = 4;
	scmrq->error = 0;
	/* We don't use all msbs - place aidaws at the end of the aob page. */
	scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
	scm_request_cluster_init(scmrq);
}

static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
{
	if (atomic_read(&bdev->queued_reqs)) {
		/* Queue restart is triggered by the next interrupt. */
		return;
	}
	blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
}
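
/*
 * scm_request_requeue() pushes all block layer requests attached to this
 * scm_request back onto the queue, scm_request_finish() completes them
 * with the recorded error code; both hand the scm_request back to the
 * inactive list via scm_request_done().
 */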
void scm_request_requeue(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int i;

	scm_release_cluster(scmrq);
	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
		blk_requeue_request(bdev->rq, scmrq->request[i]);

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
	scm_ensure_queue_restart(bdev);
}

void scm_request_finish(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int i;

	scm_release_cluster(scmrq);
	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
		blk_end_request_all(scmrq->request[i], scmrq->error);

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
}

static int scm_request_start(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int ret;

	atomic_inc(&bdev->queued_reqs);
	if (!scmrq->aob->request.msb_count) {
		scm_request_requeue(scmrq);
		return -EINVAL;
	}

	ret = eadm_start_aob(scmrq->aob);
	if (ret) {
		SCM_LOG(5, "no subchannel");
		scm_request_requeue(scmrq);
	}
	return ret;
}
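
/*
 * Strategy of the queue's request function: pack up to nr_requests_per_io
 * block layer requests into a single AOB (one msb each) and start it via
 * eadm_start_aob(); requests that need cluster handling are started
 * separately.
 */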
static void scm_blk_request(struct request_queue *rq)
{
	struct scm_device *scmdev = rq->queuedata;
	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
	struct scm_request *scmrq = NULL;
	struct request *req;

	while ((req = blk_peek_request(rq))) {
		if (req->cmd_type != REQ_TYPE_FS) {
			blk_start_request(req);
			blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
			blk_end_request_all(req, -EIO);
			continue;
		}

		if (!scm_permit_request(bdev, req))
			goto out;

		if (!scmrq) {
			scmrq = scm_request_fetch();
			if (!scmrq) {
				SCM_LOG(5, "no request");
				goto out;
			}
			scm_request_init(bdev, scmrq);
		}
		scm_request_set(scmrq, req);

		if (!scm_reserve_cluster(scmrq)) {
			SCM_LOG(5, "cluster busy");
			scm_request_set(scmrq, NULL);
			if (scmrq->aob->request.msb_count)
				goto out;

			scm_request_done(scmrq);
			return;
		}

		if (scm_need_cluster_request(scmrq)) {
			if (scmrq->aob->request.msb_count) {
				/* Start cluster requests separately. */
				scm_request_set(scmrq, NULL);
				if (scm_request_start(scmrq))
					return;
			} else {
				atomic_inc(&bdev->queued_reqs);
				blk_start_request(req);
				scm_initiate_cluster_request(scmrq);
			}
			scmrq = NULL;
			continue;
		}

		if (scm_request_prepare(scmrq)) {
			SCM_LOG(5, "aidaw alloc failed");
			scm_request_set(scmrq, NULL);
			goto out;
		}
		blk_start_request(req);

		if (scmrq->aob->request.msb_count < nr_requests_per_io)
			continue;

		if (scm_request_start(scmrq))
			return;

		scmrq = NULL;
	}
out:
	if (scmrq)
		scm_request_start(scmrq);
	else
		scm_ensure_queue_restart(bdev);
}

static void __scmrq_log_error(struct scm_request *scmrq)
{
	struct aob *aob = scmrq->aob;

	if (scmrq->error == -ETIMEDOUT)
		SCM_LOG(1, "Request timeout");
	else {
		SCM_LOG(1, "Request error");
		SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
	}
	if (scmrq->retries)
		SCM_LOG(1, "Retry request");
	else
		pr_err("An I/O operation to SCM failed with rc=%d\n",
		       scmrq->error);
}
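
/*
 * I/O completion callback: note the error, move the request to the
 * per-device finished_requests list and defer the rest of the completion
 * handling to the tasklet.
 */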
void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
{
	struct scm_request *scmrq = data;
	struct scm_blk_dev *bdev = scmrq->bdev;

	scmrq->error = error;
	if (error)
		__scmrq_log_error(scmrq);

	spin_lock(&bdev->lock);
	list_add_tail(&scmrq->list, &bdev->finished_requests);
	spin_unlock(&bdev->lock);
	tasklet_hi_schedule(&bdev->tasklet);
}
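
/*
 * Error handling for a failed request that still has retries left:
 * -EIO with an EQC_WR_PROHIBIT response puts the device into the
 * write-prohibited state and requeues the request; any other error
 * leads to the AOB simply being restarted (falling back to a requeue
 * if the restart fails).
 */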
static void scm_blk_handle_error(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	if (scmrq->error != -EIO)
		goto restart;

	/* For -EIO the response block is valid. */
	switch (scmrq->aob->response.eqc) {
	case EQC_WR_PROHIBIT:
		spin_lock_irqsave(&bdev->lock, flags);
		if (bdev->state != SCM_WR_PROHIBIT)
			pr_info("%lx: Write access to the SCM increment is suspended\n",
				(unsigned long) bdev->scmdev->address);
		bdev->state = SCM_WR_PROHIBIT;
		spin_unlock_irqrestore(&bdev->lock, flags);
		goto requeue;
	default:
		break;
	}

restart:
	if (!eadm_start_aob(scmrq->aob))
		return;

requeue:
	spin_lock_irqsave(&bdev->rq_lock, flags);
	scm_request_requeue(scmrq);
	spin_unlock_irqrestore(&bdev->rq_lock, flags);
}
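
/*
 * Tasklet: walk the finished_requests list and either retry failed
 * requests, hand cluster requests on to the cluster code, or complete
 * them; finally kick the queue to pick up requests that may have been
 * deferred.
 */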
static void scm_blk_tasklet(struct scm_blk_dev *bdev)
{
	struct scm_request *scmrq;
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	while (!list_empty(&bdev->finished_requests)) {
		scmrq = list_first_entry(&bdev->finished_requests,
					 struct scm_request, list);
		list_del(&scmrq->list);
		spin_unlock_irqrestore(&bdev->lock, flags);

		if (scmrq->error && scmrq->retries-- > 0) {
			scm_blk_handle_error(scmrq);

			/* Request restarted or requeued, handle next. */
			spin_lock_irqsave(&bdev->lock, flags);
			continue;
		}

		if (scm_test_cluster_request(scmrq)) {
			scm_cluster_request_irq(scmrq);
			spin_lock_irqsave(&bdev->lock, flags);
			continue;
		}

		scm_request_finish(scmrq);
		spin_lock_irqsave(&bdev->lock, flags);
	}
	spin_unlock_irqrestore(&bdev->lock, flags);
	/* Look out for more requests. */
	blk_run_queue(bdev->rq);
}

static const struct block_device_operations scm_blk_devops = {
	.owner = THIS_MODULE,
};
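
/*
 * Set up the request queue and gendisk for one scm device.  The 4K
 * logical block size matches the msb block size; disk names follow the
 * scheme scma..scmz, scmaa..scmzz, which limits the driver to 702
 * devices.
 */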
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
	struct request_queue *rq;
	int len, ret = -ENOMEM;
	unsigned int devindex, nr_max_blk;

	devindex = atomic_inc_return(&nr_devices) - 1;
	/* scma..scmz + scmaa..scmzz */
	if (devindex > 701) {
		ret = -ENODEV;
		goto out;
	}

	bdev->scmdev = scmdev;
	bdev->state = SCM_OPER;
	spin_lock_init(&bdev->rq_lock);
	spin_lock_init(&bdev->lock);
	INIT_LIST_HEAD(&bdev->finished_requests);
	atomic_set(&bdev->queued_reqs, 0);
	tasklet_init(&bdev->tasklet,
		     (void (*)(unsigned long)) scm_blk_tasklet,
		     (unsigned long) bdev);

	rq = blk_init_queue(scm_blk_request, &bdev->rq_lock);
	if (!rq)
		goto out;

	bdev->rq = rq;
	nr_max_blk = min(scmdev->nr_max_block,
			 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

	blk_queue_logical_block_size(rq, 1 << 12);
	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
	blk_queue_max_segments(rq, nr_max_blk);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);

	scm_blk_dev_cluster_setup(bdev);

	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
	if (!bdev->gendisk)
		goto out_queue;

	rq->queuedata = scmdev;
	bdev->gendisk->driverfs_dev = &scmdev->dev;
	bdev->gendisk->private_data = scmdev;
	bdev->gendisk->fops = &scm_blk_devops;
	bdev->gendisk->queue = rq;
	bdev->gendisk->major = scm_major;
	bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;

	len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
	if (devindex > 25) {
		len += snprintf(bdev->gendisk->disk_name + len,
				DISK_NAME_LEN - len, "%c",
				'a' + (devindex / 26) - 1);
		devindex = devindex % 26;
	}
	snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
		 'a' + devindex);

	/* 512 byte sectors */
	set_capacity(bdev->gendisk, scmdev->size >> 9);
	add_disk(bdev->gendisk);
	return 0;

out_queue:
	blk_cleanup_queue(rq);
out:
	atomic_dec(&nr_devices);
	return ret;
}

void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
	tasklet_kill(&bdev->tasklet);
	del_gendisk(bdev->gendisk);
	blk_cleanup_queue(bdev->gendisk->queue);
	put_disk(bdev->gendisk);
}

void scm_blk_set_available(struct scm_blk_dev *bdev)
{
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	if (bdev->state == SCM_WR_PROHIBIT)
		pr_info("%lx: Write access to the SCM increment is restored\n",
			(unsigned long) bdev->scmdev->address);
	bdev->state = SCM_OPER;
	spin_unlock_irqrestore(&bdev->lock, flags);
}

static bool __init scm_blk_params_valid(void)
{
	if (!nr_requests_per_io || nr_requests_per_io > 64)
		return false;

	return scm_cluster_size_valid();
}

static int __init scm_blk_init(void)
{
	int ret = -EINVAL;

	if (!scm_blk_params_valid())
		goto out;

	ret = register_blkdev(0, "scm");
	if (ret < 0)
		goto out;

	scm_major = ret;
	ret = scm_alloc_rqs(nr_requests);
	if (ret)
		goto out_free;

	scm_debug = debug_register("scm_log", 16, 1, 16);
	if (!scm_debug) {
		ret = -ENOMEM;
		goto out_free;
	}

	debug_register_view(scm_debug, &debug_hex_ascii_view);
	debug_set_level(scm_debug, 2);

	ret = scm_drv_init();
	if (ret)
		goto out_dbf;

	return ret;

out_dbf:
	debug_unregister(scm_debug);
out_free:
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
out:
	return ret;
}
module_init(scm_blk_init);

static void __exit scm_blk_cleanup(void)
{
	scm_drv_cleanup();
	debug_unregister(scm_debug);
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);