zfcp_qdio.c

/*
 * zfcp device driver
 *
 * Setup and helper functions to access QDIO.
 *
 * Copyright IBM Corp. 2002, 2010
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_qdio.h"
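
/*
 * The "datarouter" module parameter enables the QDIO multibuffer
 * feature (QDR_AC_MULTI_BUFFER_ENABLE, see zfcp_qdio_setup_init_data()),
 * which the hardware data router support depends on.
 */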
static bool enable_multibuffer = true;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");

static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
				    unsigned int qdio_err)
{
	struct zfcp_adapter *adapter = qdio->adapter;

	dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");

	if (qdio_err & QDIO_ERROR_SLSB_STATE) {
		zfcp_qdio_siosl(adapter);
		zfcp_erp_adapter_shutdown(adapter, 0, id);
		return;
	}
	zfcp_erp_adapter_reopen(adapter,
				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
				ZFCP_STATUS_COMMON_ERP_FAILED, id);
}
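
/*
 * Zero out a run of SBALs, wrapping around the end of the circular
 * queue when first + cnt exceeds QDIO_MAX_BUFFERS_PER_Q.
 */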
static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
{
	int i, sbal_idx;

	for (i = first; i < first + cnt; i++) {
		sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
		memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
	}
}

/* this needs to be called prior to updating the queue fill level */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
	unsigned long long now, span;
	int used;

	now = get_tod_clock_monotonic();
	/* convert the TOD-clock delta to microseconds (TOD bit 51 = 1 us) */
	span = (now - qdio->req_q_time) >> 12;
	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
	qdio->req_q_util += used * span;
	qdio->req_q_time = now;
}
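
/*
 * QDIO completion handler for the outbound (request) queue: the
 * returned SBALs are cleared for reuse, utilization accounting is
 * updated, and waiters blocked in zfcp_qdio_sbal_get() are woken.
 */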
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
			      int queue_no, int idx, int count,
			      unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;

	if (unlikely(qdio_err)) {
		zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
		return;
	}

	/* cleanup all SBALs being program-owned now */
	zfcp_qdio_zero_sbals(qdio->req_q, idx, count);

	spin_lock_irq(&qdio->stat_lock);
	zfcp_qdio_account(qdio);
	spin_unlock_irq(&qdio->stat_lock);
	atomic_add(count, &qdio->req_q_free);
	wake_up(&qdio->req_q_wq);
}
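
/*
 * QDIO completion handler for the inbound (response) queue: on error
 * with the hardware data router active, the affected SBALs are first
 * captured for debug tracing; otherwise each returned SBAL is matched
 * to its request and the buffers are handed back to QDIO.
 */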
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
			       int queue_no, int idx, int count,
			       unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
	struct zfcp_adapter *adapter = qdio->adapter;
	int sbal_no, sbal_idx;

	if (unlikely(qdio_err)) {
		if (zfcp_adapter_multi_buffer_active(adapter)) {
			void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
			struct qdio_buffer_element *sbale;
			u64 req_id;
			u8 scount;

			/* zero all slots, incl. the one for the signaling SBAL */
			memset(pl, 0, sizeof(pl));
			sbale = qdio->res_q[idx]->element;
			req_id = (u64) sbale->addr;
			scount = min(sbale->scount + 1,
				     ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
				     /* incl. signaling SBAL */

			for (sbal_no = 0; sbal_no < scount; sbal_no++) {
				sbal_idx = (idx + sbal_no) %
					QDIO_MAX_BUFFERS_PER_Q;
				pl[sbal_no] = qdio->res_q[sbal_idx];
			}
			zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
		}
		zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
		return;
	}

	/*
	 * go through all SBALs from input queue currently
	 * returned by QDIO layer
	 */
	for (sbal_no = 0; sbal_no < count; sbal_no++) {
		sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
		/* go through all SBALEs of SBAL */
		zfcp_fsf_reqid_check(qdio, sbal_idx);
	}

	/* put SBALs back to response queue */
	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
}
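
/*
 * Close the current SBAL with a last-entry flag and open the next one,
 * marking the first SBALE so that QDIO treats the chained SBALs as one
 * request. Returns NULL once sbal_limit would be exceeded.
 */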
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (q_req->sbal_last == q_req->sbal_limit)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;

	/* calculate index of next SBAL */
	q_req->sbal_last++;
	q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this request's number of SBALs up-to-date */
	q_req->sbal_number++;
	BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);

	/* start at first SBALE of new SBAL */
	q_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	return sbale;
}
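
/*
 * Advance to the next SBALE within the current SBAL, chaining a new
 * SBAL when the per-SBAL element limit has been reached.
 */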
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
		return zfcp_qdio_sbal_chain(qdio, q_req);

	q_req->sbale_curr++;
	return zfcp_qdio_sbale_curr(qdio, q_req);
}

/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sg: scatter-gather list
 * Returns: zero or -EINVAL on error
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			    struct scatterlist *sg)
{
	struct qdio_buffer_element *sbale;

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	for (; sg; sg = sg_next(sg)) {
		sbale = zfcp_qdio_sbale_next(qdio, q_req);
		if (!sbale) {
			atomic_inc(&qdio->req_q_full);
			zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
					     q_req->sbal_number);
			return -EINVAL;
		}
		sbale->addr = sg_virt(sg);
		sbale->length = sg->length;
	}
	return 0;
}
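
/*
 * Wait condition for zfcp_qdio_sbal_get(): true when a free SBAL is
 * available or when QDIO is down, so that waiters also wake up (and
 * then fail with -EIO) during adapter shutdown.
 */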
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
	if (atomic_read(&qdio->req_q_free) ||
	    !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return 1;
	return 0;
}

/**
 * zfcp_qdio_sbal_get - get free SBAL in request queue, wait if necessary
 * @qdio: pointer to struct zfcp_qdio
 *
 * The req_q_lock must be held by the caller of this function, and
 * this function may only be called from process context; it will
 * sleep when waiting for a free SBAL.
 *
 * Returns: 0 on success, -EIO if there is no free SBAL after waiting.
 */
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
	long ret;

	ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
		       zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);

	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return -EIO;

	if (ret > 0)
		return 0;

	if (!ret) {
		atomic_inc(&qdio->req_q_full);
		/* assume hanging outbound queue, try queue recovery */
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
	}

	return -EIO;
}
/**
 * zfcp_qdio_send - send req to QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	int retval;
	u8 sbal_number = q_req->sbal_number;

	spin_lock(&qdio->stat_lock);
	zfcp_qdio_account(qdio);
	spin_unlock(&qdio->stat_lock);

	retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
			 q_req->sbal_first, sbal_number);

	if (unlikely(retval)) {
		zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
				     sbal_number);
		return retval;
	}

	/* account for transferred buffers */
	atomic_sub(sbal_number, &qdio->req_q_free);
	qdio->req_q_idx += sbal_number;
	qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;
	return 0;
}
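
/*
 * Fill a struct qdio_initialize for this adapter: one input and one
 * output queue, the completion handlers above, the adapter name in
 * EBCDIC, and (if enabled) the multibuffer feature for the hardware
 * data router.
 */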
static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
				      struct zfcp_qdio *qdio)
{
	memset(id, 0, sizeof(*id));
	id->cdev = qdio->adapter->ccw_device;
	id->q_format = QDIO_ZFCP_QFMT;
	memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
	ASCEBC(id->adapter_name, 8);
	id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
	if (enable_multibuffer)
		id->qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
	id->no_input_qs = 1;
	id->no_output_qs = 1;
	id->input_handler = zfcp_qdio_int_resp;
	id->output_handler = zfcp_qdio_int_req;
	id->int_parm = (unsigned long) qdio;
	id->input_sbal_addr_array = (void **) (qdio->res_q);
	id->output_sbal_addr_array = (void **) (qdio->req_q);
	id->scan_threshold =
		QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
}

/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @qdio: pointer to struct zfcp_qdio
 * Returns: -ENOMEM on memory allocation error or return value from
 *          qdio_allocate
 */
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
	struct qdio_initialize init_data;
	int ret;

	ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		return -ENOMEM;

	ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		goto free_req_q;

	zfcp_qdio_setup_init_data(&init_data, qdio);
	init_waitqueue_head(&qdio->req_q_wq);

	ret = qdio_allocate(&init_data);
	if (ret)
		goto free_res_q;

	return 0;

free_res_q:
	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
free_req_q:
	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	return ret;
}

/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to struct zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	int idx, count;

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
	spin_lock_irq(&qdio->req_q_lock);
	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	spin_unlock_irq(&qdio->req_q_lock);

	wake_up(&qdio->req_q_wq);

	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals */
	count = atomic_read(&qdio->req_q_free);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
	}
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, 0);
}

/**
 * zfcp_qdio_open - prepare and initialize response queue
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
	struct qdio_buffer_element *sbale;
	struct qdio_initialize init_data;
	struct zfcp_adapter *adapter = qdio->adapter;
	struct ccw_device *cdev = adapter->ccw_device;
	struct qdio_ssqd_desc ssqd;
	int cc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
		      &qdio->adapter->status);

	zfcp_qdio_setup_init_data(&init_data, qdio);

	if (qdio_establish(&init_data))
		goto failed_establish;

	if (qdio_get_ssqd_desc(init_data.cdev, &ssqd))
		goto failed_qdio;

	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
		atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
			  &qdio->adapter->status);

	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
		atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
	} else {
		atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
	}

	qdio->max_sbale_per_req =
		ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
		- 2;

	if (qdio_activate(cdev))
		goto failed_qdio;

	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(qdio->res_q[cc]->element[0]);
		sbale->length = 0;
		sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
		sbale->sflags = 0;
		sbale->addr = NULL;
	}

	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBAL and number of available SBALs */
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);

	if (adapter->scsi_host) {
		adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
		adapter->scsi_host->max_sectors = qdio->max_sbale_per_req * 8;
	}

	return 0;

failed_qdio:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&cdev->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}
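
/**
 * zfcp_qdio_destroy - free the QDIO buffers and the zfcp_qdio structure
 * @qdio: pointer to struct zfcp_qdio (may be NULL)
 */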
void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
	if (!qdio)
		return;

	if (qdio->adapter->ccw_device)
		qdio_free(qdio->adapter->ccw_device);

	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
	kfree(qdio);
}
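
/**
 * zfcp_qdio_setup - allocate and set up the zfcp_qdio structure for an adapter
 * @adapter: pointer to struct zfcp_adapter
 * Returns: 0 on success, -ENOMEM on failure
 */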
int zfcp_qdio_setup(struct zfcp_adapter *adapter)
{
	struct zfcp_qdio *qdio;

	qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
	if (!qdio)
		return -ENOMEM;

	qdio->adapter = adapter;

	if (zfcp_qdio_allocate(qdio)) {
		kfree(qdio);
		return -ENOMEM;
	}

	spin_lock_init(&qdio->req_q_lock);
	spin_lock_init(&qdio->stat_lock);

	adapter->qdio = qdio;
	return 0;
}

/**
 * zfcp_qdio_siosl - Trigger logging in FCP channel
 * @adapter: The zfcp_adapter where to trigger logging
 *
 * Call the cio siosl function to trigger hardware logging. This
 * wrapper function sets a flag to ensure hardware logging is only
 * triggered once before going through qdio shutdown.
 *
 * The triggers are always run from qdio tasklet context, so no
 * additional synchronization is necessary.
 */
void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
{
	int rc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
		return;

	rc = ccw_device_siosl(adapter->ccw_device);
	if (!rc)
		atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
			  &adapter->status);
}