/* device_fsm.c */
  1. /*
  2. * finite state machine for device handling
  3. *
  4. * Copyright IBM Corp. 2002, 2008
  5. * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
  6. * Martin Schwidefsky (schwidefsky@de.ibm.com)
  7. */
  8. #include <linux/module.h>
  9. #include <linux/init.h>
  10. #include <linux/jiffies.h>
  11. #include <linux/string.h>
  12. #include <asm/ccwdev.h>
  13. #include <asm/cio.h>
  14. #include <asm/chpid.h>
  15. #include "cio.h"
  16. #include "cio_debug.h"
  17. #include "css.h"
  18. #include "device.h"
  19. #include "chsc.h"
  20. #include "ioasm.h"
  21. #include "chp.h"
/* Nonzero when the "ccw_timeout_log" kernel parameter was given. */
static int timeout_log_enabled;

/*
 * Kernel parameter handler: enable dumping of diagnostic state on ccw
 * device timeouts (see ccw_timeout_log()).
 */
static int __init ccw_timeout_log_setup(char *unused)
{
	timeout_log_enabled = 1;
	return 1;
}

__setup("ccw_timeout_log", ccw_timeout_log_setup);
/*
 * Dump diagnostic state (orb, last channel program/tcw, schib, device
 * flags) to the kernel log after a ccw device timeout.  Only called when
 * timeout_log_enabled is set via the "ccw_timeout_log" parameter.
 */
static void ccw_timeout_log(struct ccw_device *cdev)
{
	struct schib schib;
	struct subchannel *sch;
	struct io_subchannel_private *private;
	union orb *orb;
	int cc;

	sch = to_subchannel(cdev->dev.parent);
	private = to_io_private(sch);
	orb = &private->orb;
	/* Fetch current subchannel status; cc is reported in the dump below. */
	cc = stsch_err(sch->schid, &schib);

	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
	       "device information:\n", get_tod_clock());
	printk(KERN_WARNING "cio: orb:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       orb, sizeof(*orb), 0);
	printk(KERN_WARNING "cio: ccw device bus id: %s\n",
	       dev_name(&cdev->dev));
	printk(KERN_WARNING "cio: subchannel bus id: %s\n",
	       dev_name(&sch->dev));
	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
	if (orb->tm.b) {
		/* Transport mode: dump the last tcw. */
		printk(KERN_WARNING "cio: orb indicates transport mode\n");
		printk(KERN_WARNING "cio: last tcw:\n");
		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->tm.tcw,
			       sizeof(struct tcw), 0);
	} else {
		/* Command mode: dump the ccw the channel program points at,
		 * marking cio-internal programs (sense/senseid) as such. */
		printk(KERN_WARNING "cio: orb indicates command mode\n");
		if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
		    (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
			printk(KERN_WARNING "cio: last channel program "
			       "(intern):\n");
		else
			printk(KERN_WARNING "cio: last channel program:\n");
		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->cmd.cpa,
			       sizeof(struct ccw1), 0);
	}
	printk(KERN_WARNING "cio: ccw device state: %d\n",
	       cdev->private->state);
	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
	printk(KERN_WARNING "cio: schib:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &schib, sizeof(schib), 0);
	printk(KERN_WARNING "cio: ccw device flags:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}
/*
 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
 * Runs as a timer callback; takes the ccw device lock for the FSM event.
 */
static void
ccw_device_timeout(unsigned long data)
{
	struct ccw_device *cdev;

	cdev = (struct ccw_device *) data;
	spin_lock_irq(cdev->ccwlock);
	if (timeout_log_enabled)
		ccw_timeout_log(cdev);
	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
	spin_unlock_irq(cdev->ccwlock);
}
/*
 * Arm or disarm the per-device timeout timer.
 *
 * @expires: timeout in jiffies from now; 0 cancels a pending timer.
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0) {
		del_timer(&cdev->private->timer);
		return;
	}
	if (timer_pending(&cdev->private->timer)) {
		/* mod_timer() returns nonzero if the timer was still armed,
		 * in which case the update below is unnecessary. */
		if (mod_timer(&cdev->private->timer, jiffies + expires))
			return;
	}
	cdev->private->timer.function = ccw_device_timeout;
	cdev->private->timer.data = (unsigned long) cdev;
	cdev->private->timer.expires = jiffies + expires;
	add_timer(&cdev->private->timer);
}
/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, two tries
 * with cio_halt, 255 tries with cio_clear. If everything fails, give up
 * and return -EIO.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_update_schib(sch))
		return -ENODEV;
	if (!sch->schib.pmcw.ena)
		/* Not operational -> done. */
		return 0;
	/* Stage 1: cancel io. */
	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) &&
	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
		if (!scsw_is_tm(&sch->schib.scsw)) {
			ret = cio_cancel(sch);
			if (ret != -EINVAL)
				return ret;
		}
		/* cancel io unsuccessful or not applicable (transport mode).
		 * Continue with asynchronous instructions. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			if (ret != -EBUSY)
				/* Halt accepted: interrupt will follow. */
				return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear (sch);
		/* Clear accepted: interrupt will follow -> -EBUSY. */
		return (ret == 0) ? -EBUSY : ret;
	}
	/* Function was unsuccessful */
	CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno);
	return -EIO;
}
  165. void ccw_device_update_sense_data(struct ccw_device *cdev)
  166. {
  167. memset(&cdev->id, 0, sizeof(cdev->id));
  168. cdev->id.cu_type = cdev->private->senseid.cu_type;
  169. cdev->id.cu_model = cdev->private->senseid.cu_model;
  170. cdev->id.dev_type = cdev->private->senseid.dev_type;
  171. cdev->id.dev_model = cdev->private->senseid.dev_model;
  172. }
  173. int ccw_device_test_sense_data(struct ccw_device *cdev)
  174. {
  175. return cdev->id.cu_type == cdev->private->senseid.cu_type &&
  176. cdev->id.cu_model == cdev->private->senseid.cu_model &&
  177. cdev->id.dev_type == cdev->private->senseid.dev_type &&
  178. cdev->id.dev_model == cdev->private->senseid.dev_model;
  179. }
  180. /*
  181. * The machine won't give us any notification by machine check if a chpid has
  182. * been varied online on the SE so we have to find out by magic (i. e. driving
  183. * the channel subsystem to device selection and updating our path masks).
  184. */
  185. static void
  186. __recover_lost_chpids(struct subchannel *sch, int old_lpm)
  187. {
  188. int mask, i;
  189. struct chp_id chpid;
  190. chp_id_init(&chpid);
  191. for (i = 0; i<8; i++) {
  192. mask = 0x80 >> i;
  193. if (!(sch->lpm & mask))
  194. continue;
  195. if (old_lpm & mask)
  196. continue;
  197. chpid.id = sch->schib.pmcw.chpid[i];
  198. if (!chp_is_registered(chpid))
  199. css_schedule_eval_all();
  200. }
  201. }
/*
 * Stop device recognition: disable the subchannel, refresh the path masks
 * and move the device to its follow-on state based on the recognition
 * result passed in @state.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int old_lpm;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_disable_subchannel(sch))
		state = DEV_STATE_NOT_OPER;
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;
	/* Check since device may again have become not operational. */
	if (cio_update_schib(sch))
		state = DEV_STATE_NOT_OPER;
	else
		sch->lpm = sch->schib.pmcw.pam & sch->opm;
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID &&
	    (state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) {
		/* Re-recognition of a disconnected device failed: stay
		 * disconnected and wake up anyone waiting for the result. */
		cdev->private->flags.recog_done = 1;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
		return;
	}
	if (cdev->private->flags.resuming) {
		/* Resume path: just record the result for the resume code. */
		cdev->private->state = state;
		cdev->private->flags.recog_done = 1;
		wake_up(&cdev->private->wait_q);
		return;
	}
	switch (state) {
	case DEV_STATE_NOT_OPER:
		break;
	case DEV_STATE_OFFLINE:
		if (!cdev->online) {
			ccw_device_update_sense_data(cdev);
			break;
		}
		cdev->private->state = DEV_STATE_OFFLINE;
		cdev->private->flags.recog_done = 1;
		if (ccw_device_test_sense_data(cdev)) {
			/* Same device as before: bring it back online. */
			cdev->private->flags.donotify = 1;
			ccw_device_online(cdev);
			wake_up(&cdev->private->wait_q);
		} else {
			/* Different device: rebind to a matching driver. */
			ccw_device_update_sense_data(cdev);
			ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		}
		return;
	case DEV_STATE_BOXED:
		if (cdev->id.cu_type != 0) { /* device was recognized before */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_BOXED;
			wake_up(&cdev->private->wait_q);
			return;
		}
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	wake_up(&cdev->private->wait_q);
}
  273. /*
  274. * Function called from device_id.c after sense id has completed.
  275. */
  276. void
  277. ccw_device_sense_id_done(struct ccw_device *cdev, int err)
  278. {
  279. switch (err) {
  280. case 0:
  281. ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
  282. break;
  283. case -ETIME: /* Sense id stopped by timeout. */
  284. ccw_device_recog_done(cdev, DEV_STATE_BOXED);
  285. break;
  286. default:
  287. ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
  288. break;
  289. }
  290. }
  291. /**
  292. * ccw_device_notify() - inform the device's driver about an event
  293. * @cdev: device for which an event occurred
  294. * @event: event that occurred
  295. *
  296. * Returns:
  297. * -%EINVAL if the device is offline or has no driver.
  298. * -%EOPNOTSUPP if the device's driver has no notifier registered.
  299. * %NOTIFY_OK if the driver wants to keep the device.
  300. * %NOTIFY_BAD if the driver doesn't want to keep the device.
  301. */
  302. int ccw_device_notify(struct ccw_device *cdev, int event)
  303. {
  304. int ret = -EINVAL;
  305. if (!cdev->drv)
  306. goto out;
  307. if (!cdev->online)
  308. goto out;
  309. CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
  310. cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
  311. event);
  312. if (!cdev->drv->notify) {
  313. ret = -EOPNOTSUPP;
  314. goto out;
  315. }
  316. if (cdev->drv->notify(cdev, event))
  317. ret = NOTIFY_OK;
  318. else
  319. ret = NOTIFY_BAD;
  320. out:
  321. return ret;
  322. }
  323. static void ccw_device_oper_notify(struct ccw_device *cdev)
  324. {
  325. struct subchannel *sch = to_subchannel(cdev->dev.parent);
  326. if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
  327. /* Reenable channel measurements, if needed. */
  328. ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
  329. /* Save indication for new paths. */
  330. cdev->private->path_new_mask = sch->vpm;
  331. return;
  332. }
  333. /* Driver doesn't want device back. */
  334. ccw_device_set_notoper(cdev);
  335. ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
  336. }
/*
 * Finished with online/offline processing: disable the subchannel (unless
 * the device is going online), set the new device state and notify
 * interested parties (driver notify callback, waiters on wait_q).
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);
	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));
	cdev->private->state = state;
	switch (state) {
	case DEV_STATE_BOXED:
		CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		/* Unregister the device if the driver gives it up. */
		if (cdev->online &&
		    ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_NOT_OPER:
		CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
			      "%04x\n", cdev->private->dev_id.devno,
			      sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
			cdev->private->state = DEV_STATE_NOT_OPER;
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		} else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	default:
		break;
	}
	/* Deliver the deferred oper notification, if one was requested. */
	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		ccw_device_oper_notify(cdev);
	}
	wake_up(&cdev->private->wait_q);
}
/*
 * Start device recognition (sense id).
 */
void ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after an
	 * timeout (or if sense pgid during path verification detects the device
	 * is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	/* Enabling the subchannel failed -> device is not operational. */
	if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		return;
	}
	ccw_device_sense_id_start(cdev);
}
  411. /*
  412. * Handle events for states that use the ccw request infrastructure.
  413. */
  414. static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
  415. {
  416. switch (e) {
  417. case DEV_EVENT_NOTOPER:
  418. ccw_request_notoper(cdev);
  419. break;
  420. case DEV_EVENT_INTERRUPT:
  421. ccw_request_handler(cdev);
  422. break;
  423. case DEV_EVENT_TIMEOUT:
  424. ccw_request_timeout(cdev);
  425. break;
  426. default:
  427. break;
  428. }
  429. }
/*
 * Translate the accumulated path masks (gone/new/pgid-reset) into a
 * per-chpid event array and report it via the driver's path_event
 * callback, if the device is online and a callback is registered.
 */
static void ccw_device_report_path_events(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int path_event[8];
	int chp, mask;

	for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
		path_event[chp] = PE_NONE;
		/* A path is gone only if it left the verified set (vpm). */
		if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
			path_event[chp] |= PE_PATH_GONE;
		if (mask & cdev->private->path_new_mask & sch->vpm)
			path_event[chp] |= PE_PATH_AVAILABLE;
		if (mask & cdev->private->pgid_reset_mask & sch->vpm)
			path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
	}
	if (cdev->online && cdev->drv->path_event)
		cdev->drv->path_event(cdev, path_event);
}
  447. static void ccw_device_reset_path_events(struct ccw_device *cdev)
  448. {
  449. cdev->private->path_gone_mask = 0;
  450. cdev->private->path_new_mask = 0;
  451. cdev->private->pgid_reset_mask = 0;
  452. }
  453. static void create_fake_irb(struct irb *irb, int type)
  454. {
  455. memset(irb, 0, sizeof(*irb));
  456. if (type == FAKE_CMD_IRB) {
  457. struct cmd_scsw *scsw = &irb->scsw.cmd;
  458. scsw->cc = 1;
  459. scsw->fctl = SCSW_FCTL_START_FUNC;
  460. scsw->actl = SCSW_ACTL_START_PEND;
  461. scsw->stctl = SCSW_STCTL_STATUS_PEND;
  462. } else if (type == FAKE_TM_IRB) {
  463. struct tm_scsw *scsw = &irb->scsw.tm;
  464. scsw->x = 1;
  465. scsw->cc = 1;
  466. scsw->fctl = SCSW_FCTL_START_FUNC;
  467. scsw->actl = SCSW_ACTL_START_PEND;
  468. scsw->stctl = SCSW_STCTL_STATUS_PEND;
  469. }
  470. }
/*
 * Completion callback for path verification.  @err is 0 on success or a
 * negative errno describing why verification failed.
 */
void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Update schib - pom may have changed. */
	if (cio_update_schib(sch)) {
		err = -ENODEV;
		goto callback;
	}
	/* Update lpm with verified path mask. */
	sch->lpm = sch->vpm;
	/* Repeat path verification? */
	if (cdev->private->flags.doverify) {
		ccw_device_verify_start(cdev);
		return;
	}
callback:
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			create_fake_irb(&cdev->private->irb,
					cdev->private->flags.fake_irb);
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->irb);
			memset(&cdev->private->irb, 0, sizeof(struct irb));
		}
		ccw_device_report_path_events(cdev);
		break;
	case -ETIME:
	case -EUSERS:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -EACCES:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
		break;
	default:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
	ccw_device_reset_path_events(cdev);
}
/*
 * Get device online: enable the subchannel and start initial path
 * verification.  Returns 0 on success, -EINVAL if the device is in the
 * wrong state, or the error from cio_enable_subchannel().
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Start initial path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
	return 0;
}
  546. void
  547. ccw_device_disband_done(struct ccw_device *cdev, int err)
  548. {
  549. switch (err) {
  550. case 0:
  551. ccw_device_done(cdev, DEV_STATE_OFFLINE);
  552. break;
  553. case -ETIME:
  554. ccw_device_done(cdev, DEV_STATE_BOXED);
  555. break;
  556. default:
  557. cdev->private->flags.donotify = 0;
  558. ccw_device_done(cdev, DEV_STATE_NOT_OPER);
  559. break;
  560. }
  561. }
/*
 * Shutdown device: take it offline.  Depending on the current state this
 * either completes immediately via ccw_device_done() or starts the Set
 * Path Group ID disband sequence.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Allow ccw_device_offline while disconnected. */
	if (cdev->private->state == DEV_STATE_DISCONNECTED ||
	    cdev->private->state == DEV_STATE_NOT_OPER) {
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		return 0;
	}
	if (cdev->private->state == DEV_STATE_BOXED) {
		ccw_device_done(cdev, DEV_STATE_BOXED);
		return 0;
	}
	if (ccw_device_is_orphan(cdev)) {
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	sch = to_subchannel(cdev->dev.parent);
	if (cio_update_schib(sch))
		return -ENODEV;
	if (scsw_actl(&sch->schib.scsw) != 0)
		/* I/O still in progress on the subchannel. */
		return -EBUSY;
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;
	/* Are we doing path grouping? */
	if (!cdev->private->flags.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}
  602. /*
  603. * Handle not operational event in non-special state.
  604. */
  605. static void ccw_device_generic_notoper(struct ccw_device *cdev,
  606. enum dev_event dev_event)
  607. {
  608. if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
  609. ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
  610. else
  611. ccw_device_set_disconnected(cdev);
  612. }
  613. /*
  614. * Handle path verification event in offline state.
  615. */
  616. static void ccw_device_offline_verify(struct ccw_device *cdev,
  617. enum dev_event dev_event)
  618. {
  619. struct subchannel *sch = to_subchannel(cdev->dev.parent);
  620. css_schedule_eval(sch->schid);
  621. }
/*
 * Handle path verification event: start path verification now if the
 * device is idle, otherwise remember to do it later (doverify flag).
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	if (cdev->private->state == DEV_STATE_W4SENSE) {
		/* Basic sense in progress; verify after it completes. */
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel we have to update the schib.
	 */
	if (cio_update_schib(sch)) {
		ccw_device_verify_done(cdev, -ENODEV);
		return;
	}
	if (scsw_actl(&sch->schib.scsw) != 0 ||
	    (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
	    (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verification now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
}
  657. /*
  658. * Handle path verification event in boxed state.
  659. */
  660. static void ccw_device_boxed_verify(struct ccw_device *cdev,
  661. enum dev_event dev_event)
  662. {
  663. struct subchannel *sch = to_subchannel(cdev->dev.parent);
  664. if (cdev->online) {
  665. if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
  666. ccw_device_done(cdev, DEV_STATE_NOT_OPER);
  667. else
  668. ccw_device_online_verify(cdev, dev_event);
  669. } else
  670. css_schedule_eval(sch->schid);
  671. }
/*
 * Pass interrupt to device driver.  Returns 1 if the interrupt was
 * delivered (or would have been, had a handler been registered),
 * 0 if it is withheld from the driver.
 */
static int ccw_device_call_handler(struct ccw_device *cdev)
{
	unsigned int stctl;
	int ending_status;

	/*
	 * we allow for the device action handler if .
	 *  - we received ending status
	 *  - the action handler requested to see all interrupts
	 *  - we received an intermediate status
	 *  - fast notification was requested (primary status)
	 *  - unsolicited interrupts
	 */
	stctl = scsw_stctl(&cdev->private->irb.scsw);
	ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
		(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
		(stctl == SCSW_STCTL_STATUS_PEND);
	if (!ending_status &&
	    !cdev->private->options.repall &&
	    !(stctl & SCSW_STCTL_INTER_STATUS) &&
	    !(cdev->private->options.fast &&
	      (stctl & SCSW_STCTL_PRIM_STATUS)))
		return 0;
	/* The i/o has ended; no further timeout needed. */
	if (ending_status)
		ccw_device_set_timeout(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      &cdev->private->irb);
	/* The irb is consumed; reset it for the next accumulation. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));
	return 1;
}
/*
 * Got an interrupt for a normal io (state online).
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	int is_cmd;

	irb = this_cpu_ptr(&cio_irb);
	is_cmd = !scsw_is_tm(&irb->scsw);
	/* Check for unsolicited interrupt. */
	if (!scsw_is_solicited(&irb->scsw)) {
		if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			memcpy(&cdev->private->irb, irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		/* Unsolicited status is delivered with intparm 0. */
		if (cdev->handler)
			cdev->handler (cdev, 0, irb);
		if (cdev->private->flags.doverify)
			ccw_device_online_verify(cdev, 0);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (is_cmd && cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
/*
 * Got a timeout in online state: try to terminate the current i/o and
 * report -ETIMEDOUT to the driver.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	cdev->private->iretry = 255;
	/* Remember the rc the killing_* handlers shall report. */
	cdev->private->async_kill_io_rc = -ETIMEDOUT;
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* halt/clear interrupt pending; retry from TIMEOUT_KILL. */
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret)
		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	else if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
/*
 * Got an interrupt for a basic sense.
 */
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = this_cpu_ptr(&cio_irb);
	/* Check for unsolicited interrupt. */
	if (scsw_stctl(&irb->scsw) ==
	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (scsw_cc(&irb->scsw) == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
				      "interrupt during w4sense...\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
			if (cdev->handler)
				cdev->handler (cdev, 0, irb);
		}
		return;
	}
	/*
	 * Check if a halt or clear has been issued in the meanwhile. If yes,
	 * only deliver the halt/clear interrupt to the device driver as if it
	 * had killed the original request.
	 */
	if (scsw_fctl(&irb->scsw) &
	    (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
		cdev->private->flags.dosense = 0;
		memset(&cdev->private->irb, 0, sizeof(struct irb));
		ccw_device_accumulate_irb(cdev, irb);
		goto call_handler;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
call_handler:
	cdev->private->state = DEV_STATE_ONLINE;
	/* In case sensing interfered with setting the device online */
	wake_up(&cdev->private->wait_q);
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
  821. static void
  822. ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
  823. {
  824. ccw_device_set_timeout(cdev, 0);
  825. /* Start delayed path verification. */
  826. ccw_device_online_verify(cdev, 0);
  827. /* OK, i/o is dead now. Call interrupt handler. */
  828. if (cdev->handler)
  829. cdev->handler(cdev, cdev->private->intparm,
  830. ERR_PTR(cdev->private->async_kill_io_rc));
  831. }
  832. static void
  833. ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
  834. {
  835. int ret;
  836. ret = ccw_device_cancel_halt_clear(cdev);
  837. if (ret == -EBUSY) {
  838. ccw_device_set_timeout(cdev, 3*HZ);
  839. return;
  840. }
  841. /* Start delayed path verification. */
  842. ccw_device_online_verify(cdev, 0);
  843. if (cdev->handler)
  844. cdev->handler(cdev, cdev->private->intparm,
  845. ERR_PTR(cdev->private->async_kill_io_rc));
  846. }
  847. void ccw_device_kill_io(struct ccw_device *cdev)
  848. {
  849. int ret;
  850. ccw_device_set_timeout(cdev, 0);
  851. cdev->private->iretry = 255;
  852. cdev->private->async_kill_io_rc = -EIO;
  853. ret = ccw_device_cancel_halt_clear(cdev);
  854. if (ret == -EBUSY) {
  855. ccw_device_set_timeout(cdev, 3*HZ);
  856. cdev->private->state = DEV_STATE_TIMEOUT_KILL;
  857. return;
  858. }
  859. /* Start delayed path verification. */
  860. ccw_device_online_verify(cdev, 0);
  861. if (cdev->handler)
  862. cdev->handler(cdev, cdev->private->intparm,
  863. ERR_PTR(-EIO));
  864. }
  865. static void
  866. ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
  867. {
  868. /* Start verification after current task finished. */
  869. cdev->private->flags.doverify = 1;
  870. }
  871. static void
  872. ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
  873. {
  874. struct subchannel *sch;
  875. sch = to_subchannel(cdev->dev.parent);
  876. if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
  877. /* Couldn't enable the subchannel for i/o. Sick device. */
  878. return;
  879. cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
  880. ccw_device_sense_id_start(cdev);
  881. }
/*
 * Re-examine a disconnected device: refresh the subchannel configuration
 * and restart device recognition, or schedule a full re-evaluation if a
 * different device now answers on the subchannel.
 */
void ccw_device_trigger_reprobe(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Only meaningful for devices currently in the disconnected state. */
	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	sch = to_subchannel(cdev->dev.parent);
	/* Update some values. */
	if (cio_update_schib(sch))
		return;
	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/*
	 * Use the initial configuration since we can't be sure that the old
	 * paths are valid.
	 */
	io_subchannel_init_config(sch);
	if (cio_commit_config(sch))
		return;

	/* We should also update ssd info, but this has to wait. */
	/* Check if this is another device which appeared on the same sch. */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
		css_schedule_eval(sch->schid);
	else
		ccw_device_start_id(cdev, 0);
}
  910. static void ccw_device_disabled_irq(struct ccw_device *cdev,
  911. enum dev_event dev_event)
  912. {
  913. struct subchannel *sch;
  914. sch = to_subchannel(cdev->dev.parent);
  915. /*
  916. * An interrupt in a disabled state means a previous disable was not
  917. * successful - should not happen, but we try to disable again.
  918. */
  919. cio_disable_subchannel(sch);
  920. }
  921. static void
  922. ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
  923. {
  924. retry_set_schib(cdev);
  925. cdev->private->state = DEV_STATE_ONLINE;
  926. dev_fsm_event(cdev, dev_event);
  927. }
  928. static void ccw_device_update_cmfblock(struct ccw_device *cdev,
  929. enum dev_event dev_event)
  930. {
  931. cmf_retry_copy_block(cdev);
  932. cdev->private->state = DEV_STATE_ONLINE;
  933. dev_fsm_event(cdev, dev_event);
  934. }
  935. static void
  936. ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
  937. {
  938. ccw_device_set_timeout(cdev, 0);
  939. cdev->private->state = DEV_STATE_NOT_OPER;
  940. wake_up(&cdev->private->wait_q);
  941. }
  942. static void
  943. ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
  944. {
  945. int ret;
  946. ret = ccw_device_cancel_halt_clear(cdev);
  947. if (ret == -EBUSY) {
  948. ccw_device_set_timeout(cdev, HZ/10);
  949. } else {
  950. cdev->private->state = DEV_STATE_NOT_OPER;
  951. wake_up(&cdev->private->wait_q);
  952. }
  953. }
  954. /*
  955. * No operation action. This is used e.g. to ignore a timeout event in
  956. * state offline.
  957. */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Intentionally empty: this event is ignored in the current state. */
}
  962. /*
  963. * device statemachine
  964. */
/*
 * State/event dispatch table for the ccw device state machine: indexed
 * first by the device's current state, then by the incoming event, it
 * yields the action function dev_fsm_event() invokes.
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_offline_verify,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_delay_verify,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_nop,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_boxed_verify,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop, //FIXME
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_start_id,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* channel-measurement reconfiguration: every event retries and replays */
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
	},
	[DEV_STATE_CMFUPDATE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
	},
	[DEV_STATE_STEAL_LOCK] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
};
  1065. EXPORT_SYMBOL_GPL(ccw_device_set_timeout);