dasd_alias.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005
  1. /*
  2. * PAV alias management for the DASD ECKD discipline
  3. *
  4. * Copyright IBM Corp. 2007
  5. * Author(s): Stefan Weinhuber <wein@de.ibm.com>
  6. */
  7. #define KMSG_COMPONENT "dasd-eckd"
  8. #include <linux/list.h>
  9. #include <linux/slab.h>
  10. #include <asm/ebcdic.h>
  11. #include "dasd_int.h"
  12. #include "dasd_eckd.h"
  13. #ifdef PRINTK_HEADER
  14. #undef PRINTK_HEADER
  15. #endif /* PRINTK_HEADER */
  16. #define PRINTK_HEADER "dasd(eckd):"
  17. /*
  18. * General concept of alias management:
  19. * - PAV and DASD alias management is specific to the eckd discipline.
  20. * - A device is connected to an lcu as long as the device exists.
  21. * dasd_alias_make_device_known_to_lcu will be called when the
  22. * device is checked by the eckd discipline and
  23. * dasd_alias_disconnect_device_from_lcu will be called
  24. * before the device is deleted.
  25. * - The dasd_alias_add_device / dasd_alias_remove_device
  26. * functions mark the point when a device is 'ready for service'.
  27. * - A summary unit check is a rare occasion, but it is mandatory to
  28. * support it. It requires some complex recovery actions before the
  29. * devices can be used again (see dasd_alias_handle_summary_unit_check).
  30. * - dasd_alias_get_start_dev will find an alias device that can be used
  31. * instead of the base device and does some (very simple) load balancing.
  32. * This is the function that gets called for each I/O, so when improving
  33. * something, this function should get faster or better, the rest has just
  34. * to be correct.
  35. */
/* forward declarations: the init code below references these workers */
static void summary_unit_check_handling_work(struct work_struct *);
static void lcu_update_work(struct work_struct *);
static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);

/* root of the alias tree: one alias_server entry per storage server */
static struct alias_root aliastree = {
	.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
	.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
};
  43. static struct alias_server *_find_server(struct dasd_uid *uid)
  44. {
  45. struct alias_server *pos;
  46. list_for_each_entry(pos, &aliastree.serverlist, server) {
  47. if (!strncmp(pos->uid.vendor, uid->vendor,
  48. sizeof(uid->vendor))
  49. && !strncmp(pos->uid.serial, uid->serial,
  50. sizeof(uid->serial)))
  51. return pos;
  52. }
  53. return NULL;
  54. }
  55. static struct alias_lcu *_find_lcu(struct alias_server *server,
  56. struct dasd_uid *uid)
  57. {
  58. struct alias_lcu *pos;
  59. list_for_each_entry(pos, &server->lculist, lcu) {
  60. if (pos->uid.ssid == uid->ssid)
  61. return pos;
  62. }
  63. return NULL;
  64. }
  65. static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
  66. struct dasd_uid *uid)
  67. {
  68. struct alias_pav_group *pos;
  69. __u8 search_unit_addr;
  70. /* for hyper pav there is only one group */
  71. if (lcu->pav == HYPER_PAV) {
  72. if (list_empty(&lcu->grouplist))
  73. return NULL;
  74. else
  75. return list_first_entry(&lcu->grouplist,
  76. struct alias_pav_group, group);
  77. }
  78. /* for base pav we have to find the group that matches the base */
  79. if (uid->type == UA_BASE_DEVICE)
  80. search_unit_addr = uid->real_unit_addr;
  81. else
  82. search_unit_addr = uid->base_unit_addr;
  83. list_for_each_entry(pos, &lcu->grouplist, group) {
  84. if (pos->uid.base_unit_addr == search_unit_addr &&
  85. !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
  86. return pos;
  87. }
  88. return NULL;
  89. }
  90. static struct alias_server *_allocate_server(struct dasd_uid *uid)
  91. {
  92. struct alias_server *server;
  93. server = kzalloc(sizeof(*server), GFP_KERNEL);
  94. if (!server)
  95. return ERR_PTR(-ENOMEM);
  96. memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
  97. memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
  98. INIT_LIST_HEAD(&server->server);
  99. INIT_LIST_HEAD(&server->lculist);
  100. return server;
  101. }
/* Release an alias_server obtained from _allocate_server(). */
static void _free_server(struct alias_server *server)
{
	kfree(server);
}
/*
 * Allocate and initialize an alias_lcu, including the unit address
 * configuration buffer and a preallocated reset-summary-unit-check
 * request (so the SUC path never has to allocate memory).
 * Returns ERR_PTR(-ENOMEM) if any allocation fails.
 */
static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
	struct alias_lcu *lcu;

	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
	if (!lcu)
		return ERR_PTR(-ENOMEM);
	/* GFP_DMA: these buffers are accessed by the channel subsystem */
	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
	if (!lcu->uac)
		goto out_err1;
	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr)
		goto out_err2;
	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
				       GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->cpaddr)
		goto out_err3;
	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->data)
		goto out_err4;

	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
	lcu->uid.ssid = uid->ssid;
	lcu->pav = NO_PAV;
	/* new lcu: the unit address configuration must be read first */
	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
	INIT_LIST_HEAD(&lcu->lcu);
	INIT_LIST_HEAD(&lcu->inactive_devices);
	INIT_LIST_HEAD(&lcu->active_devices);
	INIT_LIST_HEAD(&lcu->grouplist);
	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
	spin_lock_init(&lcu->lock);
	init_completion(&lcu->lcu_setup);
	return lcu;

	/* unwind in reverse allocation order */
out_err4:
	kfree(lcu->rsu_cqr->cpaddr);
out_err3:
	kfree(lcu->rsu_cqr);
out_err2:
	kfree(lcu->uac);
out_err1:
	kfree(lcu);
	return ERR_PTR(-ENOMEM);
}
/* Release an lcu and its embedded buffers (counterpart of _allocate_lcu). */
static void _free_lcu(struct alias_lcu *lcu)
{
	kfree(lcu->rsu_cqr->data);
	kfree(lcu->rsu_cqr->cpaddr);
	kfree(lcu->rsu_cqr);
	kfree(lcu->uac);
	kfree(lcu);
}
/*
 * This is the function that will allocate all the server and lcu data,
 * so this function must be called first for a new device.
 * The device is put on the lcu's inactive list; dasd_alias_add_device
 * activates it later.
 * Returns 0 on success, a negative errno (e.g. -ENOMEM) on failure.
 */
int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	unsigned long flags;
	struct alias_server *server, *newserver;
	struct alias_lcu *lcu, *newlcu;
	struct dasd_uid uid;

	private = (struct dasd_eckd_private *) device->private;

	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&aliastree.lock, flags);
	server = _find_server(&uid);
	if (!server) {
		/*
		 * Allocation may sleep, so drop the lock for it and
		 * re-check afterwards whether another task created the
		 * server in the meantime.
		 */
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newserver = _allocate_server(&uid);
		if (IS_ERR(newserver))
			return PTR_ERR(newserver);
		spin_lock_irqsave(&aliastree.lock, flags);
		server = _find_server(&uid);
		if (!server) {
			list_add(&newserver->server, &aliastree.serverlist);
			server = newserver;
		} else {
			/* someone was faster */
			_free_server(newserver);
		}
	}

	lcu = _find_lcu(server, &uid);
	if (!lcu) {
		/* same unlock/allocate/re-check pattern as for the server */
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newlcu = _allocate_lcu(&uid);
		if (IS_ERR(newlcu))
			return PTR_ERR(newlcu);
		spin_lock_irqsave(&aliastree.lock, flags);
		lcu = _find_lcu(server, &uid);
		if (!lcu) {
			list_add(&newlcu->lcu, &server->lculist);
			lcu = newlcu;
		} else {
			/* someone was faster */
			_free_lcu(newlcu);
		}
	}
	spin_lock(&lcu->lock);
	list_add(&device->alias_list, &lcu->inactive_devices);
	private->lcu = lcu;
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(&aliastree.lock, flags);

	return 0;
}
/*
 * This function removes a device from the scope of alias management.
 * The complicated part is to make sure that it is not in use by
 * any of the workers. If necessary cancel the work.
 * Empty lcu and server structures are freed once their last device
 * is gone.
 */
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	unsigned long flags;
	struct alias_lcu *lcu;
	struct alias_server *server;
	int was_pending;
	struct dasd_uid uid;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	/* nothing to do if already disconnected */
	if (!lcu)
		return;
	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&lcu->lock, flags);
	list_del_init(&device->alias_list);
	/* make sure that the workers don't use this device */
	if (device == lcu->suc_data.device) {
		/* cancel_work_sync may sleep: must drop the lock for it */
		spin_unlock_irqrestore(&lcu->lock, flags);
		cancel_work_sync(&lcu->suc_data.worker);
		spin_lock_irqsave(&lcu->lock, flags);
		/* re-check: the worker may have finished and cleared it */
		if (device == lcu->suc_data.device) {
			dasd_put_device(device);
			lcu->suc_data.device = NULL;
		}
	}
	was_pending = 0;
	if (device == lcu->ruac_data.device) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		was_pending = 1;
		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->ruac_data.device) {
			dasd_put_device(device);
			lcu->ruac_data.device = NULL;
		}
	}
	private->lcu = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);

	/* aliastree.lock is taken before lcu->lock (lock ordering) */
	spin_lock_irqsave(&aliastree.lock, flags);
	spin_lock(&lcu->lock);
	/* free the lcu if this was its last device */
	if (list_empty(&lcu->grouplist) &&
	    list_empty(&lcu->active_devices) &&
	    list_empty(&lcu->inactive_devices)) {
		list_del(&lcu->lcu);
		spin_unlock(&lcu->lock);
		_free_lcu(lcu);
		lcu = NULL;
	} else {
		/* a cancelled pending update must be rescheduled elsewhere */
		if (was_pending)
			_schedule_lcu_update(lcu, NULL);
		spin_unlock(&lcu->lock);
	}
	/* free the server as well if this was its last lcu */
	server = _find_server(&uid);
	if (server && list_empty(&server->lculist)) {
		list_del(&server->server);
		_free_server(server);
	}
	spin_unlock_irqrestore(&aliastree.lock, flags);
}
/*
 * This function assumes that the unit address configuration stored
 * in the lcu is up to date and will update the device uid before
 * adding it to a pav group.
 * Caller must hold lcu->lock; 'pos' names the device whose cdev lock
 * is already held by the caller. Returns 0 or -ENOMEM.
 */
static int _add_device_to_lcu(struct alias_lcu *lcu,
			      struct dasd_device *device,
			      struct dasd_device *pos)
{

	struct dasd_eckd_private *private;
	struct alias_pav_group *group;
	struct dasd_uid uid;
	unsigned long flags;

	private = (struct dasd_eckd_private *) device->private;

	/* only lock if not already locked */
	if (device != pos)
		spin_lock_irqsave_nested(get_ccwdev_lock(device->cdev), flags,
					 CDEV_NESTED_SECOND);
	/* refresh ua type and base address from the freshly read uac */
	private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
	private->uid.base_unit_addr =
		lcu->uac->unit[private->uid.real_unit_addr].base_ua;
	uid = private->uid;

	if (device != pos)
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	/* if we have no PAV anyway, we don't need to bother with PAV groups */
	if (lcu->pav == NO_PAV) {
		list_move(&device->alias_list, &lcu->active_devices);
		return 0;
	}

	group = _find_group(lcu, &uid);
	if (!group) {
		/* GFP_ATOMIC: we are called with spinlocks held */
		group = kzalloc(sizeof(*group), GFP_ATOMIC);
		if (!group)
			return -ENOMEM;
		memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
		memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
		group->uid.ssid = uid.ssid;
		if (uid.type == UA_BASE_DEVICE)
			group->uid.base_unit_addr = uid.real_unit_addr;
		else
			group->uid.base_unit_addr = uid.base_unit_addr;
		memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
		INIT_LIST_HEAD(&group->group);
		INIT_LIST_HEAD(&group->baselist);
		INIT_LIST_HEAD(&group->aliaslist);
		list_add(&group->group, &lcu->grouplist);
	}
	if (uid.type == UA_BASE_DEVICE)
		list_move(&device->alias_list, &group->baselist);
	else
		list_move(&device->alias_list, &group->aliaslist);
	private->pavgroup = group;
	return 0;
};
  332. static void _remove_device_from_lcu(struct alias_lcu *lcu,
  333. struct dasd_device *device)
  334. {
  335. struct dasd_eckd_private *private;
  336. struct alias_pav_group *group;
  337. private = (struct dasd_eckd_private *) device->private;
  338. list_move(&device->alias_list, &lcu->inactive_devices);
  339. group = private->pavgroup;
  340. if (!group)
  341. return;
  342. private->pavgroup = NULL;
  343. if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
  344. list_del(&group->group);
  345. kfree(group);
  346. return;
  347. }
  348. if (group->next == device)
  349. group->next = NULL;
  350. };
  351. static int
  352. suborder_not_supported(struct dasd_ccw_req *cqr)
  353. {
  354. char *sense;
  355. char reason;
  356. char msg_format;
  357. char msg_no;
  358. sense = dasd_get_sense(&cqr->irb);
  359. if (!sense)
  360. return 0;
  361. reason = sense[0];
  362. msg_format = (sense[7] & 0xF0);
  363. msg_no = (sense[7] & 0x0F);
  364. /* command reject, Format 0 MSG 4 - invalid parameter */
  365. if ((reason == 0x80) && (msg_format == 0x00) && (msg_no == 0x04))
  366. return 1;
  367. return 0;
  368. }
  369. static int read_unit_address_configuration(struct dasd_device *device,
  370. struct alias_lcu *lcu)
  371. {
  372. struct dasd_psf_prssd_data *prssdp;
  373. struct dasd_ccw_req *cqr;
  374. struct ccw1 *ccw;
  375. int rc;
  376. unsigned long flags;
  377. cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
  378. (sizeof(struct dasd_psf_prssd_data)),
  379. device);
  380. if (IS_ERR(cqr))
  381. return PTR_ERR(cqr);
  382. cqr->startdev = device;
  383. cqr->memdev = device;
  384. clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
  385. cqr->retries = 10;
  386. cqr->expires = 20 * HZ;
  387. /* Prepare for Read Subsystem Data */
  388. prssdp = (struct dasd_psf_prssd_data *) cqr->data;
  389. memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
  390. prssdp->order = PSF_ORDER_PRSSD;
  391. prssdp->suborder = 0x0e; /* Read unit address configuration */
  392. /* all other bytes of prssdp must be zero */
  393. ccw = cqr->cpaddr;
  394. ccw->cmd_code = DASD_ECKD_CCW_PSF;
  395. ccw->count = sizeof(struct dasd_psf_prssd_data);
  396. ccw->flags |= CCW_FLAG_CC;
  397. ccw->cda = (__u32)(addr_t) prssdp;
  398. /* Read Subsystem Data - feature codes */
  399. memset(lcu->uac, 0, sizeof(*(lcu->uac)));
  400. ccw++;
  401. ccw->cmd_code = DASD_ECKD_CCW_RSSD;
  402. ccw->count = sizeof(*(lcu->uac));
  403. ccw->cda = (__u32)(addr_t) lcu->uac;
  404. cqr->buildclk = get_tod_clock();
  405. cqr->status = DASD_CQR_FILLED;
  406. /* need to unset flag here to detect race with summary unit check */
  407. spin_lock_irqsave(&lcu->lock, flags);
  408. lcu->flags &= ~NEED_UAC_UPDATE;
  409. spin_unlock_irqrestore(&lcu->lock, flags);
  410. do {
  411. rc = dasd_sleep_on(cqr);
  412. if (rc && suborder_not_supported(cqr))
  413. return -EOPNOTSUPP;
  414. } while (rc && (cqr->retries > 0));
  415. if (rc) {
  416. spin_lock_irqsave(&lcu->lock, flags);
  417. lcu->flags |= NEED_UAC_UPDATE;
  418. spin_unlock_irqrestore(&lcu->lock, flags);
  419. }
  420. dasd_kfree_request(cqr, cqr->memdev);
  421. return rc;
  422. }
/*
 * Re-read the unit address configuration through refdev and rebuild
 * the pav groups of the lcu from scratch.
 * Returns 0 on success or the error from
 * read_unit_address_configuration.
 */
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
	unsigned long flags;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *device, *tempdev;
	int i, rc;
	struct dasd_eckd_private *private;

	spin_lock_irqsave(&lcu->lock, flags);
	/* dissolve all groups; every device goes back to active_devices */
	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_del(&pavgroup->group);
		kfree(pavgroup);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);

	rc = read_unit_address_configuration(refdev, lcu);
	if (rc)
		return rc;

	/* need to take cdev lock before lcu lock */
	spin_lock_irqsave_nested(get_ccwdev_lock(refdev->cdev), flags,
				 CDEV_NESTED_FIRST);
	spin_lock(&lcu->lock);
	/* derive the pav mode from the first alias entry found in the uac */
	lcu->pav = NO_PAV;
	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
		switch (lcu->uac->unit[i].ua_type) {
		case UA_BASE_PAV_ALIAS:
			lcu->pav = BASE_PAV;
			break;
		case UA_HYPER_PAV_ALIAS:
			lcu->pav = HYPER_PAV;
			break;
		}
		if (lcu->pav != NO_PAV)
			break;
	}
	/* re-sort all devices into their (possibly changed) groups */
	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
				 alias_list) {
		_add_device_to_lcu(lcu, device, refdev);
	}
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(get_ccwdev_lock(refdev->cdev), flags);
	return 0;
}
/*
 * Delayed-work handler: perform the deferred lcu update and either
 * drop the device reference or requeue itself on failure.
 */
static void lcu_update_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct read_uac_work_data *ruac_data;
	struct dasd_device *device;
	unsigned long flags;
	int rc;

	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
	device = ruac_data->device;
	rc = _lcu_update(device, lcu);
	/*
	 * Need to check flags again, as there could have been another
	 * prepare_update or a new device while we were still
	 * processing the data
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
			    " alias data in lcu (rc = %d), retry later", rc);
		/* keep the device reference when the work was requeued */
		if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
			dasd_put_device(device);
	} else {
		dasd_put_device(device);
		lcu->ruac_data.device = NULL;
		lcu->flags &= ~UPDATE_PENDING;
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
/*
 * Flag the lcu for an update and schedule the update work, preferring
 * 'device' (if usable) or otherwise any device of the lcu to do the
 * I/O. Caller must hold lcu->lock.
 * Returns 0 if the work is scheduled or already pending, -EINVAL if
 * no usable device was found.
 */
static int _schedule_lcu_update(struct alias_lcu *lcu,
				struct dasd_device *device)
{
	struct dasd_device *usedev = NULL;
	struct alias_pav_group *group;

	lcu->flags |= NEED_UAC_UPDATE;
	if (lcu->ruac_data.device) {
		/* already scheduled or running */
		return 0;
	}
	if (device && !list_empty(&device->alias_list))
		usedev = device;

	if (!usedev && !list_empty(&lcu->grouplist)) {
		group = list_first_entry(&lcu->grouplist,
					 struct alias_pav_group, group);
		if (!list_empty(&group->baselist))
			usedev = list_first_entry(&group->baselist,
						  struct dasd_device,
						  alias_list);
		else if (!list_empty(&group->aliaslist))
			usedev = list_first_entry(&group->aliaslist,
						  struct dasd_device,
						  alias_list);
	}
	if (!usedev && !list_empty(&lcu->active_devices)) {
		usedev = list_first_entry(&lcu->active_devices,
					  struct dasd_device, alias_list);
	}
	/*
	 * if we haven't found a proper device yet, give up for now, the next
	 * device that will be set active will trigger an lcu update
	 */
	if (!usedev)
		return -EINVAL;
	/* the reference is dropped by the worker (or here if queueing fails) */
	dasd_get_device(usedev);
	lcu->ruac_data.device = usedev;
	if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
		dasd_put_device(usedev);
	return 0;
}
/*
 * Mark a device as 'ready for service': sort it into its pav group,
 * or - if the lcu data is stale or grouping failed - keep it on the
 * active list and trigger an lcu update.
 * Returns 0 on success or the error from _add_device_to_lcu.
 */
int dasd_alias_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct alias_lcu *lcu;
	unsigned long flags;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	rc = 0;

	/* need to take cdev lock before lcu lock */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	spin_lock(&lcu->lock);
	if (!(lcu->flags & UPDATE_PENDING)) {
		rc = _add_device_to_lcu(lcu, device, device);
		if (rc)
			lcu->flags |= UPDATE_PENDING;
	}
	if (lcu->flags & UPDATE_PENDING) {
		list_move(&device->alias_list, &lcu->active_devices);
		_schedule_lcu_update(lcu, device);
	}
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return rc;
}
  570. int dasd_alias_update_add_device(struct dasd_device *device)
  571. {
  572. struct dasd_eckd_private *private;
  573. private = (struct dasd_eckd_private *) device->private;
  574. private->lcu->flags |= UPDATE_PENDING;
  575. return dasd_alias_add_device(device);
  576. }
  577. int dasd_alias_remove_device(struct dasd_device *device)
  578. {
  579. struct dasd_eckd_private *private;
  580. struct alias_lcu *lcu;
  581. unsigned long flags;
  582. private = (struct dasd_eckd_private *) device->private;
  583. lcu = private->lcu;
  584. /* nothing to do if already removed */
  585. if (!lcu)
  586. return 0;
  587. spin_lock_irqsave(&lcu->lock, flags);
  588. _remove_device_from_lcu(lcu, device);
  589. spin_unlock_irqrestore(&lcu->lock, flags);
  590. return 0;
  591. }
/*
 * Round-robin selection of an alias device to start I/O on instead of
 * base_device (simple load balancing). Returns NULL when the base
 * device itself should be used: no PAV, update pending, missing
 * prefix support, or the chosen alias is busier, stopped or offline.
 * Called for every I/O, so this path must stay fast.
 */
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
	struct dasd_device *alias_device;
	struct alias_pav_group *group;
	struct alias_lcu *lcu;
	struct dasd_eckd_private *private, *alias_priv;
	unsigned long flags;

	private = (struct dasd_eckd_private *) base_device->private;
	group = private->pavgroup;
	lcu = private->lcu;
	if (!group || !lcu)
		return NULL;
	if (lcu->pav == NO_PAV ||
	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
		return NULL;
	if (unlikely(!(private->features.feature[8] & 0x01))) {
		/*
		 * PAV enabled but prefix not, very unlikely
		 * seems to be a lost pathgroup
		 * use base device to do IO
		 */
		DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
			      "Prefix not enabled with PAV enabled\n");
		return NULL;
	}

	spin_lock_irqsave(&lcu->lock, flags);
	alias_device = group->next;
	if (!alias_device) {
		if (list_empty(&group->aliaslist)) {
			spin_unlock_irqrestore(&lcu->lock, flags);
			return NULL;
		} else {
			alias_device = list_first_entry(&group->aliaslist,
							struct dasd_device,
							alias_list);
		}
	}
	/* advance the round-robin cursor, wrapping at the end of the list */
	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
		group->next = list_first_entry(&group->aliaslist,
					       struct dasd_device, alias_list);
	else
		group->next = list_first_entry(&alias_device->alias_list,
					       struct dasd_device, alias_list);
	spin_unlock_irqrestore(&lcu->lock, flags);
	alias_priv = (struct dasd_eckd_private *) alias_device->private;
	/* only use the alias if it is less busy than the base and usable */
	if ((alias_priv->count < private->count) && !alias_device->stopped &&
	    !test_bit(DASD_FLAG_OFFLINE, &alias_device->flags))
		return alias_device;
	else
		return NULL;
}
  643. /*
  644. * Summary unit check handling depends on the way alias devices
  645. * are handled so it is done here rather then in dasd_eckd.c
  646. */
  647. static int reset_summary_unit_check(struct alias_lcu *lcu,
  648. struct dasd_device *device,
  649. char reason)
  650. {
  651. struct dasd_ccw_req *cqr;
  652. int rc = 0;
  653. struct ccw1 *ccw;
  654. cqr = lcu->rsu_cqr;
  655. strncpy((char *) &cqr->magic, "ECKD", 4);
  656. ASCEBC((char *) &cqr->magic, 4);
  657. ccw = cqr->cpaddr;
  658. ccw->cmd_code = DASD_ECKD_CCW_RSCK;
  659. ccw->flags = CCW_FLAG_SLI;
  660. ccw->count = 16;
  661. ccw->cda = (__u32)(addr_t) cqr->data;
  662. ((char *)cqr->data)[0] = reason;
  663. clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
  664. cqr->retries = 255; /* set retry counter to enable basic ERP */
  665. cqr->startdev = device;
  666. cqr->memdev = device;
  667. cqr->block = NULL;
  668. cqr->expires = 5 * HZ;
  669. cqr->buildclk = get_tod_clock();
  670. cqr->status = DASD_CQR_FILLED;
  671. rc = dasd_sleep_on_immediatly(cqr);
  672. return rc;
  673. }
  674. static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
  675. {
  676. struct alias_pav_group *pavgroup;
  677. struct dasd_device *device;
  678. struct dasd_eckd_private *private;
  679. unsigned long flags;
  680. /* active and inactive list can contain alias as well as base devices */
  681. list_for_each_entry(device, &lcu->active_devices, alias_list) {
  682. private = (struct dasd_eckd_private *) device->private;
  683. spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
  684. if (private->uid.type != UA_BASE_DEVICE) {
  685. spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
  686. flags);
  687. continue;
  688. }
  689. spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
  690. dasd_schedule_block_bh(device->block);
  691. dasd_schedule_device_bh(device);
  692. }
  693. list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
  694. private = (struct dasd_eckd_private *) device->private;
  695. spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
  696. if (private->uid.type != UA_BASE_DEVICE) {
  697. spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
  698. flags);
  699. continue;
  700. }
  701. spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
  702. dasd_schedule_block_bh(device->block);
  703. dasd_schedule_device_bh(device);
  704. }
  705. list_for_each_entry(pavgroup, &lcu->grouplist, group) {
  706. list_for_each_entry(device, &pavgroup->baselist, alias_list) {
  707. dasd_schedule_block_bh(device->block);
  708. dasd_schedule_device_bh(device);
  709. }
  710. }
  711. }
/*
 * Flush all pending I/O from every alias device of the lcu and move
 * those devices (ungrouped) back to the active_devices list.
 */
static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device, *temp;
	struct dasd_eckd_private *private;
	int rc;
	unsigned long flags;
	LIST_HEAD(active);

	/*
	 * Problem here is that dasd_flush_device_queue may wait
	 * for termination of a request to complete. We can't keep
	 * the lcu lock during that time, so we must assume that
	 * the lists may have changed.
	 * Idea: first gather all active alias devices in a separate list,
	 * then flush the first element of this list unlocked, and afterwards
	 * check if it is still on the list before moving it to the
	 * active_devices list.
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(device, temp, &lcu->active_devices,
				 alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		if (private->uid.type == UA_BASE_DEVICE)
			continue;
		list_move(&device->alias_list, &active);
	}

	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_splice_init(&pavgroup->aliaslist, &active);
	}
	while (!list_empty(&active)) {
		device = list_first_entry(&active, struct dasd_device,
					  alias_list);
		spin_unlock_irqrestore(&lcu->lock, flags);
		/* rc is ignored: flushing is best effort here */
		rc = dasd_flush_device_queue(device);
		spin_lock_irqsave(&lcu->lock, flags);
		/*
		 * only move device around if it wasn't moved away while we
		 * were waiting for the flush
		 */
		if (device == list_first_entry(&active,
					       struct dasd_device, alias_list)) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
  760. static void __stop_device_on_lcu(struct dasd_device *device,
  761. struct dasd_device *pos)
  762. {
  763. /* If pos == device then device is already locked! */
  764. if (pos == device) {
  765. dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
  766. return;
  767. }
  768. spin_lock(get_ccwdev_lock(pos->cdev));
  769. dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
  770. spin_unlock(get_ccwdev_lock(pos->cdev));
  771. }
/*
 * This function is called in interrupt context, so the
 * cdev lock for device is already locked!
 * It sets the summary-unit-check stop bit on every device of the lcu
 * (active, inactive and all pav group members).
 */
static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
				     struct dasd_device *device)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *pos;

	list_for_each_entry(pos, &lcu->active_devices, alias_list)
		__stop_device_on_lcu(device, pos);
	list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
		__stop_device_on_lcu(device, pos);
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(pos, &pavgroup->baselist, alias_list)
			__stop_device_on_lcu(device, pos);
		list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
			__stop_device_on_lcu(device, pos);
	}
}
  792. static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
  793. {
  794. struct alias_pav_group *pavgroup;
  795. struct dasd_device *device;
  796. unsigned long flags;
  797. list_for_each_entry(device, &lcu->active_devices, alias_list) {
  798. spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
  799. dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
  800. spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
  801. }
  802. list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
  803. spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
  804. dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
  805. spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
  806. }
  807. list_for_each_entry(pavgroup, &lcu->grouplist, group) {
  808. list_for_each_entry(device, &pavgroup->baselist, alias_list) {
  809. spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
  810. dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
  811. spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
  812. flags);
  813. }
  814. list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
  815. spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
  816. dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
  817. spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
  818. flags);
  819. }
  820. }
  821. }
/*
 * Worker scheduled by dasd_alias_handle_summary_unit_check().
 * Runs in process context, so it may sleep while flushing device queues.
 *
 * The ordering is deliberate:
 *  1. flush the alias devices of the lcu (sleeps; lcu->lock NOT held)
 *  2. clear the stop bits on the triggering device and reset the summary
 *     unit check condition on the storage server
 *  3. under lcu->lock: re-enable all devices, restart the base devices
 *     and schedule a fresh read of the alias configuration
 */
static void summary_unit_check_handling_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct summary_unit_check_work_data *suc_data;
	unsigned long flags;
	struct dasd_device *device;

	suc_data = container_of(work, struct summary_unit_check_work_data,
				worker);
	lcu = container_of(suc_data, struct alias_lcu, suc_data);
	device = suc_data->device;

	/* 1. flush alias devices */
	flush_all_alias_devices_on_lcu(lcu);

	/* 2. reset summary unit check */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	reset_summary_unit_check(lcu, device, suc_data->reason);

	spin_lock_irqsave(&lcu->lock, flags);
	_unstop_all_devices_on_lcu(lcu);
	_restart_all_base_devices_on_lcu(lcu);
	/* 3. read new alias configuration */
	_schedule_lcu_update(lcu, device);
	/*
	 * Clearing suc_data.device allows the next summary unit check to
	 * schedule this worker again; drop the reference taken when the
	 * work was scheduled.
	 */
	lcu->suc_data.device = NULL;
	dasd_put_device(device);
	spin_unlock_irqrestore(&lcu->lock, flags);
}
  849. /*
  850. * note: this will be called from int handler context (cdev locked)
  851. */
  852. void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
  853. struct irb *irb)
  854. {
  855. struct alias_lcu *lcu;
  856. char reason;
  857. struct dasd_eckd_private *private;
  858. char *sense;
  859. private = (struct dasd_eckd_private *) device->private;
  860. sense = dasd_get_sense(irb);
  861. if (sense) {
  862. reason = sense[8];
  863. DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
  864. "eckd handle summary unit check: reason", reason);
  865. } else {
  866. DBF_DEV_EVENT(DBF_WARNING, device, "%s",
  867. "eckd handle summary unit check:"
  868. " no reason code available");
  869. return;
  870. }
  871. lcu = private->lcu;
  872. if (!lcu) {
  873. DBF_DEV_EVENT(DBF_WARNING, device, "%s",
  874. "device not ready to handle summary"
  875. " unit check (no lcu structure)");
  876. return;
  877. }
  878. spin_lock(&lcu->lock);
  879. _stop_all_devices_on_lcu(lcu, device);
  880. /* prepare for lcu_update */
  881. private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
  882. /* If this device is about to be removed just return and wait for
  883. * the next interrupt on a different device
  884. */
  885. if (list_empty(&device->alias_list)) {
  886. DBF_DEV_EVENT(DBF_WARNING, device, "%s",
  887. "device is in offline processing,"
  888. " don't do summary unit check handling");
  889. spin_unlock(&lcu->lock);
  890. return;
  891. }
  892. if (lcu->suc_data.device) {
  893. /* already scheduled or running */
  894. DBF_DEV_EVENT(DBF_WARNING, device, "%s",
  895. "previous instance of summary unit check worker"
  896. " still pending");
  897. spin_unlock(&lcu->lock);
  898. return ;
  899. }
  900. lcu->suc_data.reason = reason;
  901. lcu->suc_data.device = device;
  902. dasd_get_device(device);
  903. spin_unlock(&lcu->lock);
  904. if (!schedule_work(&lcu->suc_data.worker))
  905. dasd_put_device(device);
  906. };