sclp.c

/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Suspend request */
static DECLARE_COMPLETION(sclp_request_queue_flushed);

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;

/* Flag to indicate if buffer pages are dropped on buffer full condition */
int sclp_console_drop = 1;

/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
	complete(&sclp_request_queue_flushed);
}

static int __init sclp_setup_console_pages(char *str)
{
	int pages, rc;

	rc = kstrtoint(str, 0, &pages);
	if (!rc && pages >= SCLP_CONSOLE_PAGES)
		sclp_console_pages = pages;
	return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);

static int __init sclp_setup_console_drop(char *str)
{
	int drop, rc;

	rc = kstrtoint(str, 0, &drop);
	if (!rc)
		sclp_console_drop = drop;
	return 1;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);
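/*
 * Both parameters above are parsed from the kernel command line. As an
 * illustration (the values here are made up, not defaults):
 *
 *	sclp_con_pages=16 sclp_con_drop=0
 *
 * would reserve 16 console pages and keep pages on a buffer-full condition.
 */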
static struct sclp_req sclp_suspend_req;

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
	sclp_init_state_uninitialized,
	sclp_init_state_initializing,
	sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Internal state: is the driver suspended? */
static enum sclp_suspend_state_t {
	sclp_suspend_state_running,
	sclp_suspend_state_suspended,
} sclp_suspend_state = sclp_suspend_state_running;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

/* Perform service call. Return 0 on success, non-zero otherwise. */
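/*
 * 0xb2200000 is the opcode of the SERVICE CALL (servc) instruction, coded
 * via .insn so the file also builds with assemblers that lack the mnemonic.
 * The EX_TABLE entries route a program check (e.g. a bad SCCB address) to
 * label 2 with cc left at its sentinel value 4, which maps to -EINVAL below.
 */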
int
sclp_service_call(sclp_cmdw_t command, void *sccb)
{
	int cc = 4; /* Initialize for program check handling */

	asm volatile(
		"0:	.insn	rre,0xb2200000,%1,%2\n"	/* servc %1,%2 */
		"1:	ipm	%0\n"
		"	srl	%0,28\n"
		"2:\n"
		EX_TABLE(0b, 2b)
		EX_TABLE(1b, 2b)
		: "+&d" (cc) : "d" (command), "a" (__pa(sccb))
		: "cc", "memory");
	if (cc == 4)
		return -EINVAL;
	if (cc == 3)
		return -EIO;
	if (cc == 2)
		return -EBUSY;
	return 0;
}
static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
			 unsigned long data)
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = function;
	sclp_request_timer.data = data;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (data) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/*
 * Returns the expire value in jiffies of the next pending request timeout,
 * if any. Needs to be called with sclp_lock.
 */
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
	unsigned long expires_next = 0;
	struct sclp_req *req;

	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (!expires_next ||
		    (time_before(req->queue_expires, expires_next)))
			expires_next = req->queue_expires;
	}
	return expires_next;
}
/*
 * Returns expired request, if any, and removes it from the list.
 */
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
	unsigned long flags, now;
	struct sclp_req *req;

	spin_lock_irqsave(&sclp_lock, flags);
	now = jiffies;
	/* Don't need list_for_each_safe because we break out after list_del */
	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (time_before_eq(req->queue_expires, now)) {
			if (req->status == SCLP_REQ_QUEUED) {
				req->status = SCLP_REQ_QUEUED_TIMEOUT;
				list_del(&req->list);
				goto out;
			}
		}
	}
	req = NULL;
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return req;
}

/*
 * Timeout handler for queued requests. Removes request from list and
 * invokes callback. This timer can be set per request in situations where
 * waiting too long would be harmful to the system, e.g. during SE reboot.
 */
static void sclp_req_queue_timeout(unsigned long data)
{
	unsigned long flags, expires_next;
	struct sclp_req *req;

	do {
		req = __sclp_req_queue_remove_expired_req();
		if (req && req->callback)
			req->callback(req, req->callback_data);
	} while (req);

	spin_lock_irqsave(&sclp_lock, flags);
	expires_next = __sclp_req_queue_find_next_timeout();
	if (expires_next)
		mod_timer(&sclp_queue_timer, expires_next);
	spin_unlock_irqrestore(&sclp_lock, flags);
}
/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout, 1);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		if (!req->sccb)
			goto do_post;
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout, 0);
			break;
		}
do_post:
		/* Post-processing for aborted request */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}
static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_suspend_req || req == &sclp_init_req)
		return 1;
	if (sclp_suspend_state != sclp_suspend_state_running)
		return 0;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	if (req->queue_timeout) {
		req->queue_expires = jiffies + req->queue_timeout * HZ;
		if (!timer_pending(&sclp_queue_timer) ||
		    time_after(sclp_queue_timer.expires, req->queue_expires))
			mod_timer(&sclp_queue_timer, req->queue_expires);
	} else
		req->queue_expires = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		if (!req->sccb) {
			list_del(&req->list);
			rc = -ENODATA;
			goto out;
		}
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);
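/*
 * Usage sketch (illustrative only, not part of this file; names prefixed
 * "my_" are made up): a caller fills a struct sclp_req with a page-aligned
 * SCCB, a command word and a completion callback, then queues it:
 *
 *	static void my_cb(struct sclp_req *req, void *data)
 *	{
 *		complete((struct completion *) data);
 *	}
 *
 *	my_req.command = SCLP_CMDW_READ_EVENT_DATA;
 *	my_req.sccb = my_sccb;			// page-aligned buffer
 *	my_req.status = SCLP_REQ_FILLED;
 *	my_req.callback = my_cb;
 *	my_req.callback_data = &my_completion;
 *	rc = sclp_add_request(&my_req);
 */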
/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
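/*
 * Note: event types are numbered from 1 at the most significant bit of the
 * 32-bit event mask, so type t corresponds to bit 1 << (32 - t); for example,
 * type 1 maps to 0x80000000.
 */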
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & (1 << (32 - evbuf->type)))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -EOPNOTSUPP;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}
/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	inc_irq_stat(IRQEXT_SCP);
	spin_lock(&sclp_lock);
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;
	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}
/* Convert interval in jiffies to TOD ticks. */
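/*
 * Bit 51 of the TOD clock ticks every microsecond, so 2^32 TOD units come to
 * about 1.05 s. Shifting whole seconds left by 32 therefore overestimates
 * the interval by roughly 5%, which is acceptable for a retry timeout.
 */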
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}
/* Wait until a currently running request has finished. Note: while this
 * function is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;
	int irq_context;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_tod_clock_fast() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
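	/* The constants below adjust control register 0: ORing in 0x00000200
	 * sets the service-signal external-interruption subclass mask, while
	 * ANDing with 0xffff00a0 clears the other subclass-mask bits, among
	 * them clock comparator and CPU timer, so that only service-signal
	 * interrupts get through while we busy-wait. */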
	old_tick = local_tick_disable();
	trace_hardirqs_on();
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0;
	cr0_sync &= 0xffff00a0;
	cr0_sync |= 0x00000200;
	__ctl_load(cr0_sync, 0, 0);
	__arch_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_tod_clock_fast() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(sclp_request_timer.data);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(sclp_sync_wait);
/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}
struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	sccb_mask_t	sclp_receive_mask;
	sccb_mask_t	sclp_send_mask;
	u32		read_data_function_mask;
} __attribute__((packed));

/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	scbuf = (struct sclp_statechangebuf *) evbuf;
	if (scbuf->mask_length != sizeof(sccb_mask_t))
		return;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = scbuf->sclp_receive_mask;
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = scbuf->sclp_send_mask;
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp.facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};
/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	reg->pm_event_posted = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);
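/*
 * Usage sketch (illustrative only; "my_" names are made up): an event
 * consumer declares the event types it handles and a receiver callback:
 *
 *	static struct sclp_register my_event = {
 *		.receive_mask = EVTYP_MSG_MASK,
 *		.receiver_fn = my_receiver_fn,
 *	};
 *	rc = sclp_register(&my_event);
 *
 * Registration fails with -EBUSY if another listener has already claimed
 * one of the requested event types.
 */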
/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
	struct init_sccb *sccb;

	sccb = (struct init_sccb *) sclp_init_sccb;
	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
}
/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb->sclp_receive_mask;
				sclp_send_mask = sccb->sclp_send_mask;
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);
/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
			       unsigned int param32, unsigned long param64)
{
	u32 finished_sccb;

	inc_irq_stat(IRQEXT_SCP);
	finished_sccb = param32 & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
		      finished_sccb);
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout, 0);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			rc = 0;
			break;
		} else
			rc = -EBUSY;
	}
	unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

/*
 * Suspend/resume SCLP notifier implementation
 */
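/*
 * Notify all registered listeners of a power-management event. The per-entry
 * pm_event_posted flag records which listeners have already been called, so
 * the list can safely be re-scanned from the start each time sclp_lock is
 * dropped around a callback. A rollback walks the flags in the opposite
 * sense, undoing only notifications that were actually posted.
 */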
static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
{
	struct sclp_register *reg;
	unsigned long flags;

	if (!rollback) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list)
			reg->pm_event_posted = 0;
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	do {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list) {
			if (rollback && reg->pm_event_posted)
				goto found;
			if (!rollback && !reg->pm_event_posted)
				goto found;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
found:
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg->pm_event_fn)
			reg->pm_event_fn(reg, sclp_pm_event);
		reg->pm_event_posted = rollback ? 0 : 1;
	} while (1);
}
/*
 * Suspend/resume callbacks for platform device
 */
static int sclp_freeze(struct device *dev)
{
	unsigned long flags;
	int rc;

	sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_suspended;
	spin_unlock_irqrestore(&sclp_lock, flags);

	/* Init suspend data */
	memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
	sclp_suspend_req.callback = sclp_suspend_req_cb;
	sclp_suspend_req.status = SCLP_REQ_FILLED;
	init_completion(&sclp_request_queue_flushed);

	rc = sclp_add_request(&sclp_suspend_req);
	if (rc == 0)
		wait_for_completion(&sclp_request_queue_flushed);
	else if (rc != -ENODATA)
		goto fail_thaw;

	rc = sclp_deactivate();
	if (rc)
		goto fail_thaw;
	return 0;

fail_thaw:
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
	return rc;
}
static int sclp_undo_suspend(enum sclp_pm_event event)
{
	unsigned long flags;
	int rc;

	rc = sclp_reactivate();
	if (rc)
		return rc;

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);

	sclp_pm_event(event, 0);
	return 0;
}

static int sclp_thaw(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
}

static int sclp_restore(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
}

static const struct dev_pm_ops sclp_pm_ops = {
	.freeze		= sclp_freeze,
	.thaw		= sclp_thaw,
	.restore	= sclp_restore,
};

static ssize_t sclp_show_console_pages(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR(con_pages, S_IRUSR, sclp_show_console_pages, NULL);

static ssize_t sclp_show_con_drop(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR(con_drop, S_IRUSR, sclp_show_con_drop, NULL);

static ssize_t sclp_show_console_full(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR(con_full, S_IRUSR, sclp_show_console_full, NULL);
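/*
 * With the platform driver registered under the name "sclp", these read-only
 * attributes would typically appear in sysfs as, for example:
 *
 *	/sys/bus/platform/drivers/sclp/con_pages
 *	/sys/bus/platform/drivers/sclp/con_drop
 *	/sys/bus/platform/drivers/sclp/con_full
 */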
static struct attribute *sclp_drv_attrs[] = {
	&driver_attr_con_pages.attr,
	&driver_attr_con_drop.attr,
	&driver_attr_con_full.attr,
	NULL,
};

static struct attribute_group sclp_drv_attr_group = {
	.attrs = sclp_drv_attrs,
};

static const struct attribute_group *sclp_drv_attr_groups[] = {
	&sclp_drv_attr_group,
	NULL,
};

static struct platform_driver sclp_pdrv = {
	.driver = {
		.name	= "sclp",
		.pm	= &sclp_pm_ops,
		.groups = sclp_drv_attr_groups,
	},
};
static struct platform_device *sclp_pdev;

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized)
		goto fail_unlock;
	sclp_init_state = sclp_init_state_initializing;
	/* Set up variables */
	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	init_timer(&sclp_request_timer);
	init_timer(&sclp_queue_timer);
	sclp_queue_timer.function = sclp_req_queue_timeout;
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register interrupt handler */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
	if (rc)
		goto fail_unregister_reboot_notifier;
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	sclp_init_mask(1);
	return 0;

fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	sclp_init_state = sclp_init_state_uninitialized;
fail_unlock:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
/*
 * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be
 * able to print the panic message.
 */
static int sclp_panic_notify(struct notifier_block *self,
			     unsigned long event, void *data)
{
	if (sclp_suspend_state == sclp_suspend_state_suspended)
		sclp_undo_suspend(SCLP_PM_EVENT_THAW);
	return NOTIFY_OK;
}

static struct notifier_block sclp_on_panic_nb = {
	.notifier_call = sclp_panic_notify,
	.priority = SCLP_PANIC_PRIO,
};

static __init int sclp_initcall(void)
{
	int rc;

	rc = platform_driver_register(&sclp_pdrv);
	if (rc)
		return rc;

	sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
	rc = PTR_ERR_OR_ZERO(sclp_pdev);
	if (rc)
		goto fail_platform_driver_unregister;

	rc = atomic_notifier_chain_register(&panic_notifier_list,
					    &sclp_on_panic_nb);
	if (rc)
		goto fail_platform_device_unregister;

	return sclp_init();

fail_platform_device_unregister:
	platform_device_unregister(sclp_pdev);
fail_platform_driver_unregister:
	platform_driver_unregister(&sclp_pdrv);
	return rc;
}

arch_initcall(sclp_initcall);