/*
 * linux/drivers/scsi/esas2r/esas2r_int.c
 *      esas2r interrupt handling
 *
 * Copyright (c) 2001-2013 ATTO Technology, Inc.
 * (mailto:linuxdrivers@attotech.com)
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#include "esas2r.h"

/* Local function prototypes */
static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell);
static void esas2r_get_outbound_responses(struct esas2r_adapter *a);
static void esas2r_process_bus_reset(struct esas2r_adapter *a);
  49. /*
  50. * Poll the adapter for interrupts and service them.
  51. * This function handles both legacy interrupts and MSI.
  52. */
  53. void esas2r_polled_interrupt(struct esas2r_adapter *a)
  54. {
  55. u32 intstat;
  56. u32 doorbell;
  57. esas2r_disable_chip_interrupts(a);
  58. intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
  59. if (intstat & MU_INTSTAT_POST_OUT) {
  60. /* clear the interrupt */
  61. esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
  62. MU_OLIS_INT);
  63. esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
  64. esas2r_get_outbound_responses(a);
  65. }
  66. if (intstat & MU_INTSTAT_DRBL) {
  67. doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
  68. if (doorbell != 0)
  69. esas2r_doorbell_interrupt(a, doorbell);
  70. }
  71. esas2r_enable_chip_interrupts(a);
  72. if (atomic_read(&a->disable_cnt) == 0)
  73. esas2r_do_deferred_processes(a);
  74. }
  75. /*
  76. * Legacy and MSI interrupt handlers. Note that the legacy interrupt handler
  77. * schedules a TASKLET to process events, whereas the MSI handler just
  78. * processes interrupt events directly.
  79. */
  80. irqreturn_t esas2r_interrupt(int irq, void *dev_id)
  81. {
  82. struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
  83. if (!esas2r_adapter_interrupt_pending(a))
  84. return IRQ_NONE;
  85. set_bit(AF2_INT_PENDING, &a->flags2);
  86. esas2r_schedule_tasklet(a);
  87. return IRQ_HANDLED;
  88. }
  89. void esas2r_adapter_interrupt(struct esas2r_adapter *a)
  90. {
  91. u32 doorbell;
  92. if (likely(a->int_stat & MU_INTSTAT_POST_OUT)) {
  93. /* clear the interrupt */
  94. esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
  95. MU_OLIS_INT);
  96. esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
  97. esas2r_get_outbound_responses(a);
  98. }
  99. if (unlikely(a->int_stat & MU_INTSTAT_DRBL)) {
  100. doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
  101. if (doorbell != 0)
  102. esas2r_doorbell_interrupt(a, doorbell);
  103. }
  104. a->int_mask = ESAS2R_INT_STS_MASK;
  105. esas2r_enable_chip_interrupts(a);
  106. if (likely(atomic_read(&a->disable_cnt) == 0))
  107. esas2r_do_deferred_processes(a);
  108. }
  109. irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id)
  110. {
  111. struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
  112. u32 intstat;
  113. u32 doorbell;
  114. intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
  115. if (likely(intstat & MU_INTSTAT_POST_OUT)) {
  116. /* clear the interrupt */
  117. esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
  118. MU_OLIS_INT);
  119. esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
  120. esas2r_get_outbound_responses(a);
  121. }
  122. if (unlikely(intstat & MU_INTSTAT_DRBL)) {
  123. doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
  124. if (doorbell != 0)
  125. esas2r_doorbell_interrupt(a, doorbell);
  126. }
  127. /*
  128. * Work around a chip bug and force a new MSI to be sent if one is
  129. * still pending.
  130. */
  131. esas2r_disable_chip_interrupts(a);
  132. esas2r_enable_chip_interrupts(a);
  133. if (likely(atomic_read(&a->disable_cnt) == 0))
  134. esas2r_do_deferred_processes(a);
  135. esas2r_do_tasklet_tasks(a);
  136. return 1;
  137. }
  138. static void esas2r_handle_outbound_rsp_err(struct esas2r_adapter *a,
  139. struct esas2r_request *rq,
  140. struct atto_vda_ob_rsp *rsp)
  141. {
  142. /*
  143. * For I/O requests, only copy the response if an error
  144. * occurred and setup a callback to do error processing.
  145. */
  146. if (unlikely(rq->req_stat != RS_SUCCESS)) {
  147. memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp));
  148. if (rq->req_stat == RS_ABORTED) {
  149. if (rq->timeout > RQ_MAX_TIMEOUT)
  150. rq->req_stat = RS_TIMEOUT;
  151. } else if (rq->req_stat == RS_SCSI_ERROR) {
  152. u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat;
  153. esas2r_trace("scsistatus: %x", scsistatus);
  154. /* Any of these are a good result. */
  155. if (scsistatus == SAM_STAT_GOOD || scsistatus ==
  156. SAM_STAT_CONDITION_MET || scsistatus ==
  157. SAM_STAT_INTERMEDIATE || scsistatus ==
  158. SAM_STAT_INTERMEDIATE_CONDITION_MET) {
  159. rq->req_stat = RS_SUCCESS;
  160. rq->func_rsp.scsi_rsp.scsi_stat =
  161. SAM_STAT_GOOD;
  162. }
  163. }
  164. }
  165. }
/*
 * Drain the adapter's outbound response ring: walk from the driver's last
 * read index up to the firmware's write pointer, match each response handle
 * back to its originating request, record the completion status, and then
 * complete the collected requests after dropping the queue lock.
 */
static void esas2r_get_outbound_responses(struct esas2r_adapter *a)
{
	struct atto_vda_ob_rsp *rsp;
	u32 rspput_ptr;
	u32 rspget_ptr;
	struct esas2r_request *rq;
	u32 handle;
	unsigned long flags;
	LIST_HEAD(comp_list);

	esas2r_trace_enter();

	spin_lock_irqsave(&a->queue_lock, flags);

	/* Get the outbound limit and pointers */
	rspput_ptr = le32_to_cpu(*a->outbound_copy) & MU_OLC_WRT_PTR;
	rspget_ptr = a->last_read;

	esas2r_trace("rspput_ptr: %x, rspget_ptr: %x", rspput_ptr, rspget_ptr);

	/* If we don't have anything to process, get out */
	if (unlikely(rspget_ptr == rspput_ptr)) {
		spin_unlock_irqrestore(&a->queue_lock, flags);
		esas2r_trace_exit();
		return;
	}

	/*
	 * Make sure the firmware is healthy: a write pointer past the end
	 * of the ring means the firmware state is corrupt, so reset.
	 */
	if (unlikely(rspput_ptr >= a->list_size)) {
		spin_unlock_irqrestore(&a->queue_lock, flags);
		esas2r_bugon();
		esas2r_local_reset_adapter(a);
		esas2r_trace_exit();
		return;
	}

	do {
		/* Advance the get index, wrapping at the end of the ring. */
		rspget_ptr++;

		if (rspget_ptr >= a->list_size)
			rspget_ptr = 0;

		rsp = (struct atto_vda_ob_rsp *)a->outbound_list_md.virt_addr
		      + rspget_ptr;

		handle = rsp->handle;

		/* Verify the handle range; bad handles are skipped. */
		if (unlikely(LOWORD(handle) == 0
			     || LOWORD(handle) > num_requests +
			     num_ae_requests + 1)) {
			esas2r_bugon();
			continue;
		}

		/* Get the request for this handle */
		rq = a->req_table[LOWORD(handle)];

		/* The stored handle must match too (stale-entry guard). */
		if (unlikely(rq == NULL || rq->vrq->scsi.handle != handle)) {
			esas2r_bugon();
			continue;
		}

		list_del(&rq->req_list);

		/* Get the completion status */
		rq->req_stat = rsp->req_stat;

		esas2r_trace("handle: %x", handle);
		esas2r_trace("rq: %p", rq);
		esas2r_trace("req_status: %x", rq->req_stat);

		if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
			esas2r_handle_outbound_rsp_err(a, rq, rsp);
		} else {
			/*
			 * Copy the outbound completion struct for non-I/O
			 * requests.
			 */
			memcpy(&rq->func_rsp, &rsp->func_rsp,
			       sizeof(rsp->func_rsp));
		}

		/* Queue the request for completion. */
		list_add_tail(&rq->comp_list, &comp_list);

	} while (rspget_ptr != rspput_ptr);

	a->last_read = rspget_ptr;
	spin_unlock_irqrestore(&a->queue_lock, flags);

	/* Run completion callbacks without holding queue_lock. */
	esas2r_comp_list_drain(a, &comp_list);
	esas2r_trace_exit();
}
/*
 * Perform all deferred processes for the adapter.  Deferred
 * processes can only be done while the current interrupt
 * disable_cnt for the adapter is zero.
 */
void esas2r_do_deferred_processes(struct esas2r_adapter *a)
{
	int startreqs = 2;
	struct esas2r_request *rq;
	unsigned long flags;

	/*
	 * startreqs is used to control starting requests
	 * that are on the deferred queue
	 *  = 0 - do not start any requests
	 *  = 1 - can start discovery requests
	 *  = 2 - can start any request
	 */
	if (test_bit(AF_CHPRST_PENDING, &a->flags) ||
	    test_bit(AF_FLASHING, &a->flags))
		startreqs = 0;
	else if (test_bit(AF_DISC_PENDING, &a->flags))
		startreqs = 1;

	/* Block re-entry of deferred processing while we work. */
	atomic_inc(&a->disable_cnt);

	/* Clear off the completed list to be processed later. */
	if (esas2r_is_tasklet_pending(a)) {
		esas2r_schedule_tasklet(a);

		startreqs = 0;
	}

	/*
	 * If we can start requests then traverse the defer queue
	 * looking for requests to start or complete
	 */
	if (startreqs && !list_empty(&a->defer_list)) {
		LIST_HEAD(comp_list);
		struct list_head *element, *next;

		spin_lock_irqsave(&a->queue_lock, flags);

		list_for_each_safe(element, next, &a->defer_list) {
			rq = list_entry(element, struct esas2r_request,
					req_list);

			if (rq->req_stat != RS_PENDING) {
				/* Already finished - complete it later. */
				list_del(element);
				list_add_tail(&rq->comp_list, &comp_list);
			}
			/*
			 * Process discovery and OS requests separately.  We
			 * can't hold up discovery requests when discovery is
			 * pending.  In general, there may be different sets of
			 * conditions for starting different types of requests.
			 */
			else if (rq->req_type == RT_DISC_REQ) {
				list_del(element);
				esas2r_disc_local_start_request(a, rq);
			} else if (startreqs == 2) {
				list_del(element);
				esas2r_local_start_request(a, rq);

				/*
				 * Flashing could have been set by last local
				 * start
				 */
				if (test_bit(AF_FLASHING, &a->flags))
					break;
			}
		}

		spin_unlock_irqrestore(&a->queue_lock, flags);
		esas2r_comp_list_drain(a, &comp_list);
	}

	atomic_dec(&a->disable_cnt);
}
/*
 * Process an adapter reset (or one that is about to happen)
 * by making sure all outstanding requests are completed that
 * haven't been already.
 */
void esas2r_process_adapter_reset(struct esas2r_adapter *a)
{
	struct esas2r_request *rq = &a->general_req;
	unsigned long flags;
	struct esas2r_disc_context *dc;
	LIST_HEAD(comp_list);
	struct list_head *element;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->queue_lock, flags);

	/* abort the active discovery, if any. */
	if (rq->interrupt_cx) {
		dc = (struct esas2r_disc_context *)rq->interrupt_cx;

		dc->disc_evt = 0;

		clear_bit(AF_DISC_IN_PROG, &a->flags);
	}

	/*
	 * just clear the interrupt callback for now.  it will be dequeued if
	 * and when we find it on the active queue and we don't want the
	 * callback called.  also set the dummy completion callback in case we
	 * were doing an I/O request.
	 */
	rq->interrupt_cx = NULL;
	rq->interrupt_cb = NULL;

	rq->comp_cb = esas2r_dummy_complete;

	/* Reset the read and write pointers (all to the last ring slot). */
	*a->outbound_copy =
		a->last_write =
			a->last_read = a->list_size - 1;

	set_bit(AF_COMM_LIST_TOGGLE, &a->flags);

	/* Kill all the requests on the active list */
	list_for_each(element, &a->defer_list) {
		rq = list_entry(element, struct esas2r_request, req_list);

		if (rq->req_stat == RS_STARTED)
			if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
				list_add_tail(&rq->comp_list, &comp_list);
	}

	spin_unlock_irqrestore(&a->queue_lock, flags);
	esas2r_comp_list_drain(a, &comp_list);
	esas2r_process_bus_reset(a);
	esas2r_trace_exit();
}
  353. static void esas2r_process_bus_reset(struct esas2r_adapter *a)
  354. {
  355. struct esas2r_request *rq;
  356. struct list_head *element;
  357. unsigned long flags;
  358. LIST_HEAD(comp_list);
  359. esas2r_trace_enter();
  360. esas2r_hdebug("reset detected");
  361. spin_lock_irqsave(&a->queue_lock, flags);
  362. /* kill all the requests on the deferred queue */
  363. list_for_each(element, &a->defer_list) {
  364. rq = list_entry(element, struct esas2r_request, req_list);
  365. if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
  366. list_add_tail(&rq->comp_list, &comp_list);
  367. }
  368. spin_unlock_irqrestore(&a->queue_lock, flags);
  369. esas2r_comp_list_drain(a, &comp_list);
  370. if (atomic_read(&a->disable_cnt) == 0)
  371. esas2r_do_deferred_processes(a);
  372. clear_bit(AF_OS_RESET, &a->flags);
  373. esas2r_trace_exit();
  374. }
/*
 * Begin a chip reset from tasklet context, or permanently disable the
 * adapter when it is missing or has failed repeatedly.
 */
static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
{
	clear_bit(AF_CHPRST_NEEDED, &a->flags);
	clear_bit(AF_BUSRST_NEEDED, &a->flags);
	clear_bit(AF_BUSRST_DETECTED, &a->flags);
	clear_bit(AF_BUSRST_PENDING, &a->flags);
	/*
	 * Make sure we don't attempt more than 3 resets
	 * when the uptime between resets does not exceed one
	 * minute.  This will stop any situation where there is
	 * really something wrong with the hardware.  The way
	 * this works is that we start with uptime ticks at 0.
	 * Each time we do a reset, we add 20 seconds worth to
	 * the count.  Each time a timer tick occurs, as long
	 * as a chip reset is not pending, we decrement the
	 * tick count.  If the uptime ticks ever gets to 60
	 * seconds worth, we disable the adapter from that
	 * point forward.  Three strikes, you're out.
	 */
	if (!esas2r_is_adapter_present(a) || (a->chip_uptime >=
					      ESAS2R_CHP_UPTIME_MAX)) {
		esas2r_hdebug("*** adapter disabled ***");

		/*
		 * Ok, some kind of hard failure.  Make sure we
		 * exit this loop with chip interrupts
		 * permanently disabled so we don't lock up the
		 * entire system.  Also flag degraded mode to
		 * prevent the heartbeat from trying to recover.
		 */
		set_bit(AF_DEGRADED_MODE, &a->flags);
		set_bit(AF_DISABLED, &a->flags);
		clear_bit(AF_CHPRST_PENDING, &a->flags);
		clear_bit(AF_DISC_PENDING, &a->flags);

		esas2r_disable_chip_interrupts(a);
		a->int_mask = 0;
		esas2r_process_adapter_reset(a);

		esas2r_log(ESAS2R_LOG_CRIT,
			   "Adapter disabled because of hardware failure");
	} else {
		/* Remember whether a reset was already in progress. */
		bool alrdyrst = test_and_set_bit(AF_CHPRST_STARTED, &a->flags);

		if (!alrdyrst)
			/*
			 * Only disable interrupts if this is
			 * the first reset attempt.
			 */
			esas2r_disable_chip_interrupts(a);

		if ((test_bit(AF_POWER_MGT, &a->flags)) &&
		    !test_bit(AF_FIRST_INIT, &a->flags) && !alrdyrst) {
			/*
			 * Don't reset the chip on the first
			 * deferred power up attempt.
			 */
		} else {
			esas2r_hdebug("*** resetting chip ***");
			esas2r_reset_chip(a);
		}

		/* Kick off the reinitialization */
		a->chip_uptime += ESAS2R_CHP_UPTIME_CNT;
		a->chip_init_time = jiffies_to_msecs(jiffies);
		if (!test_bit(AF_POWER_MGT, &a->flags)) {
			esas2r_process_adapter_reset(a);

			if (!alrdyrst) {
				/* Remove devices now that I/O is cleaned up. */
				a->prev_dev_cnt =
					esas2r_targ_db_get_tgt_cnt(a);
				esas2r_targ_db_remove_all(a, false);
			}
		}

		a->int_mask = 0;
	}
}
/*
 * Finish recovery from a detected chip reset: reinitialize the hardware,
 * send the appropriate reset/power-management async event, and clear the
 * reset-tracking flags.  Loops until no further reset is detected.
 */
static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
{
	while (test_bit(AF_CHPRST_DETECTED, &a->flags)) {
		/*
		 * Balance the enable in esas2r_initadapter_hw.
		 * Esas2r_power_down already took care of it for power
		 * management.
		 */
		if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
		    !test_bit(AF_POWER_MGT, &a->flags))
			esas2r_disable_chip_interrupts(a);

		/* Reinitialize the chip. */
		esas2r_check_adapter(a);
		esas2r_init_adapter_hw(a, 0);

		/* Another reset was requested during init; start over. */
		if (test_bit(AF_CHPRST_NEEDED, &a->flags))
			break;

		if (test_bit(AF_POWER_MGT, &a->flags)) {
			/* Recovery from power management. */
			if (test_bit(AF_FIRST_INIT, &a->flags)) {
				/* Chip reset during normal power up */
				esas2r_log(ESAS2R_LOG_CRIT,
					   "The firmware was reset during a normal power-up sequence");
			} else {
				/* Deferred power up complete. */
				clear_bit(AF_POWER_MGT, &a->flags);
				esas2r_send_reset_ae(a, true);
			}
		} else {
			/* Recovery from online chip reset. */
			if (test_bit(AF_FIRST_INIT, &a->flags)) {
				/* Chip reset during driver load */
			} else {
				/* Chip reset after driver load */
				esas2r_send_reset_ae(a, false);
			}

			esas2r_log(ESAS2R_LOG_CRIT,
				   "Recovering from a chip reset while the chip was online");
		}

		clear_bit(AF_CHPRST_STARTED, &a->flags);
		esas2r_enable_chip_interrupts(a);

		/*
		 * Clear this flag last!  this indicates that the chip has been
		 * reset already during initialization.
		 */
		clear_bit(AF_CHPRST_DETECTED, &a->flags);
	}
}
/*
 * Perform deferred tasks when chip interrupts are disabled: chip resets,
 * bus resets, port-change reporting, and deferred request processing,
 * strictly in that order.
 */
void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)
{
	if (test_bit(AF_CHPRST_NEEDED, &a->flags) ||
	    test_bit(AF_CHPRST_DETECTED, &a->flags)) {
		if (test_bit(AF_CHPRST_NEEDED, &a->flags))
			esas2r_chip_rst_needed_during_tasklet(a);

		esas2r_handle_chip_rst_during_tasklet(a);
	}

	if (test_bit(AF_BUSRST_NEEDED, &a->flags)) {
		esas2r_hdebug("hard resetting bus");

		clear_bit(AF_BUSRST_NEEDED, &a->flags);

		if (test_bit(AF_FLASHING, &a->flags))
			/* Defer the bus reset until flashing finishes. */
			set_bit(AF_BUSRST_DETECTED, &a->flags);
		else
			esas2r_write_register_dword(a, MU_DOORBELL_IN,
						    DRBL_RESET_BUS);
	}

	if (test_bit(AF_BUSRST_DETECTED, &a->flags)) {
		esas2r_process_bus_reset(a);

		esas2r_log_dev(ESAS2R_LOG_WARN,
			       &(a->host->shost_gendev),
			       "scsi_report_bus_reset() called");

		scsi_report_bus_reset(a->host, 0);

		clear_bit(AF_BUSRST_DETECTED, &a->flags);
		clear_bit(AF_BUSRST_PENDING, &a->flags);

		esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete");
	}

	if (test_bit(AF_PORT_CHANGE, &a->flags)) {
		clear_bit(AF_PORT_CHANGE, &a->flags);

		esas2r_targ_db_report_changes(a);
	}

	if (atomic_read(&a->disable_cnt) == 0)
		esas2r_do_deferred_processes(a);
}
  528. static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)
  529. {
  530. if (!(doorbell & DRBL_FORCE_INT)) {
  531. esas2r_trace_enter();
  532. esas2r_trace("doorbell: %x", doorbell);
  533. }
  534. /* First clear the doorbell bits */
  535. esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell);
  536. if (doorbell & DRBL_RESET_BUS)
  537. set_bit(AF_BUSRST_DETECTED, &a->flags);
  538. if (doorbell & DRBL_FORCE_INT)
  539. clear_bit(AF_HEARTBEAT, &a->flags);
  540. if (doorbell & DRBL_PANIC_REASON_MASK) {
  541. esas2r_hdebug("*** Firmware Panic ***");
  542. esas2r_log(ESAS2R_LOG_CRIT, "The firmware has panicked");
  543. }
  544. if (doorbell & DRBL_FW_RESET) {
  545. set_bit(AF2_COREDUMP_AVAIL, &a->flags2);
  546. esas2r_local_reset_adapter(a);
  547. }
  548. if (!(doorbell & DRBL_FORCE_INT))
  549. esas2r_trace_exit();
  550. }
  551. void esas2r_force_interrupt(struct esas2r_adapter *a)
  552. {
  553. esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_FORCE_INT |
  554. DRBL_DRV_VER);
  555. }
  556. static void esas2r_lun_event(struct esas2r_adapter *a, union atto_vda_ae *ae,
  557. u16 target, u32 length)
  558. {
  559. struct esas2r_target *t = a->targetdb + target;
  560. u32 cplen = length;
  561. unsigned long flags;
  562. if (cplen > sizeof(t->lu_event))
  563. cplen = sizeof(t->lu_event);
  564. esas2r_trace("ae->lu.dwevent: %x", ae->lu.dwevent);
  565. esas2r_trace("ae->lu.bystate: %x", ae->lu.bystate);
  566. spin_lock_irqsave(&a->mem_lock, flags);
  567. t->new_target_state = TS_INVALID;
  568. if (ae->lu.dwevent & VDAAE_LU_LOST) {
  569. t->new_target_state = TS_NOT_PRESENT;
  570. } else {
  571. switch (ae->lu.bystate) {
  572. case VDAAE_LU_NOT_PRESENT:
  573. case VDAAE_LU_OFFLINE:
  574. case VDAAE_LU_DELETED:
  575. case VDAAE_LU_FACTORY_DISABLED:
  576. t->new_target_state = TS_NOT_PRESENT;
  577. break;
  578. case VDAAE_LU_ONLINE:
  579. case VDAAE_LU_DEGRADED:
  580. t->new_target_state = TS_PRESENT;
  581. break;
  582. }
  583. }
  584. if (t->new_target_state != TS_INVALID) {
  585. memcpy(&t->lu_event, &ae->lu, cplen);
  586. esas2r_disc_queue_event(a, DCDE_DEV_CHANGE);
  587. }
  588. spin_unlock_irqrestore(&a->mem_lock, flags);
  589. }
/*
 * Completion handler for the driver's standing async-event (AE) request:
 * walk the variable-length event entries in the response buffer, log and
 * dispatch each one, then requeue the AE request to the firmware.
 */
void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
{
	union atto_vda_ae *ae =
		(union atto_vda_ae *)rq->vda_rsp_data->ae_data.event_data;
	u32 length = le32_to_cpu(rq->func_rsp.ae_rsp.length);
	union atto_vda_ae *last =
		(union atto_vda_ae *)(rq->vda_rsp_data->ae_data.event_data
				      + length);

	esas2r_trace_enter();
	esas2r_trace("length: %d", length);

	/*
	 * Sanity check the overall length: it must be non-zero, dword
	 * aligned, and no larger than the AE data area.
	 */
	if (length > sizeof(struct atto_vda_ae_data)
	    || (length & 3) != 0
	    || length == 0) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "The AE request response length (%p) is too long: %d",
			   rq, length);

		esas2r_hdebug("aereq->length (0x%x) too long", length);
		esas2r_bugon();

		/* Skip the walk entirely. */
		last = ae;
	}

	while (ae < last) {
		u16 target;

		esas2r_trace("ae: %p", ae);
		esas2r_trace("ae->hdr: %p", &(ae->hdr));

		length = ae->hdr.bylength;

		/* Each entry's own length must also be sane. */
		if (length > (u32)((u8 *)last - (u8 *)ae)
		    || (length & 3) != 0
		    || length == 0) {
			esas2r_log(ESAS2R_LOG_CRIT,
				   "the async event length is invalid (%p): %d",
				   ae, length);

			esas2r_hdebug("ae->hdr.length (0x%x) invalid", length);
			esas2r_bugon();

			break;
		}

		esas2r_nuxi_ae_data(ae);

		esas2r_queue_fw_event(a, fw_event_vda_ae, ae,
				      sizeof(union atto_vda_ae));

		switch (ae->hdr.bytype) {
		case VDAAE_HDR_TYPE_RAID:

			if (ae->raid.dwflags & (VDAAE_GROUP_STATE
						| VDAAE_RBLD_STATE
						| VDAAE_MEMBER_CHG
						| VDAAE_PART_CHG)) {
				esas2r_log(ESAS2R_LOG_INFO,
					   "RAID event received - name:%s rebuild_state:%d group_state:%d",
					   ae->raid.acname,
					   ae->raid.byrebuild_state,
					   ae->raid.bygroup_state);
			}

			break;

		case VDAAE_HDR_TYPE_LU:
			esas2r_log(ESAS2R_LOG_INFO,
				   "LUN event received: event:%d target_id:%d LUN:%d state:%d",
				   ae->lu.dwevent,
				   ae->lu.id.tgtlun.wtarget_id,
				   ae->lu.id.tgtlun.bylun,
				   ae->lu.bystate);

			target = ae->lu.id.tgtlun.wtarget_id;

			/* Only dispatch events for targets we can track. */
			if (target < ESAS2R_MAX_TARGETS)
				esas2r_lun_event(a, ae, target, length);

			break;

		case VDAAE_HDR_TYPE_DISK:
			esas2r_log(ESAS2R_LOG_INFO, "Disk event received");
			break;

		default:

			/* Silently ignore the rest and let the apps deal with
			 * them.
			 */
			break;
		}

		/* Step to the next variable-length entry. */
		ae = (union atto_vda_ae *)((u8 *)ae + length);
	}

	/* Now requeue it. */
	esas2r_start_ae_request(a, rq);
	esas2r_trace_exit();
}
  667. /* Send an asynchronous event for a chip reset or power management. */
  668. void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt)
  669. {
  670. struct atto_vda_ae_hdr ae;
  671. if (pwr_mgt)
  672. ae.bytype = VDAAE_HDR_TYPE_PWRMGT;
  673. else
  674. ae.bytype = VDAAE_HDR_TYPE_RESET;
  675. ae.byversion = VDAAE_HDR_VER_0;
  676. ae.byflags = 0;
  677. ae.bylength = (u8)sizeof(struct atto_vda_ae_hdr);
  678. if (pwr_mgt)
  679. esas2r_hdebug("*** sending power management AE ***");
  680. else
  681. esas2r_hdebug("*** sending reset AE ***");
  682. esas2r_queue_fw_event(a, fw_event_vda_ae, &ae,
  683. sizeof(union atto_vda_ae));
  684. }
/*
 * Do-nothing completion callback, installed when a request's real
 * completion must be suppressed (e.g. during an adapter reset).
 */
void esas2r_dummy_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
{}
/*
 * Copy returned sense data (delivered in the request's data buffer) into
 * the caller's sense buffer and watch for a "reported LUNs data has
 * changed" unit attention so a target state change can be flagged.
 */
static void esas2r_check_req_rsp_sense(struct esas2r_adapter *a,
				       struct esas2r_request *rq)
{
	u8 snslen, snslen2;

	snslen = snslen2 = rq->func_rsp.scsi_rsp.sense_len;

	/* Never copy more than the caller's sense buffer can hold. */
	if (snslen > rq->sense_len)
		snslen = rq->sense_len;

	if (snslen) {
		if (rq->sense_buf)
			memcpy(rq->sense_buf, rq->data_buf, snslen);
		else
			rq->sense_buf = (u8 *)rq->data_buf;

		/* See about possible sense data */
		if (snslen2 > 0x0c) {
			u8 *s = (u8 *)rq->data_buf;

			esas2r_trace_enter();

			/*
			 * Bytes 0x0c/0x0d of fixed-format sense data are the
			 * additional sense code and qualifier; 3Fh/0Eh is
			 * "reported LUNs data has changed".
			 */
			if (s[0x0c] == 0x3f && s[0x0d] == 0x0E) {
				esas2r_trace("rq->target_id: %d",
					     rq->target_id);
				esas2r_target_state_changed(a, rq->target_id,
							    TS_LUN_CHANGE);
			}

			esas2r_trace("add_sense_key=%x", s[0x0c]);
			esas2r_trace("add_sense_qual=%x", s[0x0d]);

			esas2r_trace_exit();
		}
	}

	rq->sense_len = snslen;
}
  717. void esas2r_complete_request(struct esas2r_adapter *a,
  718. struct esas2r_request *rq)
  719. {
  720. if (rq->vrq->scsi.function == VDA_FUNC_FLASH
  721. && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
  722. clear_bit(AF_FLASHING, &a->flags);
  723. /* See if we setup a callback to do special processing */
  724. if (rq->interrupt_cb) {
  725. (*rq->interrupt_cb)(a, rq);
  726. if (rq->req_stat == RS_PENDING) {
  727. esas2r_start_request(a, rq);
  728. return;
  729. }
  730. }
  731. if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)
  732. && unlikely(rq->req_stat != RS_SUCCESS)) {
  733. esas2r_check_req_rsp_sense(a, rq);
  734. esas2r_log_request_failure(a, rq);
  735. }
  736. (*rq->comp_cb)(a, rq);
  737. }