  1. /*
  2. * Disk Array driver for HP Smart Array SAS controllers
  3. * Copyright 2014-2015 PMC-Sierra, Inc.
  4. * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; version 2 of the License.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  13. * NON INFRINGEMENT. See the GNU General Public License for more details.
  14. *
  15. * Questions/Comments/Bugfixes to storagedev@pmcs.com
  16. *
  17. */
  18. #ifndef HPSA_H
  19. #define HPSA_H
  20. #include <scsi/scsicam.h>
  21. #define IO_OK 0
  22. #define IO_ERROR 1
  23. struct ctlr_info;
  24. struct access_method {
  25. void (*submit_command)(struct ctlr_info *h,
  26. struct CommandList *c);
  27. void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
  28. bool (*intr_pending)(struct ctlr_info *h);
  29. unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
  30. };
  31. /* for SAS hosts and SAS expanders */
  32. struct hpsa_sas_node {
  33. struct device *parent_dev;
  34. struct list_head port_list_head;
  35. };
  36. struct hpsa_sas_port {
  37. struct list_head port_list_entry;
  38. u64 sas_address;
  39. struct sas_port *port;
  40. int next_phy_index;
  41. struct list_head phy_list_head;
  42. struct hpsa_sas_node *parent_node;
  43. struct sas_rphy *rphy;
  44. };
  45. struct hpsa_sas_phy {
  46. struct list_head phy_list_entry;
  47. struct sas_phy *phy;
  48. struct hpsa_sas_port *parent_port;
  49. bool added_to_port;
  50. };
  51. struct hpsa_scsi_dev_t {
  52. unsigned int devtype;
  53. int bus, target, lun; /* as presented to the OS */
  54. unsigned char scsi3addr[8]; /* as presented to the HW */
  55. u8 physical_device : 1;
  56. u8 expose_device;
  57. #define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
  58. unsigned char device_id[16]; /* from inquiry pg. 0x83 */
  59. u64 sas_address;
  60. unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
  61. unsigned char model[16]; /* bytes 16-31 of inquiry data */
  62. unsigned char raid_level; /* from inquiry page 0xC1 */
  63. unsigned char volume_offline; /* discovered via TUR or VPD */
  64. u16 queue_depth; /* max queue_depth for this device */
  65. atomic_t reset_cmds_out; /* Count of commands to-be affected */
  66. atomic_t ioaccel_cmds_out; /* Only used for physical devices
  67. * counts commands sent to physical
  68. * device via "ioaccel" path.
  69. */
  70. u32 ioaccel_handle;
  71. u8 active_path_index;
  72. u8 path_map;
  73. u8 bay;
  74. u8 box[8];
  75. u16 phys_connector[8];
  76. int offload_config; /* I/O accel RAID offload configured */
  77. int offload_enabled; /* I/O accel RAID offload enabled */
  78. int offload_to_be_enabled;
  79. int hba_ioaccel_enabled;
  80. int offload_to_mirror; /* Send next I/O accelerator RAID
  81. * offload request to mirror drive
  82. */
  83. struct raid_map_data raid_map; /* I/O accelerator RAID map */
  84. /*
  85. * Pointers from logical drive map indices to the phys drives that
  86. * make those logical drives. Note, multiple logical drives may
  87. * share physical drives. You can have for instance 5 physical
  88. * drives with 3 logical drives each using those same 5 physical
  89. * disks. We need these pointers for counting i/o's out to physical
  90. * devices in order to honor physical device queue depth limits.
  91. */
  92. struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
  93. int nphysical_disks;
  94. int supports_aborts;
  95. struct hpsa_sas_port *sas_port;
  96. int external; /* 1-from external array 0-not <0-unknown */
  97. };
  98. struct reply_queue_buffer {
  99. u64 *head;
  100. size_t size;
  101. u8 wraparound;
  102. u32 current_entry;
  103. dma_addr_t busaddr;
  104. };
  105. #pragma pack(1)
  106. struct bmic_controller_parameters {
  107. u8 led_flags;
  108. u8 enable_command_list_verification;
  109. u8 backed_out_write_drives;
  110. u16 stripes_for_parity;
  111. u8 parity_distribution_mode_flags;
  112. u16 max_driver_requests;
  113. u16 elevator_trend_count;
  114. u8 disable_elevator;
  115. u8 force_scan_complete;
  116. u8 scsi_transfer_mode;
  117. u8 force_narrow;
  118. u8 rebuild_priority;
  119. u8 expand_priority;
  120. u8 host_sdb_asic_fix;
  121. u8 pdpi_burst_from_host_disabled;
  122. char software_name[64];
  123. char hardware_name[32];
  124. u8 bridge_revision;
  125. u8 snapshot_priority;
  126. u32 os_specific;
  127. u8 post_prompt_timeout;
  128. u8 automatic_drive_slamming;
  129. u8 reserved1;
  130. u8 nvram_flags;
  131. u8 cache_nvram_flags;
  132. u8 drive_config_flags;
  133. u16 reserved2;
  134. u8 temp_warning_level;
  135. u8 temp_shutdown_level;
  136. u8 temp_condition_reset;
  137. u8 max_coalesce_commands;
  138. u32 max_coalesce_delay;
  139. u8 orca_password[4];
  140. u8 access_id[16];
  141. u8 reserved[356];
  142. };
  143. #pragma pack()
  144. struct ctlr_info {
  145. int ctlr;
  146. char devname[8];
  147. char *product_name;
  148. struct pci_dev *pdev;
  149. u32 board_id;
  150. u64 sas_address;
  151. void __iomem *vaddr;
  152. unsigned long paddr;
  153. int nr_cmds; /* Number of commands allowed on this controller */
  154. #define HPSA_CMDS_RESERVED_FOR_ABORTS 2
  155. #define HPSA_CMDS_RESERVED_FOR_DRIVER 1
  156. struct CfgTable __iomem *cfgtable;
  157. int interrupts_enabled;
  158. int max_commands;
  159. atomic_t commands_outstanding;
  160. # define PERF_MODE_INT 0
  161. # define DOORBELL_INT 1
  162. # define SIMPLE_MODE_INT 2
  163. # define MEMQ_MODE_INT 3
  164. unsigned int intr[MAX_REPLY_QUEUES];
  165. unsigned int msix_vector;
  166. unsigned int msi_vector;
  167. int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
  168. struct access_method access;
  169. /* queue and queue Info */
  170. unsigned int Qdepth;
  171. unsigned int maxSG;
  172. spinlock_t lock;
  173. int maxsgentries;
  174. u8 max_cmd_sg_entries;
  175. int chainsize;
  176. struct SGDescriptor **cmd_sg_list;
  177. struct ioaccel2_sg_element **ioaccel2_cmd_sg_list;
  178. /* pointers to command and error info pool */
  179. struct CommandList *cmd_pool;
  180. dma_addr_t cmd_pool_dhandle;
  181. struct io_accel1_cmd *ioaccel_cmd_pool;
  182. dma_addr_t ioaccel_cmd_pool_dhandle;
  183. struct io_accel2_cmd *ioaccel2_cmd_pool;
  184. dma_addr_t ioaccel2_cmd_pool_dhandle;
  185. struct ErrorInfo *errinfo_pool;
  186. dma_addr_t errinfo_pool_dhandle;
  187. unsigned long *cmd_pool_bits;
  188. int scan_finished;
  189. u8 scan_waiting : 1;
  190. spinlock_t scan_lock;
  191. wait_queue_head_t scan_wait_queue;
  192. struct Scsi_Host *scsi_host;
  193. spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
  194. int ndevices; /* number of used elements in .dev[] array. */
  195. struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
  196. /*
  197. * Performant mode tables.
  198. */
  199. u32 trans_support;
  200. u32 trans_offset;
  201. struct TransTable_struct __iomem *transtable;
  202. unsigned long transMethod;
  203. /* cap concurrent passthrus at some reasonable maximum */
  204. #define HPSA_MAX_CONCURRENT_PASSTHRUS (10)
  205. atomic_t passthru_cmds_avail;
  206. /*
  207. * Performant mode completion buffers
  208. */
  209. size_t reply_queue_size;
  210. struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
  211. u8 nreply_queues;
  212. u32 *blockFetchTable;
  213. u32 *ioaccel1_blockFetchTable;
  214. u32 *ioaccel2_blockFetchTable;
  215. u32 __iomem *ioaccel2_bft2_regs;
  216. unsigned char *hba_inquiry_data;
  217. u32 driver_support;
  218. u32 fw_support;
  219. int ioaccel_support;
  220. int ioaccel_maxsg;
  221. u64 last_intr_timestamp;
  222. u32 last_heartbeat;
  223. u64 last_heartbeat_timestamp;
  224. u32 heartbeat_sample_interval;
  225. atomic_t firmware_flash_in_progress;
  226. u32 __percpu *lockup_detected;
  227. struct delayed_work monitor_ctlr_work;
  228. struct delayed_work rescan_ctlr_work;
  229. int remove_in_progress;
  230. /* Address of h->q[x] is passed to intr handler to know which queue */
  231. u8 q[MAX_REPLY_QUEUES];
  232. char intrname[MAX_REPLY_QUEUES][16]; /* "hpsa0-msix00" names */
  233. u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
  234. #define HPSATMF_BITS_SUPPORTED (1 << 0)
  235. #define HPSATMF_PHYS_LUN_RESET (1 << 1)
  236. #define HPSATMF_PHYS_NEX_RESET (1 << 2)
  237. #define HPSATMF_PHYS_TASK_ABORT (1 << 3)
  238. #define HPSATMF_PHYS_TSET_ABORT (1 << 4)
  239. #define HPSATMF_PHYS_CLEAR_ACA (1 << 5)
  240. #define HPSATMF_PHYS_CLEAR_TSET (1 << 6)
  241. #define HPSATMF_PHYS_QRY_TASK (1 << 7)
  242. #define HPSATMF_PHYS_QRY_TSET (1 << 8)
  243. #define HPSATMF_PHYS_QRY_ASYNC (1 << 9)
  244. #define HPSATMF_IOACCEL_ENABLED (1 << 15)
  245. #define HPSATMF_MASK_SUPPORTED (1 << 16)
  246. #define HPSATMF_LOG_LUN_RESET (1 << 17)
  247. #define HPSATMF_LOG_NEX_RESET (1 << 18)
  248. #define HPSATMF_LOG_TASK_ABORT (1 << 19)
  249. #define HPSATMF_LOG_TSET_ABORT (1 << 20)
  250. #define HPSATMF_LOG_CLEAR_ACA (1 << 21)
  251. #define HPSATMF_LOG_CLEAR_TSET (1 << 22)
  252. #define HPSATMF_LOG_QRY_TASK (1 << 23)
  253. #define HPSATMF_LOG_QRY_TSET (1 << 24)
  254. #define HPSATMF_LOG_QRY_ASYNC (1 << 25)
  255. u32 events;
  256. #define CTLR_STATE_CHANGE_EVENT (1 << 0)
  257. #define CTLR_ENCLOSURE_HOT_PLUG_EVENT (1 << 1)
  258. #define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV (1 << 4)
  259. #define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV (1 << 5)
  260. #define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL (1 << 6)
  261. #define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED (1 << 30)
  262. #define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE (1 << 31)
  263. #define RESCAN_REQUIRED_EVENT_BITS \
  264. (CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
  265. CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
  266. CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
  267. CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
  268. CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
  269. spinlock_t offline_device_lock;
  270. struct list_head offline_device_list;
  271. int acciopath_status;
  272. int drv_req_rescan;
  273. int raid_offload_debug;
  274. int discovery_polling;
  275. struct ReportLUNdata *lastlogicals;
  276. int needs_abort_tags_swizzled;
  277. struct workqueue_struct *resubmit_wq;
  278. struct workqueue_struct *rescan_ctlr_wq;
  279. atomic_t abort_cmds_available;
  280. wait_queue_head_t abort_cmd_wait_queue;
  281. wait_queue_head_t event_sync_wait_queue;
  282. struct mutex reset_mutex;
  283. u8 reset_in_progress;
  284. struct hpsa_sas_node *sas_host;
  285. };
  286. struct offline_device_entry {
  287. unsigned char scsi3addr[8];
  288. struct list_head offline_list;
  289. };
  290. #define HPSA_ABORT_MSG 0
  291. #define HPSA_DEVICE_RESET_MSG 1
  292. #define HPSA_RESET_TYPE_CONTROLLER 0x00
  293. #define HPSA_RESET_TYPE_BUS 0x01
  294. #define HPSA_RESET_TYPE_TARGET 0x03
  295. #define HPSA_RESET_TYPE_LUN 0x04
  296. #define HPSA_PHYS_TARGET_RESET 0x99 /* not defined by cciss spec */
  297. #define HPSA_MSG_SEND_RETRY_LIMIT 10
  298. #define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)
  299. /* Maximum time in seconds driver will wait for command completions
  300. * when polling before giving up.
  301. */
  302. #define HPSA_MAX_POLL_TIME_SECS (20)
  303. /* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
  304. * how many times to retry TEST UNIT READY on a device
  305. * while waiting for it to become ready before giving up.
  306. * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
  307. * between sending TURs while waiting for a device
  308. * to become ready.
  309. */
  310. #define HPSA_TUR_RETRY_LIMIT (20)
  311. #define HPSA_MAX_WAIT_INTERVAL_SECS (30)
  312. /* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
  313. * to become ready, in seconds, before giving up on it.
  314. * HPSA_BOARD_READY_POLL_INTERVAL_MSECS * is how long to wait
  315. * between polling the board to see if it is ready, in
  316. * milliseconds. HPSA_BOARD_READY_POLL_INTERVAL and
  317. * HPSA_BOARD_READY_ITERATIONS are derived from those.
  318. */
  319. #define HPSA_BOARD_READY_WAIT_SECS (120)
  320. #define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
  321. #define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
  322. #define HPSA_BOARD_READY_POLL_INTERVAL \
  323. ((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
  324. #define HPSA_BOARD_READY_ITERATIONS \
  325. ((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
  326. HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
  327. #define HPSA_BOARD_NOT_READY_ITERATIONS \
  328. ((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
  329. HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
  330. #define HPSA_POST_RESET_PAUSE_MSECS (3000)
  331. #define HPSA_POST_RESET_NOOP_RETRIES (12)
  332. /* Defining the diffent access_menthods */
  333. /*
  334. * Memory mapped FIFO interface (SMART 53xx cards)
  335. */
  336. #define SA5_DOORBELL 0x20
  337. #define SA5_REQUEST_PORT_OFFSET 0x40
  338. #define SA5_REQUEST_PORT64_LO_OFFSET 0xC0
  339. #define SA5_REQUEST_PORT64_HI_OFFSET 0xC4
  340. #define SA5_REPLY_INTR_MASK_OFFSET 0x34
  341. #define SA5_REPLY_PORT_OFFSET 0x44
  342. #define SA5_INTR_STATUS 0x30
  343. #define SA5_SCRATCHPAD_OFFSET 0xB0
  344. #define SA5_CTCFG_OFFSET 0xB4
  345. #define SA5_CTMEM_OFFSET 0xB8
  346. #define SA5_INTR_OFF 0x08
  347. #define SA5B_INTR_OFF 0x04
  348. #define SA5_INTR_PENDING 0x08
  349. #define SA5B_INTR_PENDING 0x04
  350. #define FIFO_EMPTY 0xffffffff
  351. #define HPSA_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */
  352. #define HPSA_ERROR_BIT 0x02
  353. /* Performant mode flags */
  354. #define SA5_PERF_INTR_PENDING 0x04
  355. #define SA5_PERF_INTR_OFF 0x05
  356. #define SA5_OUTDB_STATUS_PERF_BIT 0x01
  357. #define SA5_OUTDB_CLEAR_PERF_BIT 0x01
  358. #define SA5_OUTDB_CLEAR 0xA0
  359. #define SA5_OUTDB_CLEAR_PERF_BIT 0x01
  360. #define SA5_OUTDB_STATUS 0x9C
  361. #define HPSA_INTR_ON 1
  362. #define HPSA_INTR_OFF 0
  363. /*
  364. * Inbound Post Queue offsets for IO Accelerator Mode 2
  365. */
  366. #define IOACCEL2_INBOUND_POSTQ_32 0x48
  367. #define IOACCEL2_INBOUND_POSTQ_64_LOW 0xd0
  368. #define IOACCEL2_INBOUND_POSTQ_64_HI 0xd4
  369. #define HPSA_PHYSICAL_DEVICE_BUS 0
  370. #define HPSA_RAID_VOLUME_BUS 1
  371. #define HPSA_EXTERNAL_RAID_VOLUME_BUS 2
  372. #define HPSA_HBA_BUS 3
  373. /*
  374. Send the command to the hardware
  375. */
  376. static void SA5_submit_command(struct ctlr_info *h,
  377. struct CommandList *c)
  378. {
  379. writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
  380. (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
  381. }
  382. static void SA5_submit_command_no_read(struct ctlr_info *h,
  383. struct CommandList *c)
  384. {
  385. writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
  386. }
  387. static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
  388. struct CommandList *c)
  389. {
  390. writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
  391. }
  392. /*
  393. * This card is the opposite of the other cards.
  394. * 0 turns interrupts on...
  395. * 0x08 turns them off...
  396. */
  397. static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
  398. {
  399. if (val) { /* Turn interrupts on */
  400. h->interrupts_enabled = 1;
  401. writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
  402. (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
  403. } else { /* Turn them off */
  404. h->interrupts_enabled = 0;
  405. writel(SA5_INTR_OFF,
  406. h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
  407. (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
  408. }
  409. }
  410. static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
  411. {
  412. if (val) { /* turn on interrupts */
  413. h->interrupts_enabled = 1;
  414. writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
  415. (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
  416. } else {
  417. h->interrupts_enabled = 0;
  418. writel(SA5_PERF_INTR_OFF,
  419. h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
  420. (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
  421. }
  422. }
  423. static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
  424. {
  425. struct reply_queue_buffer *rq = &h->reply_queue[q];
  426. unsigned long register_value = FIFO_EMPTY;
  427. /* msi auto clears the interrupt pending bit. */
  428. if (unlikely(!(h->msi_vector || h->msix_vector))) {
  429. /* flush the controller write of the reply queue by reading
  430. * outbound doorbell status register.
  431. */
  432. (void) readl(h->vaddr + SA5_OUTDB_STATUS);
  433. writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
  434. /* Do a read in order to flush the write to the controller
  435. * (as per spec.)
  436. */
  437. (void) readl(h->vaddr + SA5_OUTDB_STATUS);
  438. }
  439. if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
  440. register_value = rq->head[rq->current_entry];
  441. rq->current_entry++;
  442. atomic_dec(&h->commands_outstanding);
  443. } else {
  444. register_value = FIFO_EMPTY;
  445. }
  446. /* Check for wraparound */
  447. if (rq->current_entry == h->max_commands) {
  448. rq->current_entry = 0;
  449. rq->wraparound ^= 1;
  450. }
  451. return register_value;
  452. }
  453. /*
  454. * returns value read from hardware.
  455. * returns FIFO_EMPTY if there is nothing to read
  456. */
  457. static unsigned long SA5_completed(struct ctlr_info *h,
  458. __attribute__((unused)) u8 q)
  459. {
  460. unsigned long register_value
  461. = readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
  462. if (register_value != FIFO_EMPTY)
  463. atomic_dec(&h->commands_outstanding);
  464. #ifdef HPSA_DEBUG
  465. if (register_value != FIFO_EMPTY)
  466. dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
  467. register_value);
  468. else
  469. dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
  470. #endif
  471. return register_value;
  472. }
  473. /*
  474. * Returns true if an interrupt is pending..
  475. */
  476. static bool SA5_intr_pending(struct ctlr_info *h)
  477. {
  478. unsigned long register_value =
  479. readl(h->vaddr + SA5_INTR_STATUS);
  480. return register_value & SA5_INTR_PENDING;
  481. }
  482. static bool SA5_performant_intr_pending(struct ctlr_info *h)
  483. {
  484. unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
  485. if (!register_value)
  486. return false;
  487. /* Read outbound doorbell to flush */
  488. register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
  489. return register_value & SA5_OUTDB_STATUS_PERF_BIT;
  490. }
  491. #define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT 0x100
  492. static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
  493. {
  494. unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
  495. return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
  496. true : false;
  497. }
  498. #define IOACCEL_MODE1_REPLY_QUEUE_INDEX 0x1A0
  499. #define IOACCEL_MODE1_PRODUCER_INDEX 0x1B8
  500. #define IOACCEL_MODE1_CONSUMER_INDEX 0x1BC
  501. #define IOACCEL_MODE1_REPLY_UNUSED 0xFFFFFFFFFFFFFFFFULL
  502. static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
  503. {
  504. u64 register_value;
  505. struct reply_queue_buffer *rq = &h->reply_queue[q];
  506. BUG_ON(q >= h->nreply_queues);
  507. register_value = rq->head[rq->current_entry];
  508. if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
  509. rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
  510. if (++rq->current_entry == rq->size)
  511. rq->current_entry = 0;
  512. /*
  513. * @todo
  514. *
  515. * Don't really need to write the new index after each command,
  516. * but with current driver design this is easiest.
  517. */
  518. wmb();
  519. writel((q << 24) | rq->current_entry, h->vaddr +
  520. IOACCEL_MODE1_CONSUMER_INDEX);
  521. atomic_dec(&h->commands_outstanding);
  522. }
  523. return (unsigned long) register_value;
  524. }
  525. static struct access_method SA5_access = {
  526. SA5_submit_command,
  527. SA5_intr_mask,
  528. SA5_intr_pending,
  529. SA5_completed,
  530. };
  531. static struct access_method SA5_ioaccel_mode1_access = {
  532. SA5_submit_command,
  533. SA5_performant_intr_mask,
  534. SA5_ioaccel_mode1_intr_pending,
  535. SA5_ioaccel_mode1_completed,
  536. };
  537. static struct access_method SA5_ioaccel_mode2_access = {
  538. SA5_submit_command_ioaccel2,
  539. SA5_performant_intr_mask,
  540. SA5_performant_intr_pending,
  541. SA5_performant_completed,
  542. };
  543. static struct access_method SA5_performant_access = {
  544. SA5_submit_command,
  545. SA5_performant_intr_mask,
  546. SA5_performant_intr_pending,
  547. SA5_performant_completed,
  548. };
  549. static struct access_method SA5_performant_access_no_read = {
  550. SA5_submit_command_no_read,
  551. SA5_performant_intr_mask,
  552. SA5_performant_intr_pending,
  553. SA5_performant_completed,
  554. };
  555. struct board_type {
  556. u32 board_id;
  557. char *product_name;
  558. struct access_method *access;
  559. };
  560. #endif /* HPSA_H */