/*
 * io_sch.h - private definitions for s390 I/O subchannels and CCW devices
 * (channel I/O layer).
 */
  1. #ifndef S390_IO_SCH_H
  2. #define S390_IO_SCH_H
  3. #include <linux/types.h>
  4. #include <asm/schid.h>
  5. #include <asm/ccwdev.h>
  6. #include <asm/irq.h>
  7. #include "css.h"
  8. #include "orb.h"
/*
 * Per-subchannel private data for an I/O subchannel; attached to the
 * subchannel's struct device as driver data (see to_io_private() /
 * set_io_private() below).
 */
struct io_subchannel_private {
	union orb orb;		/* operation request block */
	struct ccw1 sense_ccw;	/* static ccw for sense command */
	struct ccw_device *cdev;/* pointer to the child ccw device */
	struct {
		unsigned int suspend:1;	/* allow suspend */
		unsigned int prefetch:1;/* deny prefetch */
		unsigned int inter:1;	/* suppress intermediate interrupts */
	} __packed options;
} __aligned(8);
/*
 * Accessors for the io_subchannel_private stored as driver data of a
 * subchannel's embedded struct device.
 */
#define to_io_private(n) ((struct io_subchannel_private *) \
			  dev_get_drvdata(&(n)->dev))
#define set_io_private(n, p) (dev_set_drvdata(&(n)->dev, p))
  22. static inline struct ccw_device *sch_get_cdev(struct subchannel *sch)
  23. {
  24. struct io_subchannel_private *priv = to_io_private(sch);
  25. return priv ? priv->cdev : NULL;
  26. }
  27. static inline void sch_set_cdev(struct subchannel *sch,
  28. struct ccw_device *cdev)
  29. {
  30. struct io_subchannel_private *priv = to_io_private(sch);
  31. if (priv)
  32. priv->cdev = cdev;
  33. }
/* Maximum number of CIW entries in a sense-id response (struct senseid). */
#define MAX_CIWS 8

/*
 * Possible status values for a CCW request's I/O.
 */
enum io_status {
	IO_DONE,		/* I/O finished */
	IO_RUNNING,		/* I/O still in progress */
	IO_STATUS_ERROR,	/* error indicated by I/O status */
	IO_PATH_ERROR,		/* error attributed to a channel path */
	IO_REJECTED,		/* start of I/O was rejected */
	IO_KILLED		/* I/O was terminated */
};
/**
 * ccw_request - Internal CCW request.
 * @cp: channel program to start
 * @timeout: maximum allowable time in jiffies between start I/O and interrupt
 * @maxretries: number of retries per I/O operation and path
 * @lpm: mask of paths to use
 * @check: optional callback that determines if results are final
 * @filter: optional callback to adjust request status based on IRB data
 * @callback: final callback
 * @data: user-defined pointer passed to all callbacks
 * @singlepath: if set, use only one path from @lpm per start I/O
 * @cancel: non-zero if request was cancelled
 * @done: non-zero if request was finished
 * @mask: current path mask
 * @retries: current number of retries
 * @drc: delayed return code
 */
struct ccw_request {
	struct ccw1 *cp;
	unsigned long timeout;
	u16 maxretries;
	u8 lpm;
	int (*check)(struct ccw_device *, void *);
	enum io_status (*filter)(struct ccw_device *, void *, struct irb *,
				 enum io_status);
	void (*callback)(struct ccw_device *, void *, int);
	void *data;
	unsigned int singlepath:1;
	/* These fields are used internally. */
	unsigned int cancel:1;
	unsigned int done:1;
	u16 mask;
	u16 retries;
	int drc;
} __attribute__((packed));
/*
 * sense-id response buffer layout
 *
 * NOTE: packed/aligned(4) because this mirrors the wire format returned
 * by the SENSE ID channel command; do not reorder or pad fields.
 */
struct senseid {
	/* common part */
	u8 reserved;		/* always 0x'FF' */
	u16 cu_type;		/* control unit type */
	u8 cu_model;		/* control unit model */
	u16 dev_type;		/* device type */
	u8 dev_model;		/* device model */
	u8 unused;		/* padding byte */
	/* extended part */
	struct ciw ciw[MAX_CIWS];	/* variable # of CIWs */
} __attribute__ ((packed, aligned(4)));
/*
 * Deferred actions for a ccw device; stored in ccw_device_private::todo
 * and carried out via ccw_device_private::todo_work.
 */
enum cdev_todo {
	CDEV_TODO_NOTHING,	/* no deferred action pending */
	CDEV_TODO_ENABLE_CMF,	/* enable channel measurement facility */
	CDEV_TODO_REBIND,	/* re-bind the device to its driver */
	CDEV_TODO_REGISTER,	/* register the device */
	CDEV_TODO_UNREG,	/* unregister the device */
	CDEV_TODO_UNREG_EVAL,	/* unregister, then re-evaluate subchannel */
};
/* Values for the flags.fake_irb field below (0 means: no faked irb). */
#define FAKE_CMD_IRB 1
#define FAKE_TM_IRB 2

/*
 * Per-device private state for a ccw device: identification, the internal
 * I/O request, path masks, option/flag bits and measurement data.
 */
struct ccw_device_private {
	struct ccw_device *cdev;
	struct subchannel *sch;
	int state;		/* device state */
	atomic_t onoff;
	struct ccw_dev_id dev_id;	/* device id */
	struct subchannel_id schid;	/* subchannel number */
	struct ccw_request req;		/* internal I/O request */
	int iretry;		/* internal retry counter */
	u8 pgid_valid_mask;	/* mask of valid PGIDs */
	u8 pgid_todo_mask;	/* mask of PGIDs to be adjusted */
	u8 pgid_reset_mask;	/* mask of PGIDs which were reset */
	u8 path_noirq_mask;	/* mask of paths for which no irq was
				   received */
	u8 path_notoper_mask;	/* mask of paths which were found
				   not operable */
	u8 path_gone_mask;	/* mask of paths, that became unavailable */
	u8 path_new_mask;	/* mask of paths, that became available */
	struct {
		unsigned int fast:1;	/* post with "channel end" */
		unsigned int repall:1;	/* report every interrupt status */
		unsigned int pgroup:1;	/* do path grouping */
		unsigned int force:1;	/* allow forced online */
		unsigned int mpath:1;	/* do multipathing */
	} __attribute__ ((packed)) options;
	struct {
		unsigned int esid:1;	/* Ext. SenseID supported by HW */
		unsigned int dosense:1;	/* delayed SENSE required */
		unsigned int doverify:1;	/* delayed path verification */
		unsigned int donotify:1;	/* call notify function */
		unsigned int recog_done:1;	/* dev. recog. complete */
		unsigned int fake_irb:2;	/* deliver faked irb */
		unsigned int resuming:1;	/* recognition while resume */
		unsigned int pgroup:1;	/* pathgroup is set up */
		unsigned int mpath:1;	/* multipathing is set up */
		unsigned int pgid_unknown:1;/* unknown pgid state */
		unsigned int initialized:1;	/* set if initial reference held */
	} __attribute__((packed)) flags;
	unsigned long intparm;	/* user interruption parameter */
	struct qdio_irq *qdio_data;
	struct irb irb;		/* device status */
	int async_kill_io_rc;
	struct senseid senseid;	/* SenseID info */
	struct pgid pgid[8];	/* path group IDs per chpid*/
	struct ccw1 iccws[2];	/* ccws for SNID/SID/SPGID commands */
	struct work_struct todo_work;	/* executes the deferred action */
	enum cdev_todo todo;	/* pending deferred action */
	wait_queue_head_t wait_q;
	struct timer_list timer;
	void *cmb;		/* measurement information */
	struct list_head cmb_list;	/* list of measured devices */
	u64 cmb_start_time;	/* clock value of cmb reset */
	void *cmb_wait;		/* deferred cmb enable/disable */
	enum interruption_class int_class;
};
/*
 * rsch - issue the RESUME SUBCHANNEL instruction for @schid.
 *
 * The subchannel id is pinned to general register 1, where the
 * instruction expects it. The condition code is extracted from the PSW
 * via "ipm" (insert program mask) and shifted into the low bits with
 * "srl", and returned as an int (0-3).
 */
static inline int rsch(struct subchannel_id schid)
{
	register struct subchannel_id reg1 asm("1") = schid;
	int ccode;

	asm volatile(
		"	rsch\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (ccode)
		: "d" (reg1)
		: "cc", "memory");	/* resumed channel program may touch memory */
	return ccode;
}
/*
 * hsch - issue the HALT SUBCHANNEL instruction for @schid.
 *
 * The subchannel id is pinned to general register 1, where the
 * instruction expects it. Returns the condition code (0-3) extracted
 * from the PSW via "ipm" + "srl".
 */
static inline int hsch(struct subchannel_id schid)
{
	register struct subchannel_id reg1 asm("1") = schid;
	int ccode;

	asm volatile(
		"	hsch\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (ccode)
		: "d" (reg1)
		: "cc");
	return ccode;
}
/*
 * xsch - issue the CANCEL SUBCHANNEL instruction for @schid.
 *
 * The instruction is emitted as a raw RRE opcode (0xb276) via ".insn",
 * presumably to support assemblers lacking the xsch mnemonic. The
 * subchannel id is pinned to general register 1; returns the condition
 * code (0-3) extracted from the PSW via "ipm" + "srl".
 */
static inline int xsch(struct subchannel_id schid)
{
	register struct subchannel_id reg1 asm("1") = schid;
	int ccode;

	asm volatile(
		"	.insn	rre,0xb2760000,%1,0\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (ccode)
		: "d" (reg1)
		: "cc");
	return ccode;
}
  199. #endif