target_core_fabric.h

#ifndef TARGET_CORE_FABRIC_H
#define TARGET_CORE_FABRIC_H

struct target_core_fabric_ops {
        struct module *module;
        const char *name;
        size_t node_acl_size;
        /*
         * Limits the number of scatterlist entries per SCF_SCSI_DATA_CDB
         * payload. Setting this value tells target-core to enforce the
         * limit and to report it as the INQUIRY EVPD=0xb0 Block Limits
         * MAXIMUM TRANSFER LENGTH.
         *
         * target-core will currently reset se_cmd->data_length to this
         * maximum size, and set the UNDERFLOW residual count if the
         * length exceeds this limit.
         *
         * XXX: Not all initiator hosts honor this block-limits EVPD
         * XXX: Currently assumes single PAGE_SIZE per scatterlist entry
         */
        u32 max_data_sg_nents;
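        /*
         * Worked example (illustrative only, not defined by this header):
         * with the single-PAGE_SIZE-per-entry assumption above, a fabric
         * that sets
         *
         *      .max_data_sg_nents = 1024,
         *
         * on a 4 KiB PAGE_SIZE system caps each SCF_SCSI_DATA_CDB payload
         * at 1024 * 4096 bytes = 4 MiB, which target-core would report as
         * MAXIMUM TRANSFER LENGTH = 8192 blocks for a 512-byte logical
         * block size.
         */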
        char *(*get_fabric_name)(void);
        char *(*tpg_get_wwn)(struct se_portal_group *);
        u16 (*tpg_get_tag)(struct se_portal_group *);
        u32 (*tpg_get_default_depth)(struct se_portal_group *);
        int (*tpg_check_demo_mode)(struct se_portal_group *);
        int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
        int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
        int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *);
        /*
         * Optionally used by fabrics to allow demo-mode login, but not
         * expose any TPG LUNs, and return 'not connected' in the standard
         * INQUIRY response.
         */
        int (*tpg_check_demo_mode_login_only)(struct se_portal_group *);
        /*
         * Optionally used as a configfs tunable to determine when
         * target-core should signal the PROTECT=1 feature bit for
         * backends that don't support T10-PI, so that either fabric
         * HW offload or target-core emulation performs the associated
         * WRITE_STRIP and READ_INSERT operations.
         */
        int (*tpg_check_prot_fabric_only)(struct se_portal_group *);
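        /*
         * Sketch of a typical implementation (the "example" fabric and its
         * "fabric_prot_only" attribute are hypothetical, not part of this
         * API): the callback usually just reflects a per-TPG configfs
         * attribute, e.g.
         *
         *      static int example_tpg_check_prot_fabric_only(
         *              struct se_portal_group *se_tpg)
         *      {
         *              struct example_tpg *tpg = container_of(se_tpg,
         *                              struct example_tpg, se_tpg);
         *
         *              return tpg->fabric_prot_only;
         *      }
         */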
        u32 (*tpg_get_inst_index)(struct se_portal_group *);
        /*
         * Optional to release struct se_cmd and fabric dependent allocated
         * I/O descriptor in transport_cmd_check_stop().
         *
         * Returning 1 will signal a descriptor has been released.
         * Returning 0 will signal a descriptor has not been released.
         */
        int (*check_stop_free)(struct se_cmd *);
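        /*
         * Sketch of a minimal implementation (hypothetical "example"
         * fabric; assumes the fabric wants its descriptor torn down as
         * soon as target-core is finished with the command):
         *
         *      static int example_check_stop_free(struct se_cmd *se_cmd)
         *      {
         *              transport_generic_free_cmd(se_cmd, 0);
         *              return 1;
         *      }
         *
         * The non-zero return tells target-core the descriptor was freed.
         */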
        void (*release_cmd)(struct se_cmd *);
        /*
         * Called with struct se_portal_group->session_lock held,
         * taken via spin_lock_bh().
         */
        int (*shutdown_session)(struct se_session *);
        void (*close_session)(struct se_session *);
        u32 (*sess_get_index)(struct se_session *);
        /*
         * Used only for SCSI fabrics that contain multi-value TransportIDs
         * (like iSCSI). All other SCSI fabrics should set this to NULL.
         */
        u32 (*sess_get_initiator_sid)(struct se_session *,
                        unsigned char *, u32);
        int (*write_pending)(struct se_cmd *);
        int (*write_pending_status)(struct se_cmd *);
        void (*set_default_node_attributes)(struct se_node_acl *);
        int (*get_cmd_state)(struct se_cmd *);
        int (*queue_data_in)(struct se_cmd *);
        int (*queue_status)(struct se_cmd *);
        void (*queue_tm_rsp)(struct se_cmd *);
        void (*aborted_task)(struct se_cmd *);
        /*
         * fabric module calls for target_core_fabric_configfs.c
         */
        struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
                        struct config_group *, const char *);
        void (*fabric_drop_wwn)(struct se_wwn *);
        struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
                        struct config_group *, const char *);
        void (*fabric_drop_tpg)(struct se_portal_group *);
        int (*fabric_post_link)(struct se_portal_group *,
                        struct se_lun *);
        void (*fabric_pre_unlink)(struct se_portal_group *,
                        struct se_lun *);
        struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *,
                        struct config_group *, const char *);
        void (*fabric_drop_np)(struct se_tpg_np *);
        int (*fabric_init_nodeacl)(struct se_node_acl *, const char *);
        void (*fabric_cleanup_nodeacl)(struct se_node_acl *);

        struct configfs_attribute **tfc_discovery_attrs;
        struct configfs_attribute **tfc_wwn_attrs;
        struct configfs_attribute **tfc_tpg_base_attrs;
        struct configfs_attribute **tfc_tpg_np_base_attrs;
        struct configfs_attribute **tfc_tpg_attrib_attrs;
        struct configfs_attribute **tfc_tpg_auth_attrs;
        struct configfs_attribute **tfc_tpg_param_attrs;
        struct configfs_attribute **tfc_tpg_nacl_base_attrs;
        struct configfs_attribute **tfc_tpg_nacl_attrib_attrs;
        struct configfs_attribute **tfc_tpg_nacl_auth_attrs;
        struct configfs_attribute **tfc_tpg_nacl_param_attrs;
};

int target_register_template(const struct target_core_fabric_ops *fo);
void target_unregister_template(const struct target_core_fabric_ops *fo);
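/*
 * Example registration flow (illustrative sketch only; the "example_ops"
 * table, the "example" fabric name, and the example_* callbacks are
 * hypothetical, not part of this API):
 *
 *      static const struct target_core_fabric_ops example_ops = {
 *              .module                 = THIS_MODULE,
 *              .name                   = "example",
 *              .get_fabric_name        = example_get_fabric_name,
 *              .tpg_get_wwn            = example_tpg_get_wwn,
 *              .tpg_get_tag            = example_tpg_get_tag,
 *              ...
 *      };
 *
 *      static int __init example_init(void)
 *      {
 *              return target_register_template(&example_ops);
 *      }
 *
 *      static void __exit example_exit(void)
 *      {
 *              target_unregister_template(&example_ops);
 *      }
 */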
int target_depend_item(struct config_item *item);
void target_undepend_item(struct config_item *item);

struct se_session *transport_init_session(enum target_prot_op);
int transport_alloc_session_tags(struct se_session *, unsigned int,
                unsigned int);
struct se_session *transport_init_session_tags(unsigned int, unsigned int,
                enum target_prot_op);
void __transport_register_session(struct se_portal_group *,
                struct se_node_acl *, struct se_session *, void *);
void transport_register_session(struct se_portal_group *,
                struct se_node_acl *, struct se_session *, void *);
int target_get_session(struct se_session *);
void target_put_session(struct se_session *);
ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *);
void transport_free_session(struct se_session *);
void target_put_nacl(struct se_node_acl *);
void transport_deregister_session_configfs(struct se_session *);
void transport_deregister_session(struct se_session *);
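/*
 * Typical session setup in a fabric driver (hedged sketch; se_tpg,
 * initiator_name, and fabric_sess_ptr are placeholders supplied by a
 * hypothetical fabric, not part of this API):
 *
 *      struct se_session *se_sess;
 *      struct se_node_acl *acl;
 *
 *      se_sess = transport_init_session(TARGET_PROT_NORMAL);
 *      if (IS_ERR(se_sess))
 *              return PTR_ERR(se_sess);
 *
 *      acl = core_tpg_check_initiator_node_acl(se_tpg, initiator_name);
 *      if (!acl) {
 *              transport_free_session(se_sess);
 *              return -ENOENT;
 *      }
 *      transport_register_session(se_tpg, acl, se_sess, fabric_sess_ptr);
 *
 * Teardown is the reverse: transport_deregister_session_configfs() from
 * the configfs path, followed by transport_deregister_session().
 */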
void transport_init_se_cmd(struct se_cmd *,
                const struct target_core_fabric_ops *,
                struct se_session *, u32, int, int, unsigned char *);
sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u64);
sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
int target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *,
                unsigned char *, unsigned char *, u64, u32, int, int, int,
                struct scatterlist *, u32, struct scatterlist *, u32,
                struct scatterlist *, u32);
int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
                unsigned char *, u64, u32, int, int, int);
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
                unsigned char *sense, u64 unpacked_lun,
                void *fabric_tmr_ptr, unsigned char tm_type,
                gfp_t, unsigned int, int);
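/*
 * Example I/O submission (sketch only; se_cmd, se_sess, cdb, sense_buf,
 * unpacked_lun and data_length are supplied by a hypothetical fabric
 * driver handling a READ):
 *
 *      rc = target_submit_cmd(se_cmd, se_sess, cdb, sense_buf,
 *                      unpacked_lun, data_length,
 *                      TCM_SIMPLE_TAG, DMA_FROM_DEVICE, 0);
 *      if (rc)
 *              return rc;
 *
 * target_submit_cmd() initializes the se_cmd, maps the CDB, and queues
 * the command for execution; completions come back through the fabric's
 * queue_data_in() and queue_status() callbacks in
 * struct target_core_fabric_ops.
 */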
int transport_handle_cdb_direct(struct se_cmd *);
sense_reason_t transport_generic_new_cmd(struct se_cmd *);
void target_execute_cmd(struct se_cmd *cmd);
int transport_generic_free_cmd(struct se_cmd *, int);
bool transport_wait_for_tasks(struct se_cmd *);
int transport_check_aborted_status(struct se_cmd *, int);
int transport_send_check_condition_and_sense(struct se_cmd *,
                sense_reason_t, int);
int target_get_sess_cmd(struct se_cmd *, bool);
int target_put_sess_cmd(struct se_cmd *);
void target_sess_cmd_list_set_waiting(struct se_session *);
void target_wait_for_sess_cmds(struct se_session *);

int core_alua_check_nonop_delay(struct se_cmd *);

int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
void core_tmr_release_req(struct se_tmr_req *);
int transport_generic_handle_tmr(struct se_cmd *);
void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
int transport_lookup_tmr_lun(struct se_cmd *, u64);
void core_allocate_nexus_loss_ua(struct se_node_acl *acl);

struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
                unsigned char *);
bool target_tpg_has_node_acl(struct se_portal_group *tpg,
                const char *);
struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
                unsigned char *);
int core_tpg_set_initiator_node_queue_depth(struct se_node_acl *, u32);
int core_tpg_set_initiator_node_tag(struct se_portal_group *,
                struct se_node_acl *, const char *);
int core_tpg_register(struct se_wwn *, struct se_portal_group *, int);
int core_tpg_deregister(struct se_portal_group *);

/*
 * The LIO target core uses DMA_TO_DEVICE to mean that data is going
 * to the target (e.g. handling a WRITE) and DMA_FROM_DEVICE to mean
 * that data is coming from the target (e.g. handling a READ). However,
 * this is just the opposite of what we have to tell the DMA mapping
 * layer -- e.g. when handling a READ, the HBA will have to DMA the data
 * out of memory so it can send it to the initiator, which means we
 * need to use DMA_TO_DEVICE when we map the data.
 */
static inline enum dma_data_direction
target_reverse_dma_direction(struct se_cmd *se_cmd)
{
        if (se_cmd->se_cmd_flags & SCF_BIDI)
                return DMA_BIDIRECTIONAL;

        switch (se_cmd->data_direction) {
        case DMA_TO_DEVICE:
                return DMA_FROM_DEVICE;
        case DMA_FROM_DEVICE:
                return DMA_TO_DEVICE;
        case DMA_NONE:
        default:
                return DMA_NONE;
        }
}
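/*
 * Usage sketch (illustrative; hba_dev is a hypothetical struct device
 * owned by the fabric's HBA driver): when DMA-mapping the payload for
 * hardware, pass the reversed direction to the DMA API, e.g.
 *
 *      count = dma_map_sg(hba_dev, cmd->t_data_sg, cmd->t_data_nents,
 *                      target_reverse_dma_direction(cmd));
 *
 * so that a SCSI READ (DMA_FROM_DEVICE at the target level) is mapped
 * DMA_TO_DEVICE for the device that sends the data back to the initiator.
 */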

#endif /* TARGET_CORE_FABRIC_H */