#ifndef IOCONTEXT_H
#define IOCONTEXT_H

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

enum {
	ICQ_EXITED		= 1 << 2,
};

/*
 * An io_cq (icq) is an association between an io_context (ioc) and a
 * request_queue (q).  This is used by elevators which need to track
 * information per ioc - q pair.
 *
 * An elevator can request use of icqs by setting elevator_type->icq_size
 * and ->icq_align.  Both size and align must be larger than those of
 * struct io_cq, and the elevator can use the tail area for private
 * information.  The recommended way to do this is to define a struct
 * which contains io_cq as the first member followed by private members
 * and to use its size and align.  For example,
 *
 *	struct snail_io_cq {
 *		struct io_cq	icq;
 *		int		poke_snail;
 *		int		feed_snail;
 *	};
 *
 *	struct elevator_type snail_elv_type {
 *		.ops =		{ ... },
 *		.icq_size =	sizeof(struct snail_io_cq),
 *		.icq_align =	__alignof__(struct snail_io_cq),
 *		...
 *	};
 *
 * If icq_size is set, block core will manage icqs.  Each request will
 * have its ->elv.icq field set before elevator_ops->elevator_set_req_fn()
 * is called and will hold a reference to the associated io_context.
 *
 * Whenever a new icq is created, elevator_ops->elevator_init_icq_fn() is
 * called and, on destruction, ->elevator_exit_icq_fn().  Both functions
 * are called with both the associated io_context and queue locks held.
 *
 * An elevator may look up an icq using ioc_lookup_icq() while holding the
 * queue lock, but the returned icq is valid only until the queue lock is
 * released (see the lookup sketch following struct io_cq below).
 * Elevators cannot and should not try to create or destroy icqs.
 *
 * As icqs are linked from both ioc and q, the locking rules are a bit
 * complex.
 *
 * - ioc lock nests inside q lock.
 *
 * - ioc->icq_list and icq->ioc_node are protected by ioc lock.
 *   q->icq_list and icq->q_node by q lock.
 *
 * - ioc->icq_tree and ioc->icq_hint are protected by ioc lock, while icq
 *   itself is protected by q lock.  However, both the indexes and icq
 *   itself are also RCU managed and lookup can be performed holding only
 *   the q lock.
 *
 * - icqs are not reference counted.  They are destroyed when either the
 *   ioc or q goes away.  Each request with icq set holds an extra
 *   reference to ioc to ensure it stays until the request is completed.
 *
 * - Linking and unlinking icqs are performed while holding both ioc and q
 *   locks.  Due to the lock ordering, q exit is simple but ioc exit
 *   requires a reverse-order double lock dance.
 */
struct io_cq {
	struct request_queue	*q;
	struct io_context	*ioc;

	/*
	 * q_node and ioc_node link io_cq through icq_list of q and ioc
	 * respectively.  Both fields are unused once ioc_exit_icq() is
	 * called and shared with __rcu_icq_cache and __rcu_head which are
	 * used for RCU free of io_cq.
	 */
	union {
		struct list_head	q_node;
		struct kmem_cache	*__rcu_icq_cache;
	};
	union {
		struct hlist_node	ioc_node;
		struct rcu_head		__rcu_head;
	};

	unsigned int		flags;
};
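
/*
 * For illustration, building on the snail example above: a minimal sketch
 * of how an elevator might look up its icq while holding the queue lock.
 * This is a sketch only; ioc_lookup_icq() is provided by the block core
 * rather than this header, and the returned icq stays valid only until
 * the queue lock is dropped:
 *
 *	struct io_cq *icq;
 *
 *	spin_lock_irq(q->queue_lock);
 *	icq = ioc_lookup_icq(ioc, q);
 *	if (icq) {
 *		struct snail_io_cq *sicq =
 *			container_of(icq, struct snail_io_cq, icq);
 *		sicq->poke_snail++;
 *	}
 *	spin_unlock_irq(q->queue_lock);
 */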

/*
 * I/O subsystem state of the associated processes.  It is refcounted
 * and kmalloc'ed, and it may be shared between processes.
 */
struct io_context {
	atomic_long_t refcount;
	atomic_t active_ref;
	atomic_t nr_tasks;

	/* all the fields below are protected by this lock */
	spinlock_t lock;

	unsigned short ioprio;

	/*
	 * For request batching
	 */
	int nr_batch_requests;     /* Number of requests left in the batch */
	unsigned long last_waited; /* Time last woken after wait for request */

	struct radix_tree_root	icq_tree;
	struct io_cq __rcu	*icq_hint;
	struct hlist_head	icq_list;

	struct work_struct release_work;
};
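
/*
 * The ioprio field caches the context's I/O priority as a single value
 * packed by the helpers in <linux/ioprio.h> (not included by this
 * header).  A minimal sketch of unpacking it, assuming that header is
 * available to the caller:
 *
 *	int class = IOPRIO_PRIO_CLASS(ioc->ioprio);
 *	int data  = IOPRIO_PRIO_DATA(ioc->ioprio);
 */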

/**
 * get_io_context_active - get active reference on ioc
 * @ioc: ioc of interest
 *
 * Only iocs with an active reference can issue new IOs.  This function
 * acquires an active reference on @ioc.  The caller must already have an
 * active reference on @ioc.
 */
static inline void get_io_context_active(struct io_context *ioc)
{
	WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
	WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
	atomic_long_inc(&ioc->refcount);
	atomic_inc(&ioc->active_ref);
}
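
/*
 * A sketch of balancing the active reference; put_io_context_active()
 * (declared below, CONFIG_BLOCK only) drops both the active reference
 * and the regular reference taken by get_io_context_active():
 *
 *	get_io_context_active(ioc);
 *	... issue IO on behalf of ioc ...
 *	put_io_context_active(ioc);
 */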

static inline void ioc_task_link(struct io_context *ioc)
{
	get_io_context_active(ioc);

	WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
	atomic_inc(&ioc->nr_tasks);
}
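
/*
 * A sketch of the sharing pattern used at fork time (cf. copy_io() in
 * kernel/fork.c): a child task tsk cloned with CLONE_IO links itself to
 * the parent's io_context instead of getting its own:
 *
 *	struct io_context *ioc = current->io_context;
 *
 *	if (ioc && (clone_flags & CLONE_IO)) {
 *		ioc_task_link(ioc);
 *		tsk->io_context = ioc;
 *	}
 */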

struct task_struct;
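
/*
 * A hypothetical user of the API below would typically take a reference
 * on a task's ioc (creating it if necessary) and drop it when done; a
 * sketch only, with error handling trimmed:
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(current, GFP_KERNEL, NUMA_NO_NODE);
 *	if (ioc) {
 *		... inspect or update ioc->ioprio under ioc->lock ...
 *		put_io_context(ioc);
 *	}
 */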
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc);
void put_io_context_active(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node);
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }
static inline void exit_io_context(struct task_struct *task) { }
#endif

#endif