/* raid1.h — data structures for the MD RAID1 personality */
  1. #ifndef _RAID1_H
  2. #define _RAID1_H
  3. struct raid1_info {
  4. struct md_rdev *rdev;
  5. sector_t head_position;
  6. /* When choose the best device for a read (read_balance())
  7. * we try to keep sequential reads one the same device
  8. */
  9. sector_t next_seq_sect;
  10. sector_t seq_start;
  11. };
  12. /*
  13. * memory pools need a pointer to the mddev, so they can force an unplug
  14. * when memory is tight, and a count of the number of drives that the
  15. * pool was allocated for, so they know how much to allocate and free.
  16. * mddev->raid_disks cannot be used, as it can change while a pool is active
  17. * These two datums are stored in a kmalloced struct.
  18. * The 'raid_disks' here is twice the raid_disks in r1conf.
  19. * This allows space for each 'real' device can have a replacement in the
  20. * second half of the array.
  21. */
  22. struct pool_info {
  23. struct mddev *mddev;
  24. int raid_disks;
  25. };
  26. struct r1conf {
  27. struct mddev *mddev;
  28. struct raid1_info *mirrors; /* twice 'raid_disks' to
  29. * allow for replacements.
  30. */
  31. int raid_disks;
  32. /* During resync, read_balancing is only allowed on the part
  33. * of the array that has been resynced. 'next_resync' tells us
  34. * where that is.
  35. */
  36. sector_t next_resync;
  37. /* When raid1 starts resync, we divide array into four partitions
  38. * |---------|--------------|---------------------|-------------|
  39. * next_resync start_next_window end_window
  40. * start_next_window = next_resync + NEXT_NORMALIO_DISTANCE
  41. * end_window = start_next_window + NEXT_NORMALIO_DISTANCE
  42. * current_window_requests means the count of normalIO between
  43. * start_next_window and end_window.
  44. * next_window_requests means the count of normalIO after end_window.
  45. * */
  46. sector_t start_next_window;
  47. int current_window_requests;
  48. int next_window_requests;
  49. spinlock_t device_lock;
  50. /* list of 'struct r1bio' that need to be processed by raid1d,
  51. * whether to retry a read, writeout a resync or recovery
  52. * block, or anything else.
  53. */
  54. struct list_head retry_list;
  55. /* A separate list of r1bio which just need raid_end_bio_io called.
  56. * This mustn't happen for writes which had any errors if the superblock
  57. * needs to be written.
  58. */
  59. struct list_head bio_end_io_list;
  60. /* queue pending writes to be submitted on unplug */
  61. struct bio_list pending_bio_list;
  62. int pending_count;
  63. /* for use when syncing mirrors:
  64. * We don't allow both normal IO and resync/recovery IO at
  65. * the same time - resync/recovery can only happen when there
  66. * is no other IO. So when either is active, the other has to wait.
  67. * See more details description in raid1.c near raise_barrier().
  68. */
  69. wait_queue_head_t wait_barrier;
  70. spinlock_t resync_lock;
  71. int nr_pending;
  72. int nr_waiting;
  73. int nr_queued;
  74. int barrier;
  75. int array_frozen;
  76. /* Set to 1 if a full sync is needed, (fresh device added).
  77. * Cleared when a sync completes.
  78. */
  79. int fullsync;
  80. /* When the same as mddev->recovery_disabled we don't allow
  81. * recovery to be attempted as we expect a read error.
  82. */
  83. int recovery_disabled;
  84. /* poolinfo contains information about the content of the
  85. * mempools - it changes when the array grows or shrinks
  86. */
  87. struct pool_info *poolinfo;
  88. mempool_t *r1bio_pool;
  89. mempool_t *r1buf_pool;
  90. /* temporary buffer to synchronous IO when attempting to repair
  91. * a read error.
  92. */
  93. struct page *tmppage;
  94. /* When taking over an array from a different personality, we store
  95. * the new thread here until we fully activate the array.
  96. */
  97. struct md_thread *thread;
  98. /* Keep track of cluster resync window to send to other
  99. * nodes.
  100. */
  101. sector_t cluster_sync_low;
  102. sector_t cluster_sync_high;
  103. };
  104. /*
  105. * this is our 'private' RAID1 bio.
  106. *
  107. * it contains information about what kind of IO operations were started
  108. * for this RAID1 operation, and about their status:
  109. */
  110. struct r1bio {
  111. atomic_t remaining; /* 'have we finished' count,
  112. * used from IRQ handlers
  113. */
  114. atomic_t behind_remaining; /* number of write-behind ios remaining
  115. * in this BehindIO request
  116. */
  117. sector_t sector;
  118. sector_t start_next_window;
  119. int sectors;
  120. unsigned long state;
  121. struct mddev *mddev;
  122. /*
  123. * original bio going to /dev/mdx
  124. */
  125. struct bio *master_bio;
  126. /*
  127. * if the IO is in READ direction, then this is where we read
  128. */
  129. int read_disk;
  130. struct list_head retry_list;
  131. /* Next two are only valid when R1BIO_BehindIO is set */
  132. struct bio_vec *behind_bvecs;
  133. int behind_page_count;
  134. /*
  135. * if the IO is in WRITE direction, then multiple bios are used.
  136. * We choose the number when they are allocated.
  137. */
  138. struct bio *bios[0];
  139. /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/
  140. };
  141. /* bits for r1bio.state */
  142. #define R1BIO_Uptodate 0
  143. #define R1BIO_IsSync 1
  144. #define R1BIO_Degraded 2
  145. #define R1BIO_BehindIO 3
  146. /* Set ReadError on bios that experience a readerror so that
  147. * raid1d knows what to do with them.
  148. */
  149. #define R1BIO_ReadError 4
  150. /* For write-behind requests, we call bi_end_io when
  151. * the last non-write-behind device completes, providing
  152. * any write was successful. Otherwise we call when
  153. * any write-behind write succeeds, otherwise we call
  154. * with failure when last write completes (and all failed).
  155. * Record that bi_end_io was called with this flag...
  156. */
  157. #define R1BIO_Returned 6
  158. /* If a write for this request means we can clear some
  159. * known-bad-block records, we set this flag
  160. */
  161. #define R1BIO_MadeGood 7
  162. #define R1BIO_WriteError 8
  163. #endif