bcache.h

#ifndef _LINUX_BCACHE_H
#define _LINUX_BCACHE_H

/*
 * Bcache on disk data structures
 */

#include <asm/types.h>

#define BITMASK(name, type, field, offset, size)		\
static inline __u64 name(const type *k)			\
{ return (k->field >> offset) & ~(~0ULL << size); }		\
								\
static inline void SET_##name(type *k, __u64 v)			\
{								\
	k->field &= ~(~(~0ULL << size) << offset);		\
	k->field |= (v & ~(~0ULL << size)) << offset;		\
}
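
/*
 * Illustrative example (not part of the original header): an invocation such
 * as BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1), used further down,
 * expands to a getter/setter pair for a 1-bit field at bit 0 of flags:
 *
 *	static inline __u64 CACHE_SYNC(const struct cache_sb *k)
 *	{ return (k->flags >> 0) & ~(~0ULL << 1); }
 *
 *	static inline void SET_CACHE_SYNC(struct cache_sb *k, __u64 v)
 *	{
 *		k->flags &= ~(~(~0ULL << 1) << 0);
 *		k->flags |= (v & ~(~0ULL << 1)) << 0;
 *	}
 */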

/* Btree keys - all units are in sectors */

struct bkey {
	__u64		high;
	__u64		low;
	__u64		ptr[];
};

#define KEY_FIELD(name, field, offset, size)			\
	BITMASK(name, struct bkey, field, offset, size)

#define PTR_FIELD(name, offset, size)				\
static inline __u64 name(const struct bkey *k, unsigned i)	\
{ return (k->ptr[i] >> offset) & ~(~0ULL << size); }		\
								\
static inline void SET_##name(struct bkey *k, unsigned i, __u64 v) \
{								\
	k->ptr[i] &= ~(~(~0ULL << size) << offset);		\
	k->ptr[i] |= (v & ~(~0ULL << size)) << offset;		\
}

#define KEY_SIZE_BITS		16
#define KEY_MAX_U64S		8

KEY_FIELD(KEY_PTRS,	high, 60, 3)
KEY_FIELD(HEADER_SIZE,	high, 58, 2)
KEY_FIELD(KEY_CSUM,	high, 56, 2)
KEY_FIELD(KEY_PINNED,	high, 55, 1)
KEY_FIELD(KEY_DIRTY,	high, 36, 1)

KEY_FIELD(KEY_SIZE,	high, 20, KEY_SIZE_BITS)
KEY_FIELD(KEY_INODE,	high, 0,  20)
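
/*
 * Illustrative summary (not part of the original header), derived from the
 * KEY_FIELD() definitions above - layout of bkey.high:
 *
 *	bit  63		always set by the KEY() macro (see its comment below)
 *	bits 60-62	KEY_PTRS
 *	bits 58-59	HEADER_SIZE
 *	bits 56-57	KEY_CSUM
 *	bit  55		KEY_PINNED
 *	bit  36		KEY_DIRTY
 *	bits 20-35	KEY_SIZE	(KEY_SIZE_BITS wide)
 *	bits 0-19	KEY_INODE
 *
 * bkey.low holds KEY_OFFSET(), defined next.
 */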

/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */

static inline __u64 KEY_OFFSET(const struct bkey *k)
{
	return k->low;
}

static inline void SET_KEY_OFFSET(struct bkey *k, __u64 v)
{
	k->low = v;
}

/*
 * The high bit being set is a relic from when we used it to do binary
 * searches - it told you where a key started. It's not used anymore,
 * and can probably be safely dropped.
 */
#define KEY(inode, offset, size)					\
((struct bkey) {							\
	.high = (1ULL << 63) | ((__u64) (size) << 20) | (inode),	\
	.low = (offset)							\
})

#define ZERO_KEY	KEY(0, 0, 0)

#define MAX_KEY_INODE	(~(~0 << 20))
#define MAX_KEY_OFFSET	(~0ULL >> 1)
#define MAX_KEY		KEY(MAX_KEY_INODE, MAX_KEY_OFFSET, 0)

#define KEY_START(k)	(KEY_OFFSET(k) - KEY_SIZE(k))
#define START_KEY(k)	KEY(KEY_INODE(k), KEY_START(k), 0)
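
/*
 * Illustrative example (not part of the original header): KEY_OFFSET() is
 * the end of the extent a key describes and KEY_SIZE() sectors extend back
 * from it, which is why KEY_START() subtracts the size. A hypothetical key
 *
 *	struct bkey k = KEY(1, 1024, 16);
 *
 * has KEY_INODE(&k) == 1, KEY_OFFSET(&k) == 1024, KEY_SIZE(&k) == 16 and
 * KEY_START(&k) == 1008, i.e. it covers sectors 1008-1023 of inode 1.
 */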

#define PTR_DEV_BITS		12

PTR_FIELD(PTR_DEV,		51, PTR_DEV_BITS)
PTR_FIELD(PTR_OFFSET,		8,  43)
PTR_FIELD(PTR_GEN,		0,  8)

#define PTR_CHECK_DEV		((1 << PTR_DEV_BITS) - 1)

#define MAKE_PTR(gen, offset, dev)					\
	((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
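
/*
 * Illustrative summary (not part of the original header), derived from the
 * PTR_FIELD() definitions above - layout of each bkey.ptr[i]:
 *
 *	bits 51-62	PTR_DEV		(PTR_DEV_BITS wide)
 *	bits 8-50	PTR_OFFSET	sectors
 *	bits 0-7	PTR_GEN
 *
 * MAKE_PTR() packs the three fields back into one __u64, so for a key k with
 * k.ptr[0] == MAKE_PTR(3, 2048, 0), the accessors above return
 * PTR_GEN(&k, 0) == 3, PTR_OFFSET(&k, 0) == 2048 and PTR_DEV(&k, 0) == 0.
 */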

/* Bkey utility code */

static inline unsigned long bkey_u64s(const struct bkey *k)
{
	return (sizeof(struct bkey) / sizeof(__u64)) + KEY_PTRS(k);
}

static inline unsigned long bkey_bytes(const struct bkey *k)
{
	return bkey_u64s(k) * sizeof(__u64);
}

#define bkey_copy(_dest, _src)	memcpy(_dest, _src, bkey_bytes(_src))

static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
{
	SET_KEY_INODE(dest, KEY_INODE(src));
	SET_KEY_OFFSET(dest, KEY_OFFSET(src));
}

static inline struct bkey *bkey_next(const struct bkey *k)
{
	__u64 *d = (void *) k;

	return (struct bkey *) (d + bkey_u64s(k));
}

static inline struct bkey *bkey_idx(const struct bkey *k, unsigned nr_keys)
{
	__u64 *d = (void *) k;

	return (struct bkey *) (d + nr_keys);
}

/* Enough for a key with 6 pointers */
#define BKEY_PAD		8

#define BKEY_PADDED(key)					\
	union { struct bkey key; __u64 key ## _pad[BKEY_PAD]; }
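
/*
 * Illustrative note (not part of the original header): keys are variable
 * length - bkey_u64s() is the two fixed __u64s plus one per pointer, and
 * bkey_next() steps over exactly that many __u64s to reach the next key in a
 * packed list. BKEY_PADDED(key) reserves the worst-case BKEY_PAD __u64s so a
 * single key can be embedded in a fixed-size structure; struct jset below
 * uses it for uuid_bucket and btree_root, which the anonymous union makes
 * accessible directly as j->uuid_bucket and j->btree_root.
 */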

/* Superblock */

/* Version 0: Cache device
 * Version 1: Backing device
 * Version 2: Seed pointer into btree node checksum
 * Version 3: Cache device with new UUID format
 * Version 4: Backing device with data offset
 */
#define BCACHE_SB_VERSION_CDEV			0
#define BCACHE_SB_VERSION_BDEV			1
#define BCACHE_SB_VERSION_CDEV_WITH_UUID	3
#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET	4
#define BCACHE_SB_MAX_VERSION			4

#define SB_SECTOR		8
#define SB_SIZE			4096
#define SB_LABEL_SIZE		32
#define SB_JOURNAL_BUCKETS	256U
/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
#define MAX_CACHES_PER_SET	8

#define BDEV_DATA_START_DEFAULT	16	/* sectors */

struct cache_sb {
	__u64		csum;
	__u64		offset;	/* sector where this sb was written */
	__u64		version;

	__u8		magic[16];

	__u8		uuid[16];
	union {
		__u8	set_uuid[16];
		__u64	set_magic;
	};
	__u8		label[SB_LABEL_SIZE];

	__u64		flags;
	__u64		seq;
	__u64		pad[8];

	union {
	struct {
		/* Cache devices */
		__u64	nbuckets;	/* device size */

		__u16	block_size;	/* sectors */
		__u16	bucket_size;	/* sectors */

		__u16	nr_in_set;
		__u16	nr_this_dev;
	};
	struct {
		/* Backing devices */
		__u64	data_offset;

		/*
		 * block_size from the cache device section is still used by
		 * backing devices, so don't add anything here until we fix
		 * things to not need it for backing devices anymore
		 */
	};
	};

	__u32		last_mount;	/* time_t */

	__u16		first_bucket;
	union {
		__u16	njournal_buckets;
		__u16	keys;
	};
	__u64		d[SB_JOURNAL_BUCKETS];	/* journal buckets */
};

static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
{
	return sb->version == BCACHE_SB_VERSION_BDEV
		|| sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
}

BITMASK(CACHE_SYNC,		struct cache_sb, flags, 0, 1);
BITMASK(CACHE_DISCARD,		struct cache_sb, flags, 1, 1);
BITMASK(CACHE_REPLACEMENT,	struct cache_sb, flags, 2, 3);
#define CACHE_REPLACEMENT_LRU		0U
#define CACHE_REPLACEMENT_FIFO		1U
#define CACHE_REPLACEMENT_RANDOM	2U

BITMASK(BDEV_CACHE_MODE,	struct cache_sb, flags, 0, 4);
#define CACHE_MODE_WRITETHROUGH		0U
#define CACHE_MODE_WRITEBACK		1U
#define CACHE_MODE_WRITEAROUND		2U
#define CACHE_MODE_NONE			3U

BITMASK(BDEV_STATE,		struct cache_sb, flags, 61, 2);
#define BDEV_STATE_NONE			0U
#define BDEV_STATE_CLEAN		1U
#define BDEV_STATE_DIRTY		2U
#define BDEV_STATE_STALE		3U
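
/*
 * Illustrative sketch (not part of the original header): flags is shared
 * between cache and backing devices and the BDEV_* fields overlap the CACHE_*
 * ones, so which accessors apply depends on SB_IS_BDEV():
 *
 *	if (SB_IS_BDEV(sb)) {
 *		// backing device: BDEV_CACHE_MODE() and BDEV_STATE() apply
 *		int writeback = BDEV_CACHE_MODE(sb) == CACHE_MODE_WRITEBACK;
 *		int dirty     = BDEV_STATE(sb) == BDEV_STATE_DIRTY;
 *	} else {
 *		// cache device: CACHE_SYNC(), CACHE_DISCARD() and
 *		// CACHE_REPLACEMENT() apply
 *		int lru = CACHE_REPLACEMENT(sb) == CACHE_REPLACEMENT_LRU;
 *	}
 */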

/*
 * Magic numbers
 *
 * The various other data structures have their own magic numbers, which are
 * xored with the first part of the cache set's UUID
 */

#define JSET_MAGIC		0x245235c1a3625032ULL
#define PSET_MAGIC		0x6750e15f87337f91ULL
#define BSET_MAGIC		0x90135c78b99e07f5ULL

static inline __u64 jset_magic(struct cache_sb *sb)
{
	return sb->set_magic ^ JSET_MAGIC;
}

static inline __u64 pset_magic(struct cache_sb *sb)
{
	return sb->set_magic ^ PSET_MAGIC;
}

static inline __u64 bset_magic(struct cache_sb *sb)
{
	return sb->set_magic ^ BSET_MAGIC;
}
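
/*
 * Illustrative note (not part of the original header): set_magic aliases the
 * first 8 bytes of set_uuid, so these helpers produce per-cache-set magic
 * values. A structure read from disk can then be checked against the
 * superblock it is expected to belong to, e.g. for a struct jset *j:
 *
 *	if (j->magic != jset_magic(sb))
 *		...;	// not a journal entry for this cache set
 */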

/*
 * Journal
 *
 * On disk format for a journal entry:
 * seq is monotonically increasing; every journal entry has its own unique
 * sequence number.
 *
 * last_seq is the oldest journal entry that still has keys the btree hasn't
 * flushed to disk yet.
 *
 * version is for on disk format changes.
 */

#define BCACHE_JSET_VERSION_UUIDv1	1
#define BCACHE_JSET_VERSION_UUID	1	/* Always latest UUID format */
#define BCACHE_JSET_VERSION		1

struct jset {
	__u64		csum;
	__u64		magic;
	__u64		seq;
	__u32		version;
	__u32		keys;

	__u64		last_seq;

	BKEY_PADDED(uuid_bucket);
	BKEY_PADDED(btree_root);
	__u16		btree_level;
	__u16		pad[3];

	__u64		prio_bucket[MAX_CACHES_PER_SET];

	union {
		struct bkey	start[0];
		__u64		d[0];
	};
};

/* Bucket prios/gens */

struct prio_set {
	__u64		csum;
	__u64		magic;
	__u64		seq;
	__u32		version;
	__u32		pad;

	__u64		next_bucket;

	struct bucket_disk {
		__u16	prio;
		__u8	gen;
	} __attribute((packed)) data[];
};

/* UUIDS - per backing device/flash only volume metadata */

struct uuid_entry {
	union {
		struct {
			__u8	uuid[16];
			__u8	label[32];
			__u32	first_reg;
			__u32	last_reg;
			__u32	invalidated;

			__u32	flags;
			/* Size of flash only volumes */
			__u64	sectors;
		};

		__u8	pad[128];
	};
};

BITMASK(UUID_FLASH_ONLY,	struct uuid_entry, flags, 0, 1);

/* Btree nodes */

/* Version 1: Seed pointer into btree node checksum
 */
#define BCACHE_BSET_CSUM	1
#define BCACHE_BSET_VERSION	1

/*
 * Btree nodes
 *
 * On disk a btree node is a list/log of these; within each set the keys are
 * sorted
 */
struct bset {
	__u64		csum;
	__u64		magic;
	__u64		seq;
	__u32		version;
	__u32		keys;

	union {
		struct bkey	start[0];
		__u64		d[0];
	};
};
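
/*
 * Illustrative sketch (not part of the original header), assuming keys counts
 * the __u64s in the list that follows (hence the d[0] alias of start[0]):
 * walking the variable-length keys of a set with the helpers defined earlier,
 * for a struct bset *i -
 *
 *	struct bkey *k, *end = bkey_idx(i->start, i->keys);
 *
 *	for (k = i->start; k < end; k = bkey_next(k))
 *		...;	// k carries KEY_PTRS(k) pointers in k->ptr[]
 *
 * The same union ends struct jset, so its keys can be walked the same way.
 */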

/* OBSOLETE */

/* UUIDS - per backing device/flash only volume metadata */

struct uuid_entry_v0 {
	__u8		uuid[16];
	__u8		label[32];
	__u32		first_reg;
	__u32		last_reg;
	__u32		invalidated;
	__u32		pad;
};

#endif /* _LINUX_BCACHE_H */