/*
 * fs/f2fs/gc.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define GC_THREAD_MIN_WB_PAGES          1       /*
                                                 * a threshold to determine
                                                 * whether the IO subsystem
                                                 * is idle or not
                                                 */
#define DEF_GC_THREAD_MIN_SLEEP_TIME    30000   /* milliseconds */
#define DEF_GC_THREAD_MAX_SLEEP_TIME    60000
#define DEF_GC_THREAD_NOGC_SLEEP_TIME   300000  /* wait 5 min */
#define LIMIT_INVALID_BLOCK     40 /* percentage over total user space */
#define LIMIT_FREE_BLOCK        40 /* percentage over invalid + free space */

/* Search max. number of dirty segments to select a victim segment */
#define DEF_MAX_VICTIM_SEARCH   4096    /* covers 8GB */
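
/*
 * Sizing sketch (an assumption about the common configuration, not stated
 * in this header): with 2 MB segments (512 blocks of 4 KB), scanning up to
 * DEF_MAX_VICTIM_SEARCH = 4096 dirty segments spans 4096 * 2 MB = 8 GB of
 * user space, which is where the "covers 8GB" note above comes from.
 */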

struct f2fs_gc_kthread {
        struct task_struct *f2fs_gc_task;
        wait_queue_head_t gc_wait_queue_head;

        /* for gc sleep time */
        unsigned int min_sleep_time;
        unsigned int max_sleep_time;
        unsigned int no_gc_sleep_time;

        /* for changing gc mode */
        unsigned int gc_idle;
};

struct gc_inode_list {
        struct list_head ilist;
        struct radix_tree_root iroot;
};

/*
 * inline functions
 */
static inline block_t free_user_blocks(struct f2fs_sb_info *sbi)
{
        if (free_segments(sbi) < overprovision_segments(sbi))
                return 0;
        else
                return (free_segments(sbi) - overprovision_segments(sbi))
                        << sbi->log_blocks_per_seg;
}
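
/*
 * Worked example (hypothetical numbers, assuming log_blocks_per_seg = 9,
 * i.e. 512 blocks of 4 KB per 2 MB segment): with 1000 free segments and
 * 100 overprovision segments, free_user_blocks() returns
 * (1000 - 100) << 9 = 460800 blocks, roughly 1.8 GB. Overprovisioned
 * segments are excluded because they are not available as user space.
 */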

static inline block_t limit_invalid_user_blocks(struct f2fs_sb_info *sbi)
{
        return (long)(sbi->user_block_count * LIMIT_INVALID_BLOCK) / 100;
}

static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi)
{
        block_t reclaimable_user_blocks = sbi->user_block_count -
                                                written_block_count(sbi);
        return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100;
}

static inline void increase_sleep_time(struct f2fs_gc_kthread *gc_th,
                                                        long *wait)
{
        if (*wait == gc_th->no_gc_sleep_time)
                return;

        *wait += gc_th->min_sleep_time;
        if (*wait > gc_th->max_sleep_time)
                *wait = gc_th->max_sleep_time;
}

static inline void decrease_sleep_time(struct f2fs_gc_kthread *gc_th,
                                                        long *wait)
{
        if (*wait == gc_th->no_gc_sleep_time)
                *wait = gc_th->max_sleep_time;

        *wait -= gc_th->min_sleep_time;
        if (*wait <= gc_th->min_sleep_time)
                *wait = gc_th->min_sleep_time;
}
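
/*
 * Resulting backoff with the default times above (min 30 s, max 60 s,
 * no-GC 300 s): increase_sleep_time() steps the wait up by 30 s per idle
 * round and caps it at 60 s, while a wait already parked at the 300 s
 * no-GC interval is left untouched. decrease_sleep_time() first pulls a
 * 300 s wait back to 60 s, then subtracts 30 s, and clamps at the 30 s
 * floor, so the thread quickly returns to its shortest interval once
 * there is work to do again.
 */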

static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
{
        block_t invalid_user_blocks = sbi->user_block_count -
                                        written_block_count(sbi);
        /*
         * Background GC is triggered with the following conditions.
         * 1. There are enough invalid blocks.
         * 2. There is not enough free space.
         */
        if (invalid_user_blocks > limit_invalid_user_blocks(sbi) &&
                        free_user_blocks(sbi) < limit_free_user_blocks(sbi))
                return true;
        return false;
}
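
/*
 * Worked example with the 40% limits above (hypothetical numbers): with
 * user_block_count = 1,000,000 and written_block_count() = 500,000, the
 * 500,000 blocks not holding written data exceed the 400,000-block limit
 * (40% of user space), so the first condition holds; background GC then
 * still requires free_user_blocks() to be below 200,000 (40% of those
 * 500,000 reclaimable blocks). Either condition alone is not enough.
 */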

static inline int is_idle(struct f2fs_sb_info *sbi)
{
        struct block_device *bdev = sbi->sb->s_bdev;
        struct request_queue *q = bdev_get_queue(bdev);
        struct request_list *rl = &q->root_rl;

        return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]);
}
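
/*
 * Note on is_idle(): q->root_rl is the queue's root request_list, and its
 * count[] entries track how many requests are currently allocated for
 * synchronous and asynchronous I/O. The device is treated as idle only
 * when both are zero, so background GC yields to foreground I/O. This
 * accounting exists in the legacy (single-queue) block layer.
 */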