/* word-at-a-time.h */
#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H

#include <linux/kernel.h>
#include <asm/byteorder.h>

#ifdef __BIG_ENDIAN
/*
 * Big-endian flavour: per-word constants for locating a zero byte
 * without examining each byte individually.
 *
 * low_bits  = REPEAT_BYTE(0x7f): 0x7f replicated into every byte.
 * high_bits = REPEAT_BYTE(0xfe) + 1: 0xfe in every byte, plus one,
 *             used as the addend in has_zero() below.
 */
struct word_at_a_time {
	const unsigned long high_bits, low_bits;
};

#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }
  10. /* Bit set in the bytes that have a zero */
  11. static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
  12. {
  13. unsigned long mask = (val & c->low_bits) + c->low_bits;
  14. return ~(mask | rhs);
  15. }
  16. #define create_zero_mask(mask) (mask)
  17. static inline long find_zero(unsigned long mask)
  18. {
  19. long byte = 0;
  20. #ifdef CONFIG_64BIT
  21. if (mask >> 32)
  22. mask >>= 32;
  23. else
  24. byte = 4;
  25. #endif
  26. if (mask >> 16)
  27. mask >>= 16;
  28. else
  29. byte += 2;
  30. return (mask >> 8) ? byte : byte + 1;
  31. }
  32. static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
  33. {
  34. unsigned long rhs = val | c->low_bits;
  35. *data = rhs;
  36. return (val + c->high_bits) & ~rhs;
  37. }
/*
 * Fallback bytemask: keep everything above the reported zero's bit.
 * An architecture header can define a better zero_bytemask before
 * including this file.  NOTE(review): __fls() is presumably the
 * kernel "find last set bit" bitop — confirm against asm/bitops.h.
 */
#ifndef zero_bytemask
#define zero_bytemask(mask) (~1ul << __fls(mask))
#endif
#else

/*
 * The optimal byte mask counting is probably going to be something
 * that is architecture-specific. If you have a reliably fast
 * bit count instruction, that might be better than the multiply
 * and shift, for example.
 */
/*
 * Little-endian flavour.
 *
 * one_bits  = REPEAT_BYTE(0x01): 0x01 in every byte.
 * high_bits = REPEAT_BYTE(0x80): 0x80 in every byte.
 * Used together in has_zero() below: (a - one_bits) & ~a & high_bits.
 */
struct word_at_a_time {
	const unsigned long one_bits, high_bits;
};

#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
#ifdef CONFIG_64BIT

/*
 * Jan Achrenius on G+: microoptimized version of
 * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
 * that works for the bytemasks without having to
 * mask them first.
 */
  59. static inline long count_masked_bytes(unsigned long mask)
  60. {
  61. return mask*0x0001020304050608ul >> 56;
  62. }
#else /* 32-bit case */

/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
  65. static inline long count_masked_bytes(long mask)
  66. {
  67. /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
  68. long a = (0x0ff0001+mask) >> 23;
  69. /* Fix the 1 for 00 case */
  70. return a & mask;
  71. }
#endif
  73. /* Return nonzero if it has a zero */
  74. static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
  75. {
  76. unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
  77. *bits = mask;
  78. return mask;
  79. }
/*
 * Little-endian: has_zero() already stored the final bit pattern in
 * *bits, so there is nothing left to prepare — pass it straight
 * through.  @a and @c are unused here.
 */
static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
{
	return bits;
}
  84. static inline unsigned long create_zero_mask(unsigned long bits)
  85. {
  86. bits = (bits - 1) & ~bits;
  87. return bits >> 7;
  88. }
/* The mask we created is directly usable as a bytemask */
#define zero_bytemask(mask) (mask)

/*
 * Index of the first zero byte = number of 0xff bytes in the
 * create_zero_mask() result.
 */
static inline unsigned long find_zero(unsigned long mask)
{
	return count_masked_bytes(mask);
}
#endif /* __BIG_ENDIAN */

#endif /* _ASM_WORD_AT_A_TIME_H */