div64.h
/* MN10300 64-bit division
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_DIV64
#define _ASM_DIV64

#include <linux/types.h>

extern void ____unhandled_size_in_do_div___(void);
/*
 * Beginning with gcc 4.6, the MDR register is represented explicitly.  We
 * must, therefore, at least explicitly clobber the register when we make
 * changes to it.  The following assembly fragments *could* be rearranged in
 * order to leave the moves to/from the MDR register to the compiler, but the
 * gains would be minimal at best.
 */
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
# define CLOBBER_MDR_CC	"mdr", "cc"
#else
# define CLOBBER_MDR_CC	"cc"
#endif
/*
 * divide n by base, leaving the result in n and returning the remainder
 * - we can do this quite efficiently on the MN10300 by cascading the divides
 *   through the MDR register
 */
#define do_div(n, base)						\
({								\
	unsigned __rem = 0;					\
	if (sizeof(n) <= 4) {					\
		asm("mov	%1,mdr	\n"			\
		    "divu	%2,%0	\n"			\
		    "mov	mdr,%1	\n"			\
		    : "+r"(n), "=d"(__rem)			\
		    : "r"(base), "1"(__rem)			\
		    : CLOBBER_MDR_CC				\
		    );						\
	} else if (sizeof(n) <= 8) {				\
		union {						\
			unsigned long long l;			\
			u32 w[2];				\
		} __quot;					\
		__quot.l = n;					\
		asm("mov	%0,mdr	\n"	/* MDR = 0 */	\
		    "divu	%3,%1	\n"			\
		    /* __quot.w[1] = dividend MSW / base, */	\
		    /* MDR = dividend MSW % base */		\
		    "divu	%3,%2	\n"			\
		    /* __quot.w[0] = MDR:dividend LSW / base, */ \
		    /* MDR = MDR:dividend LSW % base */		\
		    "mov	mdr,%0	\n"			\
		    : "=d"(__rem), "=r"(__quot.w[1]),		\
		      "=r"(__quot.w[0])				\
		    : "r"(base), "0"(__rem), "1"(__quot.w[1]),	\
		      "2"(__quot.w[0])				\
		    : CLOBBER_MDR_CC				\
		    );						\
		n = __quot.l;					\
	} else {						\
		____unhandled_size_in_do_div___();		\
	}							\
	__rem;							\
})
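
/*
 * Usage and reference sketch (illustrative only, not part of the original
 * header): do_div() divides its first argument in place and evaluates to
 * the remainder, e.g. splitting a nanosecond count (hypothetical values):
 *
 *	u64 ns = 2500000000ULL;
 *	u32 frac = do_div(ns, 1000000000);	// ns == 2, frac == 500000000
 *
 * The hypothetical helper below is a portable C model of the cascaded
 * divide used in the 64-bit path above: the high word is divided first,
 * and its remainder is carried (via MDR on real hardware) into the
 * low-word divide.
 */
static inline unsigned long long
__div64_32_sketch(unsigned long long n, unsigned int base, unsigned int *rem)
{
	u32 hi = n >> 32;
	u32 lo = (u32)n;
	u32 q_hi = hi / base;		/* first divu */
	unsigned long long mid = ((unsigned long long)(hi % base) << 32) | lo;
	u32 q_lo = (u32)(mid / base);	/* second divu: quotient fits in
					 * 32 bits since hi % base < base */
	*rem = (u32)(mid % base);	/* what MDR holds at the end */

	return ((unsigned long long)q_hi << 32) | q_lo;
}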
/*
 * do an unsigned 32-bit multiply and divide with intermediate 64-bit product
 * so as not to lose accuracy
 * - we use the MDR register to hold the MSW of the product
 */
static inline __attribute__((const))
unsigned __muldiv64u(unsigned val, unsigned mult, unsigned div)
{
	unsigned result;

	asm("mulu	%2,%0	\n"	/* MDR:val = val * mult */
	    "divu	%3,%0	\n"	/* val = MDR:val / div; */
					/* MDR = MDR:val % div */
	    : "=r"(result)
	    : "0"(val), "ir"(mult), "r"(div)
	    : CLOBBER_MDR_CC
	    );

	return result;
}
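
/*
 * Illustrative sketch (not part of the original header): without an
 * MDR-style 64-bit product register, __muldiv64u() amounts to widening
 * the product in C.  The result is only meaningful when the quotient
 * fits in 32 bits, e.g. scaling a tick count as in
 * __muldiv64u(ticks, 1000000, ticks_per_sec) (hypothetical names).
 */
static inline unsigned __muldiv64u_sketch(unsigned val, unsigned mult,
					  unsigned div)
{
	/* widen to 64 bits before dividing so the product cannot overflow */
	return (unsigned)(((unsigned long long)val * mult) / div);
}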
/*
 * do a signed 32-bit multiply and divide with intermediate 64-bit product so
 * as not to lose accuracy
 * - we use the MDR register to hold the MSW of the product
 */
static inline __attribute__((const))
signed __muldiv64s(signed val, signed mult, signed div)
{
	signed result;

	asm("mul	%2,%0	\n"	/* MDR:val = val * mult */
	    "div	%3,%0	\n"	/* val = MDR:val / div; */
					/* MDR = MDR:val % div */
	    : "=r"(result)
	    : "0"(val), "ir"(mult), "r"(div)
	    : CLOBBER_MDR_CC
	    );

	return result;
}
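
/*
 * Illustrative sketch (not part of the original header): the signed
 * variant widens to a signed 64-bit product before dividing; as above,
 * the result is only meaningful when the quotient fits in 32 bits.
 */
static inline signed __muldiv64s_sketch(signed val, signed mult, signed div)
{
	/* widen to 64 bits before dividing so the product cannot overflow */
	return (signed)(((long long)val * mult) / div);
}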
#endif /* _ASM_DIV64 */