/*---------------------------------------------------------------------------+
 |  reg_norm.S                                                               |
 |                                                                           |
 | Copyright (C) 1992,1993,1994,1995,1997                                    |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail   billm@suburbia.net             |
 |                                                                           |
 | Normalize the value in a FPU_REG.                                         |
 |                                                                           |
 | Call from C as:                                                           |
 |    int FPU_normalize(FPU_REG *n)                                          |
 |                                                                           |
 |    int FPU_normalize_nuo(FPU_REG *n)                                      |
 |                                                                           |
 | Return value is the tag of the answer, or-ed with FPU_Exception if        |
 | one was raised, or -1 on internal error.                                  |
 |                                                                           |
 +---------------------------------------------------------------------------*/
  19. #include "fpu_emu.h"
  20. .text
  21. ENTRY(FPU_normalize)
  22. pushl %ebp
  23. movl %esp,%ebp
  24. pushl %ebx
  25. movl PARAM1,%ebx
  26. movl SIGH(%ebx),%edx
  27. movl SIGL(%ebx),%eax
  28. orl %edx,%edx /* ms bits */
  29. js L_done /* Already normalized */
  30. jnz L_shift_1 /* Shift left 1 - 31 bits */
  31. orl %eax,%eax
  32. jz L_zero /* The contents are zero */
  33. movl %eax,%edx
  34. xorl %eax,%eax
  35. subw $32,EXP(%ebx) /* This can cause an underflow */
  36. /* We need to shift left by 1 - 31 bits */
  37. L_shift_1:
  38. bsrl %edx,%ecx /* get the required shift in %ecx */
  39. subl $31,%ecx
  40. negl %ecx
  41. shld %cl,%eax,%edx
  42. shl %cl,%eax
  43. subw %cx,EXP(%ebx) /* This can cause an underflow */
  44. movl %edx,SIGH(%ebx)
  45. movl %eax,SIGL(%ebx)
  46. L_done:
  47. cmpw EXP_OVER,EXP(%ebx)
  48. jge L_overflow
  49. cmpw EXP_UNDER,EXP(%ebx)
  50. jle L_underflow
  51. L_exit_valid:
  52. movl TAG_Valid,%eax
  53. /* Convert the exponent to 80x87 form. */
  54. addw EXTENDED_Ebias,EXP(%ebx)
  55. andw $0x7fff,EXP(%ebx)
  56. L_exit:
  57. popl %ebx
  58. leave
  59. ret
  60. L_zero:
  61. movw $0,EXP(%ebx)
  62. movl TAG_Zero,%eax
  63. jmp L_exit
  64. L_underflow:
  65. /* Convert the exponent to 80x87 form. */
  66. addw EXTENDED_Ebias,EXP(%ebx)
  67. push %ebx
  68. call arith_underflow
  69. pop %ebx
  70. jmp L_exit
  71. L_overflow:
  72. /* Convert the exponent to 80x87 form. */
  73. addw EXTENDED_Ebias,EXP(%ebx)
  74. push %ebx
  75. call arith_overflow
  76. pop %ebx
  77. jmp L_exit
  78. /* Normalise without reporting underflow or overflow */
  79. ENTRY(FPU_normalize_nuo)
  80. pushl %ebp
  81. movl %esp,%ebp
  82. pushl %ebx
  83. movl PARAM1,%ebx
  84. movl SIGH(%ebx),%edx
  85. movl SIGL(%ebx),%eax
  86. orl %edx,%edx /* ms bits */
  87. js L_exit_nuo_valid /* Already normalized */
  88. jnz L_nuo_shift_1 /* Shift left 1 - 31 bits */
  89. orl %eax,%eax
  90. jz L_exit_nuo_zero /* The contents are zero */
  91. movl %eax,%edx
  92. xorl %eax,%eax
  93. subw $32,EXP(%ebx) /* This can cause an underflow */
  94. /* We need to shift left by 1 - 31 bits */
  95. L_nuo_shift_1:
  96. bsrl %edx,%ecx /* get the required shift in %ecx */
  97. subl $31,%ecx
  98. negl %ecx
  99. shld %cl,%eax,%edx
  100. shl %cl,%eax
  101. subw %cx,EXP(%ebx) /* This can cause an underflow */
  102. movl %edx,SIGH(%ebx)
  103. movl %eax,SIGL(%ebx)
  104. L_exit_nuo_valid:
  105. movl TAG_Valid,%eax
  106. popl %ebx
  107. leave
  108. ret
  109. L_exit_nuo_zero:
  110. movl TAG_Zero,%eax
  111. movw EXP_UNDER,EXP(%ebx)
  112. popl %ebx
  113. leave
  114. ret