# File: dclxvi-20130329/fp2e_neg2.s
# Author: Ruben Niederhagen, Peter Schwabe
# Public Domain

# qhasm: enter fp2e_neg2_qhasm
.text
.p2align 5
.globl _fp2e_neg2_qhasm
.globl fp2e_neg2_qhasm
_fp2e_neg2_qhasm:
fp2e_neg2_qhasm:
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp
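# The prologue above rounds %rsp down to a 32-byte boundary (subtracting
# %rsp & 31); the `add $0` presumably encodes a zero-byte local frame, so no
# extra stack space is reserved.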
# qhasm: int64 0rop
# qhasm: input 0rop
# qhasm: int6464 0r0
# qhasm: int6464 0r1
# qhasm: int6464 0r2
# qhasm: int6464 0r3
# qhasm: int6464 0r4
# qhasm: int6464 0r5
# qhasm: int6464 0r6
# qhasm: int6464 0r7
# qhasm: int6464 0r8
# qhasm: int6464 0r9
# qhasm: int6464 0r10
# qhasm: int6464 0r11
# qhasm: int6464 0t0
# qhasm: int6464 0t1
# qhasm: int6464 0t2
# qhasm: int6464 0t3
# qhasm: 0r0 = *(int128 *)(0rop + 0)
# asm 1: movdqa 0(<0rop=int64#1),>0r0=int6464#1
# asm 2: movdqa 0(<0rop=%rdi),>0r0=%xmm0
movdqa 0(%rdi),%xmm0
# qhasm: 0r1 = *(int128 *)(0rop + 16)
# asm 1: movdqa 16(<0rop=int64#1),>0r1=int6464#2
# asm 2: movdqa 16(<0rop=%rdi),>0r1=%xmm1
movdqa 16(%rdi),%xmm1
# qhasm: 0r2 = *(int128 *)(0rop + 32)
# asm 1: movdqa 32(<0rop=int64#1),>0r2=int6464#3
# asm 2: movdqa 32(<0rop=%rdi),>0r2=%xmm2
movdqa 32(%rdi),%xmm2
# qhasm: 0r3 = *(int128 *)(0rop + 48)
# asm 1: movdqa 48(<0rop=int64#1),>0r3=int6464#4
# asm 2: movdqa 48(<0rop=%rdi),>0r3=%xmm3
movdqa 48(%rdi),%xmm3
# qhasm: 0r4 = *(int128 *)(0rop + 64)
# asm 1: movdqa 64(<0rop=int64#1),>0r4=int6464#5
# asm 2: movdqa 64(<0rop=%rdi),>0r4=%xmm4
movdqa 64(%rdi),%xmm4
# qhasm: 0r5 = *(int128 *)(0rop + 80)
# asm 1: movdqa 80(<0rop=int64#1),>0r5=int6464#6
# asm 2: movdqa 80(<0rop=%rdi),>0r5=%xmm5
movdqa 80(%rdi),%xmm5
# qhasm: 0r6 = *(int128 *)(0rop + 96)
# asm 1: movdqa 96(<0rop=int64#1),>0r6=int6464#7
# asm 2: movdqa 96(<0rop=%rdi),>0r6=%xmm6
movdqa 96(%rdi),%xmm6
# qhasm: 0r7 = *(int128 *)(0rop + 112)
# asm 1: movdqa 112(<0rop=int64#1),>0r7=int6464#8
# asm 2: movdqa 112(<0rop=%rdi),>0r7=%xmm7
movdqa 112(%rdi),%xmm7
# qhasm: 0r8 = *(int128 *)(0rop + 128)
# asm 1: movdqa 128(<0rop=int64#1),>0r8=int6464#9
# asm 2: movdqa 128(<0rop=%rdi),>0r8=%xmm8
movdqa 128(%rdi),%xmm8
# qhasm: 0r9 = *(int128 *)(0rop + 144)
# asm 1: movdqa 144(<0rop=int64#1),>0r9=int6464#10
# asm 2: movdqa 144(<0rop=%rdi),>0r9=%xmm9
movdqa 144(%rdi),%xmm9
# qhasm: 0r10 = *(int128 *)(0rop + 160)
# asm 1: movdqa 160(<0rop=int64#1),>0r10=int6464#11
# asm 2: movdqa 160(<0rop=%rdi),>0r10=%xmm10
movdqa 160(%rdi),%xmm10
# qhasm: 0r11 = *(int128 *)(0rop + 176)
# asm 1: movdqa 176(<0rop=int64#1),>0r11=int6464#12
# asm 2: movdqa 176(<0rop=%rdi),>0r11=%xmm11
movdqa 176(%rdi),%xmm11
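# At this point all twelve 16-byte words of the operand (192 bytes starting at
# rop in %rdi) sit in %xmm0..%xmm11.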
# qhasm: int6464 1t0
# qhasm: 1t0 = MONE_MONE
# asm 1: movdqa MONE_MONE,<1t0=int6464#13
# asm 2: movdqa MONE_MONE,<1t0=%xmm12
movdqa MONE_MONE,%xmm12
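# MONE_MONE is a packed constant defined elsewhere in the library, presumably
# the double pair (-1.0, -1.0) as its name and its use here suggest; the mulpd
# instructions below negate both double lanes of each word at once.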
# qhasm: float6464 0r0 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r0=int6464#1
# asm 2: mulpd <1t0=%xmm12,<0r0=%xmm0
mulpd %xmm12,%xmm0
# qhasm: float6464 0r1 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r1=int6464#2
# asm 2: mulpd <1t0=%xmm12,<0r1=%xmm1
mulpd %xmm12,%xmm1
# qhasm: float6464 0r2 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r2=int6464#3
# asm 2: mulpd <1t0=%xmm12,<0r2=%xmm2
mulpd %xmm12,%xmm2
# qhasm: float6464 0r3 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r3=int6464#4
# asm 2: mulpd <1t0=%xmm12,<0r3=%xmm3
mulpd %xmm12,%xmm3
# qhasm: float6464 0r4 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r4=int6464#5
# asm 2: mulpd <1t0=%xmm12,<0r4=%xmm4
mulpd %xmm12,%xmm4
# qhasm: float6464 0r5 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r5=int6464#6
# asm 2: mulpd <1t0=%xmm12,<0r5=%xmm5
mulpd %xmm12,%xmm5
# qhasm: float6464 0r6 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r6=int6464#7
# asm 2: mulpd <1t0=%xmm12,<0r6=%xmm6
mulpd %xmm12,%xmm6
# qhasm: float6464 0r7 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r7=int6464#8
# asm 2: mulpd <1t0=%xmm12,<0r7=%xmm7
mulpd %xmm12,%xmm7
# qhasm: float6464 0r8 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r8=int6464#9
# asm 2: mulpd <1t0=%xmm12,<0r8=%xmm8
mulpd %xmm12,%xmm8
# qhasm: float6464 0r9 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r9=int6464#10
# asm 2: mulpd <1t0=%xmm12,<0r9=%xmm9
mulpd %xmm12,%xmm9
# qhasm: float6464 0r10 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r10=int6464#11
# asm 2: mulpd <1t0=%xmm12,<0r10=%xmm10
mulpd %xmm12,%xmm10
# qhasm: float6464 0r11 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r11=int6464#12
# asm 2: mulpd <1t0=%xmm12,<0r11=%xmm11
mulpd %xmm12,%xmm11
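# Every coefficient word has now been negated; write the result back to rop.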
# qhasm: *(int128 *)(0rop + 0) = 0r0
# asm 1: movdqa <0r0=int6464#1,0(<0rop=int64#1)
# asm 2: movdqa <0r0=%xmm0,0(<0rop=%rdi)
movdqa %xmm0,0(%rdi)
# qhasm: *(int128 *)(0rop + 16) = 0r1
# asm 1: movdqa <0r1=int6464#2,16(<0rop=int64#1)
# asm 2: movdqa <0r1=%xmm1,16(<0rop=%rdi)
movdqa %xmm1,16(%rdi)
# qhasm: *(int128 *)(0rop + 32) = 0r2
# asm 1: movdqa <0r2=int6464#3,32(<0rop=int64#1)
# asm 2: movdqa <0r2=%xmm2,32(<0rop=%rdi)
movdqa %xmm2,32(%rdi)
# qhasm: *(int128 *)(0rop + 48) = 0r3
# asm 1: movdqa <0r3=int6464#4,48(<0rop=int64#1)
# asm 2: movdqa <0r3=%xmm3,48(<0rop=%rdi)
movdqa %xmm3,48(%rdi)
# qhasm: *(int128 *)(0rop + 64) = 0r4
# asm 1: movdqa <0r4=int6464#5,64(<0rop=int64#1)
# asm 2: movdqa <0r4=%xmm4,64(<0rop=%rdi)
movdqa %xmm4,64(%rdi)
# qhasm: *(int128 *)(0rop + 80) = 0r5
# asm 1: movdqa <0r5=int6464#6,80(<0rop=int64#1)
# asm 2: movdqa <0r5=%xmm5,80(<0rop=%rdi)
movdqa %xmm5,80(%rdi)
# qhasm: *(int128 *)(0rop + 96) = 0r6
# asm 1: movdqa <0r6=int6464#7,96(<0rop=int64#1)
# asm 2: movdqa <0r6=%xmm6,96(<0rop=%rdi)
movdqa %xmm6,96(%rdi)
# qhasm: *(int128 *)(0rop + 112) = 0r7
# asm 1: movdqa <0r7=int6464#8,112(<0rop=int64#1)
# asm 2: movdqa <0r7=%xmm7,112(<0rop=%rdi)
movdqa %xmm7,112(%rdi)
# qhasm: *(int128 *)(0rop + 128) = 0r8
# asm 1: movdqa <0r8=int6464#9,128(<0rop=int64#1)
# asm 2: movdqa <0r8=%xmm8,128(<0rop=%rdi)
movdqa %xmm8,128(%rdi)
# qhasm: *(int128 *)(0rop + 144) = 0r9
# asm 1: movdqa <0r9=int6464#10,144(<0rop=int64#1)
# asm 2: movdqa <0r9=%xmm9,144(<0rop=%rdi)
movdqa %xmm9,144(%rdi)
# qhasm: *(int128 *)(0rop + 160) = 0r10
# asm 1: movdqa <0r10=int6464#11,160(<0rop=int64#1)
# asm 2: movdqa <0r10=%xmm10,160(<0rop=%rdi)
movdqa %xmm10,160(%rdi)
# qhasm: *(int128 *)(0rop + 176) = 0r11
# asm 1: movdqa <0r11=int6464#12,176(<0rop=int64#1)
# asm 2: movdqa <0r11=%xmm11,176(<0rop=%rdi)
movdqa %xmm11,176(%rdi)
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret
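# Reference sketch: in C terms this routine negates an fp2e element in place by
# flipping the sign of each stored double. The prototype and the flat 24-double
# layout below are assumptions inferred from the 192 bytes touched above, not
# the exact dclxvi declarations.
#
#   /* hypothetical C equivalent */
#   void fp2e_neg2_ref(double *rop)   /* rop: 24 doubles, 16-byte aligned */
#   {
#       for (int i = 0; i < 24; i++)
#           rop[i] = -rop[i];         /* same effect as mulpd with (-1,-1) */
#   }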