# File: dclxvi-20130329/fp2e_triple.s
# Author: Ruben Niederhagen, Peter Schwabe
# Public Domain
# qhasm: enter fp2e_triple_qhasm
.text
.p2align 5
.globl _fp2e_triple_qhasm
.globl fp2e_triple_qhasm
_fp2e_triple_qhasm:
fp2e_triple_qhasm:
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp
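# The prologue aligns the stack: r11 = rsp & 31 is saved as the
# adjustment, rsp is rounded down to a 32-byte boundary, and the
# epilogue adds r11 back. The add $0 allocates the (empty) stack frame.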
# qhasm: int64 0rop
# qhasm: int64 0op
# qhasm: input 0rop
# qhasm: input 0op
# qhasm: int6464 0r0
# qhasm: int6464 0r1
# qhasm: int6464 0r2
# qhasm: int6464 0r3
# qhasm: int6464 0r4
# qhasm: int6464 0r5
# qhasm: int6464 0r6
# qhasm: int6464 0r7
# qhasm: int6464 0r8
# qhasm: int6464 0r9
# qhasm: int6464 0r10
# qhasm: int6464 0r11
# qhasm: int6464 0t0
# qhasm: int6464 0t1
# qhasm: int6464 0t2
# qhasm: int6464 0t3
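# Twelve aligned 128-bit loads pull the operand (0op = %rsi) into
# xmm0-xmm11. In dclxvi's representation an fp2 element is twelve
# coefficient vectors, each packing the two F_p components as the
# two double lanes of one register.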
# qhasm: 0r0 = *(int128 *)(0op + 0)
# asm 1: movdqa 0(<0op=int64#2),>0r0=int6464#1
# asm 2: movdqa 0(<0op=%rsi),>0r0=%xmm0
movdqa 0(%rsi),%xmm0
# qhasm: 0r1 = *(int128 *)(0op + 16)
# asm 1: movdqa 16(<0op=int64#2),>0r1=int6464#2
# asm 2: movdqa 16(<0op=%rsi),>0r1=%xmm1
movdqa 16(%rsi),%xmm1
# qhasm: 0r2 = *(int128 *)(0op + 32)
# asm 1: movdqa 32(<0op=int64#2),>0r2=int6464#3
# asm 2: movdqa 32(<0op=%rsi),>0r2=%xmm2
movdqa 32(%rsi),%xmm2
# qhasm: 0r3 = *(int128 *)(0op + 48)
# asm 1: movdqa 48(<0op=int64#2),>0r3=int6464#4
# asm 2: movdqa 48(<0op=%rsi),>0r3=%xmm3
movdqa 48(%rsi),%xmm3
# qhasm: 0r4 = *(int128 *)(0op + 64)
# asm 1: movdqa 64(<0op=int64#2),>0r4=int6464#5
# asm 2: movdqa 64(<0op=%rsi),>0r4=%xmm4
movdqa 64(%rsi),%xmm4
# qhasm: 0r5 = *(int128 *)(0op + 80)
# asm 1: movdqa 80(<0op=int64#2),>0r5=int6464#6
# asm 2: movdqa 80(<0op=%rsi),>0r5=%xmm5
movdqa 80(%rsi),%xmm5
# qhasm: 0r6 = *(int128 *)(0op + 96)
# asm 1: movdqa 96(<0op=int64#2),>0r6=int6464#7
# asm 2: movdqa 96(<0op=%rsi),>0r6=%xmm6
movdqa 96(%rsi),%xmm6
# qhasm: 0r7 = *(int128 *)(0op + 112)
# asm 1: movdqa 112(<0op=int64#2),>0r7=int6464#8
# asm 2: movdqa 112(<0op=%rsi),>0r7=%xmm7
movdqa 112(%rsi),%xmm7
# qhasm: 0r8 = *(int128 *)(0op + 128)
# asm 1: movdqa 128(<0op=int64#2),>0r8=int6464#9
# asm 2: movdqa 128(<0op=%rsi),>0r8=%xmm8
movdqa 128(%rsi),%xmm8
# qhasm: 0r9 = *(int128 *)(0op + 144)
# asm 1: movdqa 144(<0op=int64#2),>0r9=int6464#10
# asm 2: movdqa 144(<0op=%rsi),>0r9=%xmm9
movdqa 144(%rsi),%xmm9
# qhasm: 0r10 = *(int128 *)(0op + 160)
# asm 1: movdqa 160(<0op=int64#2),>0r10=int6464#11
# asm 2: movdqa 160(<0op=%rsi),>0r10=%xmm10
movdqa 160(%rsi),%xmm10
# qhasm: 0r11 = *(int128 *)(0op + 176)
# asm 1: movdqa 176(<0op=int64#2),>0r11=int6464#12
# asm 2: movdqa 176(<0op=%rsi),>0r11=%xmm11
movdqa 176(%rsi),%xmm11
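# With all twelve coefficient vectors in registers, load the packed
# constant THREE_THREE (3.0 in both double lanes) and scale every
# vector by it, tripling the element coefficient-wise.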
# qhasm: int6464 1t0
# qhasm: 1t0 = THREE_THREE
# asm 1: movdqa THREE_THREE,<1t0=int6464#13
# asm 2: movdqa THREE_THREE,<1t0=%xmm12
movdqa THREE_THREE,%xmm12
# qhasm: float6464 0r0 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r0=int6464#1
# asm 2: mulpd <1t0=%xmm12,<0r0=%xmm0
mulpd %xmm12,%xmm0
# qhasm: float6464 0r1 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r1=int6464#2
# asm 2: mulpd <1t0=%xmm12,<0r1=%xmm1
mulpd %xmm12,%xmm1
# qhasm: float6464 0r2 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r2=int6464#3
# asm 2: mulpd <1t0=%xmm12,<0r2=%xmm2
mulpd %xmm12,%xmm2
# qhasm: float6464 0r3 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r3=int6464#4
# asm 2: mulpd <1t0=%xmm12,<0r3=%xmm3
mulpd %xmm12,%xmm3
# qhasm: float6464 0r4 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r4=int6464#5
# asm 2: mulpd <1t0=%xmm12,<0r4=%xmm4
mulpd %xmm12,%xmm4
# qhasm: float6464 0r5 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r5=int6464#6
# asm 2: mulpd <1t0=%xmm12,<0r5=%xmm5
mulpd %xmm12,%xmm5
# qhasm: float6464 0r6 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r6=int6464#7
# asm 2: mulpd <1t0=%xmm12,<0r6=%xmm6
mulpd %xmm12,%xmm6
# qhasm: float6464 0r7 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r7=int6464#8
# asm 2: mulpd <1t0=%xmm12,<0r7=%xmm7
mulpd %xmm12,%xmm7
# qhasm: float6464 0r8 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r8=int6464#9
# asm 2: mulpd <1t0=%xmm12,<0r8=%xmm8
mulpd %xmm12,%xmm8
# qhasm: float6464 0r9 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r9=int6464#10
# asm 2: mulpd <1t0=%xmm12,<0r9=%xmm9
mulpd %xmm12,%xmm9
# qhasm: float6464 0r10 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r10=int6464#11
# asm 2: mulpd <1t0=%xmm12,<0r10=%xmm10
mulpd %xmm12,%xmm10
# qhasm: float6464 0r11 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r11=int6464#12
# asm 2: mulpd <1t0=%xmm12,<0r11=%xmm11
mulpd %xmm12,%xmm11
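# Store the tripled coefficients back to the result (0rop = %rdi)
# with twelve aligned 128-bit stores.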
# qhasm: *(int128 *)(0rop + 0) = 0r0
# asm 1: movdqa <0r0=int6464#1,0(<0rop=int64#1)
# asm 2: movdqa <0r0=%xmm0,0(<0rop=%rdi)
movdqa %xmm0,0(%rdi)
# qhasm: *(int128 *)(0rop + 16) = 0r1
# asm 1: movdqa <0r1=int6464#2,16(<0rop=int64#1)
# asm 2: movdqa <0r1=%xmm1,16(<0rop=%rdi)
movdqa %xmm1,16(%rdi)
# qhasm: *(int128 *)(0rop + 32) = 0r2
# asm 1: movdqa <0r2=int6464#3,32(<0rop=int64#1)
# asm 2: movdqa <0r2=%xmm2,32(<0rop=%rdi)
movdqa %xmm2,32(%rdi)
# qhasm: *(int128 *)(0rop + 48) = 0r3
# asm 1: movdqa <0r3=int6464#4,48(<0rop=int64#1)
# asm 2: movdqa <0r3=%xmm3,48(<0rop=%rdi)
movdqa %xmm3,48(%rdi)
# qhasm: *(int128 *)(0rop + 64) = 0r4
# asm 1: movdqa <0r4=int6464#5,64(<0rop=int64#1)
# asm 2: movdqa <0r4=%xmm4,64(<0rop=%rdi)
movdqa %xmm4,64(%rdi)
# qhasm: *(int128 *)(0rop + 80) = 0r5
# asm 1: movdqa <0r5=int6464#6,80(<0rop=int64#1)
# asm 2: movdqa <0r5=%xmm5,80(<0rop=%rdi)
movdqa %xmm5,80(%rdi)
# qhasm: *(int128 *)(0rop + 96) = 0r6
# asm 1: movdqa <0r6=int6464#7,96(<0rop=int64#1)
# asm 2: movdqa <0r6=%xmm6,96(<0rop=%rdi)
movdqa %xmm6,96(%rdi)
# qhasm: *(int128 *)(0rop + 112) = 0r7
# asm 1: movdqa <0r7=int6464#8,112(<0rop=int64#1)
# asm 2: movdqa <0r7=%xmm7,112(<0rop=%rdi)
movdqa %xmm7,112(%rdi)
# qhasm: *(int128 *)(0rop + 128) = 0r8
# asm 1: movdqa <0r8=int6464#9,128(<0rop=int64#1)
# asm 2: movdqa <0r8=%xmm8,128(<0rop=%rdi)
movdqa %xmm8,128(%rdi)
# qhasm: *(int128 *)(0rop + 144) = 0r9
# asm 1: movdqa <0r9=int6464#10,144(<0rop=int64#1)
# asm 2: movdqa <0r9=%xmm9,144(<0rop=%rdi)
movdqa %xmm9,144(%rdi)
# qhasm: *(int128 *)(0rop + 160) = 0r10
# asm 1: movdqa <0r10=int6464#11,160(<0rop=int64#1)
# asm 2: movdqa <0r10=%xmm10,160(<0rop=%rdi)
movdqa %xmm10,160(%rdi)
# qhasm: *(int128 *)(0rop + 176) = 0r11
# asm 1: movdqa <0r11=int6464#12,176(<0rop=int64#1)
# asm 2: movdqa <0r11=%xmm11,176(<0rop=%rdi)
movdqa %xmm11,176(%rdi)
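# Epilogue: restore rsp by adding back the alignment offset saved in
# r11; the two mov instructions are part of qhasm's standard exit
# sequence and carry no meaningful return value here.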
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret