# File:   dclxvi-20130329/fp2e_triple.s
# Author: Ruben Niederhagen, Peter Schwabe
# Public Domain
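#
# fp2e_triple_qhasm(rop, op): rop = 3*op for an element of F_{p^2}.
# An fp2e value here occupies 12 aligned 128-bit vectors (24 doubles, two
# polynomial coefficients per xmm register).  The routine loads the 12
# vectors from op (%rsi), multiplies each of them by the packed-double
# constant THREE_THREE (presumably the pair {3.0, 3.0}), and stores the
# results to rop (%rdi).
#
# A minimal C sketch of the same computation, assuming the fp2e
# representation is a plain array of 24 doubles (the struct and function
# names below are illustrative, not dclxvi's actual API):
#
#   typedef struct { double v[24]; } fp2e_sketch_t;
#
#   void fp2e_triple_sketch(fp2e_sketch_t *rop, const fp2e_sketch_t *op)
#   {
#       int i;
#       for (i = 0; i < 24; i++)
#           rop->v[i] = 3.0 * op->v[i];  /* one mulpd covers two doubles */
#   }
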
# qhasm: enter fp2e_triple_qhasm
.text
.p2align 5
.globl _fp2e_triple_qhasm
.globl fp2e_triple_qhasm
_fp2e_triple_qhasm:
fp2e_triple_qhasm:
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp

# qhasm: int64 0rop

# qhasm: int64 0op

# qhasm: input 0rop

# qhasm: input 0op

# qhasm: int6464 0r0

# qhasm: int6464 0r1

# qhasm: int6464 0r2

# qhasm: int6464 0r3

# qhasm: int6464 0r4

# qhasm: int6464 0r5

# qhasm: int6464 0r6

# qhasm: int6464 0r7

# qhasm: int6464 0r8

# qhasm: int6464 0r9

# qhasm: int6464 0r10

# qhasm: int6464 0r11

# qhasm: int6464 0t0

# qhasm: int6464 0t1

# qhasm: int6464 0t2

# qhasm: int6464 0t3

# qhasm: 0r0  = *(int128 *)(0op +   0)
# asm 1: movdqa 0(<0op=int64#2),>0r0=int6464#1
# asm 2: movdqa 0(<0op=%rsi),>0r0=%xmm0
movdqa 0(%rsi),%xmm0

# qhasm: 0r1  = *(int128 *)(0op +  16)
# asm 1: movdqa 16(<0op=int64#2),>0r1=int6464#2
# asm 2: movdqa 16(<0op=%rsi),>0r1=%xmm1
movdqa 16(%rsi),%xmm1

# qhasm: 0r2  = *(int128 *)(0op +  32)
# asm 1: movdqa 32(<0op=int64#2),>0r2=int6464#3
# asm 2: movdqa 32(<0op=%rsi),>0r2=%xmm2
movdqa 32(%rsi),%xmm2

# qhasm: 0r3  = *(int128 *)(0op +  48)
# asm 1: movdqa 48(<0op=int64#2),>0r3=int6464#4
# asm 2: movdqa 48(<0op=%rsi),>0r3=%xmm3
movdqa 48(%rsi),%xmm3

# qhasm: 0r4  = *(int128 *)(0op +  64)
# asm 1: movdqa 64(<0op=int64#2),>0r4=int6464#5
# asm 2: movdqa 64(<0op=%rsi),>0r4=%xmm4
movdqa 64(%rsi),%xmm4

# qhasm: 0r5  = *(int128 *)(0op +  80)
# asm 1: movdqa 80(<0op=int64#2),>0r5=int6464#6
# asm 2: movdqa 80(<0op=%rsi),>0r5=%xmm5
movdqa 80(%rsi),%xmm5

# qhasm: 0r6  = *(int128 *)(0op +  96)
# asm 1: movdqa 96(<0op=int64#2),>0r6=int6464#7
# asm 2: movdqa 96(<0op=%rsi),>0r6=%xmm6
movdqa 96(%rsi),%xmm6

# qhasm: 0r7  = *(int128 *)(0op + 112)
# asm 1: movdqa 112(<0op=int64#2),>0r7=int6464#8
# asm 2: movdqa 112(<0op=%rsi),>0r7=%xmm7
movdqa 112(%rsi),%xmm7

# qhasm: 0r8  = *(int128 *)(0op + 128)
# asm 1: movdqa 128(<0op=int64#2),>0r8=int6464#9
# asm 2: movdqa 128(<0op=%rsi),>0r8=%xmm8
movdqa 128(%rsi),%xmm8

# qhasm: 0r9  = *(int128 *)(0op + 144)
# asm 1: movdqa 144(<0op=int64#2),>0r9=int6464#10
# asm 2: movdqa 144(<0op=%rsi),>0r9=%xmm9
movdqa 144(%rsi),%xmm9

# qhasm: 0r10 = *(int128 *)(0op + 160)
# asm 1: movdqa 160(<0op=int64#2),>0r10=int6464#11
# asm 2: movdqa 160(<0op=%rsi),>0r10=%xmm10
movdqa 160(%rsi),%xmm10

# qhasm: 0r11 = *(int128 *)(0op + 176)
# asm 1: movdqa 176(<0op=int64#2),>0r11=int6464#12
# asm 2: movdqa 176(<0op=%rsi),>0r11=%xmm11
movdqa 176(%rsi),%xmm11

# qhasm: int6464 1t0

# qhasm: 1t0 = THREE_THREE
# asm 1: movdqa THREE_THREE,<1t0=int6464#13
# asm 2: movdqa THREE_THREE,<1t0=%xmm12
movdqa THREE_THREE,%xmm12

# qhasm: float6464 0r0  *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r0=int6464#1
# asm 2: mulpd <1t0=%xmm12,<0r0=%xmm0
mulpd %xmm12,%xmm0

# qhasm: float6464 0r1  *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r1=int6464#2
# asm 2: mulpd <1t0=%xmm12,<0r1=%xmm1
mulpd %xmm12,%xmm1

# qhasm: float6464 0r2  *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r2=int6464#3
# asm 2: mulpd <1t0=%xmm12,<0r2=%xmm2
mulpd %xmm12,%xmm2

# qhasm: float6464 0r3  *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r3=int6464#4
# asm 2: mulpd <1t0=%xmm12,<0r3=%xmm3
mulpd %xmm12,%xmm3

# qhasm: float6464 0r4  *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r4=int6464#5
# asm 2: mulpd <1t0=%xmm12,<0r4=%xmm4
mulpd %xmm12,%xmm4

# qhasm: float6464 0r5  *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r5=int6464#6
# asm 2: mulpd <1t0=%xmm12,<0r5=%xmm5
mulpd %xmm12,%xmm5

# qhasm: float6464 0r6  *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r6=int6464#7
# asm 2: mulpd <1t0=%xmm12,<0r6=%xmm6
mulpd %xmm12,%xmm6

# qhasm: float6464 0r7  *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r7=int6464#8
# asm 2: mulpd <1t0=%xmm12,<0r7=%xmm7
mulpd %xmm12,%xmm7

# qhasm: float6464 0r8  *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r8=int6464#9
# asm 2: mulpd <1t0=%xmm12,<0r8=%xmm8
mulpd %xmm12,%xmm8

# qhasm: float6464 0r9  *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r9=int6464#10
# asm 2: mulpd <1t0=%xmm12,<0r9=%xmm9
mulpd %xmm12,%xmm9

# qhasm: float6464 0r10 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r10=int6464#11
# asm 2: mulpd <1t0=%xmm12,<0r10=%xmm10
mulpd %xmm12,%xmm10

# qhasm: float6464 0r11 *= 1t0
# asm 1: mulpd <1t0=int6464#13,<0r11=int6464#12
# asm 2: mulpd <1t0=%xmm12,<0r11=%xmm11
mulpd %xmm12,%xmm11

# qhasm: *(int128 *)(0rop +   0) =  0r0
# asm 1: movdqa <0r0=int6464#1,0(<0rop=int64#1)
# asm 2: movdqa <0r0=%xmm0,0(<0rop=%rdi)
movdqa %xmm0,0(%rdi)

# qhasm: *(int128 *)(0rop +  16) =  0r1
# asm 1: movdqa <0r1=int6464#2,16(<0rop=int64#1)
# asm 2: movdqa <0r1=%xmm1,16(<0rop=%rdi)
movdqa %xmm1,16(%rdi)

# qhasm: *(int128 *)(0rop +  32) =  0r2
# asm 1: movdqa <0r2=int6464#3,32(<0rop=int64#1)
# asm 2: movdqa <0r2=%xmm2,32(<0rop=%rdi)
movdqa %xmm2,32(%rdi)

# qhasm: *(int128 *)(0rop +  48) =  0r3
# asm 1: movdqa <0r3=int6464#4,48(<0rop=int64#1)
# asm 2: movdqa <0r3=%xmm3,48(<0rop=%rdi)
movdqa %xmm3,48(%rdi)

# qhasm: *(int128 *)(0rop +  64) =  0r4
# asm 1: movdqa <0r4=int6464#5,64(<0rop=int64#1)
# asm 2: movdqa <0r4=%xmm4,64(<0rop=%rdi)
movdqa %xmm4,64(%rdi)

# qhasm: *(int128 *)(0rop +  80) =  0r5
# asm 1: movdqa <0r5=int6464#6,80(<0rop=int64#1)
# asm 2: movdqa <0r5=%xmm5,80(<0rop=%rdi)
movdqa %xmm5,80(%rdi)

# qhasm: *(int128 *)(0rop +  96) =  0r6
# asm 1: movdqa <0r6=int6464#7,96(<0rop=int64#1)
# asm 2: movdqa <0r6=%xmm6,96(<0rop=%rdi)
movdqa %xmm6,96(%rdi)

# qhasm: *(int128 *)(0rop + 112) =  0r7
# asm 1: movdqa <0r7=int6464#8,112(<0rop=int64#1)
# asm 2: movdqa <0r7=%xmm7,112(<0rop=%rdi)
movdqa %xmm7,112(%rdi)

# qhasm: *(int128 *)(0rop + 128) =  0r8
# asm 1: movdqa <0r8=int6464#9,128(<0rop=int64#1)
# asm 2: movdqa <0r8=%xmm8,128(<0rop=%rdi)
movdqa %xmm8,128(%rdi)

# qhasm: *(int128 *)(0rop + 144) =  0r9
# asm 1: movdqa <0r9=int6464#10,144(<0rop=int64#1)
# asm 2: movdqa <0r9=%xmm9,144(<0rop=%rdi)
movdqa %xmm9,144(%rdi)

# qhasm: *(int128 *)(0rop + 160) = 0r10
# asm 1: movdqa <0r10=int6464#11,160(<0rop=int64#1)
# asm 2: movdqa <0r10=%xmm10,160(<0rop=%rdi)
movdqa %xmm10,160(%rdi)

# qhasm: *(int128 *)(0rop + 176) = 0r11
# asm 1: movdqa <0r11=int6464#12,176(<0rop=int64#1)
# asm 2: movdqa <0r11=%xmm11,176(<0rop=%rdi)
movdqa %xmm11,176(%rdi)

# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret