  1. # File: dclxvi-20130329/fp2e_square.s
  2. # Author: Ruben Niederhagen, Peter Schwabe
  3. # Public Domain
  4. # qhasm: int64 rop
  5. # qhasm: int64 op
  6. # qhasm: input rop
  7. # qhasm: input op
  8. # qhasm: int6464 r0
  9. # qhasm: int6464 r1
  10. # qhasm: int6464 r2
  11. # qhasm: int6464 r3
  12. # qhasm: int6464 r4
  13. # qhasm: int6464 r5
  14. # qhasm: int6464 r6
  15. # qhasm: int6464 r7
  16. # qhasm: int6464 r8
  17. # qhasm: int6464 r9
  18. # qhasm: int6464 r10
  19. # qhasm: int6464 r11
  20. # qhasm: int6464 r12
  21. # qhasm: int6464 r13
  22. # qhasm: int6464 r14
  23. # qhasm: int6464 r15
  24. # qhasm: int6464 r16
  25. # qhasm: int6464 r17
  26. # qhasm: int6464 r18
  27. # qhasm: int6464 r19
  28. # qhasm: int6464 r20
  29. # qhasm: int6464 r21
  30. # qhasm: int6464 r22
  31. # qhasm: int6464 tmp0
  32. # qhasm: int6464 tmp1
  33. # qhasm: int6464 tmp2
  34. # qhasm: int6464 tmp3
  35. # qhasm: int6464 tmp4
  36. # qhasm: int6464 tmp5
  37. # qhasm: int6464 tmp6
  38. # qhasm: int6464 tmp7
  39. # qhasm: int6464 tmp8
  40. # qhasm: int6464 tmp9
  41. # qhasm: int6464 tmp10
  42. # qhasm: int6464 tmp11
  43. # qhasm: int64 t1p
  44. # qhasm: int64 t2p
  45. # qhasm: int64 rp
  46. # qhasm: int6464 0yoff
  47. # qhasm: int6464 t0
  48. # qhasm: int6464 t1
  49. # qhasm: int6464 t2
  50. # qhasm: int6464 t3
  51. # qhasm: int6464 t4
  52. # qhasm: int6464 t5
  53. # qhasm: int6464 t6
  54. # qhasm: int6464 t7
  55. # qhasm: int6464 t8
  56. # qhasm: int6464 t9
  57. # qhasm: int6464 t10
  58. # qhasm: int6464 t11
  59. # qhasm: int6464 t12
  60. # qhasm: int6464 t13
  61. # qhasm: int6464 t14
  62. # qhasm: int6464 t15
  63. # qhasm: int6464 t16
  64. # qhasm: int6464 t17
  65. # qhasm: int6464 t18
  66. # qhasm: int6464 t19
  67. # qhasm: int6464 t20
  68. # qhasm: int6464 t21
  69. # qhasm: int6464 t22
  70. # qhasm: int6464 ab0
  71. # qhasm: int6464 ab1
  72. # qhasm: int6464 ab2
  73. # qhasm: int6464 ab3
  74. # qhasm: int6464 ab4
  75. # qhasm: int6464 ab5
  76. # qhasm: int6464 ab6
  77. # qhasm: int6464 ab7
  78. # qhasm: int6464 ab8
  79. # qhasm: int6464 ab9
  80. # qhasm: int6464 ab10
  81. # qhasm: int6464 ab11
  82. # qhasm: int6464 ab0six
  83. # qhasm: int6464 ab1six
  84. # qhasm: int6464 ab2six
  85. # qhasm: int6464 ab3six
  86. # qhasm: int6464 ab4six
  87. # qhasm: int6464 ab5six
  88. # qhasm: int6464 ab6six
  89. # qhasm: int6464 ab7six
  90. # qhasm: int6464 ab8six
  91. # qhasm: int6464 ab9six
  92. # qhasm: int6464 ab10six
  93. # qhasm: int6464 ab11six
  94. # qhasm: int64 myp
  95. # qhasm: int6464 round
  96. # qhasm: int6464 carry
  97. # qhasm: int6464 2t6
  98. # qhasm: stack6144 mystack
  99. # qhasm: enter fp2e_square_qhasm
  100. .text
  101. .p2align 5
  102. .globl _fp2e_square_qhasm
  103. .globl fp2e_square_qhasm
  104. _fp2e_square_qhasm:
  105. fp2e_square_qhasm:
  106. mov %rsp,%r11
  107. and $31,%r11
  108. add $768,%r11
  109. sub %r11,%rsp
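# Editorial note (not part of the generated qhasm output): the prologue
# above aligns the stack and reserves the 768-byte scratch area declared as
# "stack6144 mystack" (6144 bits = 768 bytes). A plausible reading of how
# the code below uses it: bytes 0..191 hold one 12-coefficient operand,
# bytes 192..383 the other, and bytes 384..767 the double-length product
# (coefficients r0..r22).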
  110. # qhasm: myp = &mystack
  111. # asm 1: leaq <mystack=stack6144#1,>myp=int64#3
  112. # asm 2: leaq <mystack=0(%rsp),>myp=%rdx
  113. leaq 0(%rsp),%rdx
  114. # qhasm: r0 = *(int128 *)(op + 0)
  115. # asm 1: movdqa 0(<op=int64#2),>r0=int6464#1
  116. # asm 2: movdqa 0(<op=%rsi),>r0=%xmm0
  117. movdqa 0(%rsi),%xmm0
  118. # qhasm: tmp0 = r0
  119. # asm 1: movdqa <r0=int6464#1,>tmp0=int6464#2
  120. # asm 2: movdqa <r0=%xmm0,>tmp0=%xmm1
  121. movdqa %xmm0,%xmm1
  122. # qhasm: tmp0 = shuffle float64 of tmp0 and tmp0 by 0x1
  123. # asm 1: shufpd $0x1,<tmp0=int6464#2,<tmp0=int6464#2
  124. # asm 2: shufpd $0x1,<tmp0=%xmm1,<tmp0=%xmm1
  125. shufpd $0x1,%xmm1,%xmm1
  126. # qhasm: float6464 r0[0] -= tmp0[0]
  127. # asm 1: subsd <tmp0=int6464#2,<r0=int6464#1
  128. # asm 2: subsd <tmp0=%xmm1,<r0=%xmm0
  129. subsd %xmm1,%xmm0
  130. # qhasm: *(int128 *)(myp + 0) = r0
  131. # asm 1: movdqa <r0=int6464#1,0(<myp=int64#3)
  132. # asm 2: movdqa <r0=%xmm0,0(<myp=%rdx)
  133. movdqa %xmm0,0(%rdx)
  134. # qhasm: r0 = tmp0
  135. # asm 1: movdqa <tmp0=int6464#2,>r0=int6464#1
  136. # asm 2: movdqa <tmp0=%xmm1,>r0=%xmm0
  137. movdqa %xmm1,%xmm0
  138. # qhasm: r0 = unpack high double of r0 and r0
  139. # asm 1: unpckhpd <r0=int6464#1,<r0=int6464#1
  140. # asm 2: unpckhpd <r0=%xmm0,<r0=%xmm0
  141. unpckhpd %xmm0,%xmm0
  142. # qhasm: float6464 tmp0 += r0
  143. # asm 1: addpd <r0=int6464#1,<tmp0=int6464#2
  144. # asm 2: addpd <r0=%xmm0,<tmp0=%xmm1
  145. addpd %xmm0,%xmm1
  146. # qhasm: *(int128 *)(myp + 192) = tmp0
  147. # asm 1: movdqa <tmp0=int6464#2,192(<myp=int64#3)
  148. # asm 2: movdqa <tmp0=%xmm1,192(<myp=%rdx)
  149. movdqa %xmm1,192(%rdx)
  150. # qhasm: r1 = *(int128 *)(op + 16)
  151. # asm 1: movdqa 16(<op=int64#2),>r1=int6464#1
  152. # asm 2: movdqa 16(<op=%rsi),>r1=%xmm0
  153. movdqa 16(%rsi),%xmm0
  154. # qhasm: tmp1 = r1
  155. # asm 1: movdqa <r1=int6464#1,>tmp1=int6464#2
  156. # asm 2: movdqa <r1=%xmm0,>tmp1=%xmm1
  157. movdqa %xmm0,%xmm1
  158. # qhasm: tmp1 = shuffle float64 of tmp1 and tmp1 by 0x1
  159. # asm 1: shufpd $0x1,<tmp1=int6464#2,<tmp1=int6464#2
  160. # asm 2: shufpd $0x1,<tmp1=%xmm1,<tmp1=%xmm1
  161. shufpd $0x1,%xmm1,%xmm1
  162. # qhasm: float6464 r1[0] -= tmp1[0]
  163. # asm 1: subsd <tmp1=int6464#2,<r1=int6464#1
  164. # asm 2: subsd <tmp1=%xmm1,<r1=%xmm0
  165. subsd %xmm1,%xmm0
  166. # qhasm: *(int128 *)(myp + 16) = r1
  167. # asm 1: movdqa <r1=int6464#1,16(<myp=int64#3)
  168. # asm 2: movdqa <r1=%xmm0,16(<myp=%rdx)
  169. movdqa %xmm0,16(%rdx)
  170. # qhasm: r1 = tmp1
  171. # asm 1: movdqa <tmp1=int6464#2,>r1=int6464#1
  172. # asm 2: movdqa <tmp1=%xmm1,>r1=%xmm0
  173. movdqa %xmm1,%xmm0
  174. # qhasm: r1 = unpack high double of r1 and r1
  175. # asm 1: unpckhpd <r1=int6464#1,<r1=int6464#1
  176. # asm 2: unpckhpd <r1=%xmm0,<r1=%xmm0
  177. unpckhpd %xmm0,%xmm0
  178. # qhasm: float6464 tmp1 += r1
  179. # asm 1: addpd <r1=int6464#1,<tmp1=int6464#2
  180. # asm 2: addpd <r1=%xmm0,<tmp1=%xmm1
  181. addpd %xmm0,%xmm1
  182. # qhasm: *(int128 *)(myp + 208) = tmp1
  183. # asm 1: movdqa <tmp1=int6464#2,208(<myp=int64#3)
  184. # asm 2: movdqa <tmp1=%xmm1,208(<myp=%rdx)
  185. movdqa %xmm1,208(%rdx)
  186. # qhasm: r2 = *(int128 *)(op + 32)
  187. # asm 1: movdqa 32(<op=int64#2),>r2=int6464#1
  188. # asm 2: movdqa 32(<op=%rsi),>r2=%xmm0
  189. movdqa 32(%rsi),%xmm0
  190. # qhasm: tmp2 = r2
  191. # asm 1: movdqa <r2=int6464#1,>tmp2=int6464#2
  192. # asm 2: movdqa <r2=%xmm0,>tmp2=%xmm1
  193. movdqa %xmm0,%xmm1
  194. # qhasm: tmp2 = shuffle float64 of tmp2 and tmp2 by 0x1
  195. # asm 1: shufpd $0x1,<tmp2=int6464#2,<tmp2=int6464#2
  196. # asm 2: shufpd $0x1,<tmp2=%xmm1,<tmp2=%xmm1
  197. shufpd $0x1,%xmm1,%xmm1
  198. # qhasm: float6464 r2[0] -= tmp2[0]
  199. # asm 1: subsd <tmp2=int6464#2,<r2=int6464#1
  200. # asm 2: subsd <tmp2=%xmm1,<r2=%xmm0
  201. subsd %xmm1,%xmm0
  202. # qhasm: *(int128 *)(myp + 32) = r2
  203. # asm 1: movdqa <r2=int6464#1,32(<myp=int64#3)
  204. # asm 2: movdqa <r2=%xmm0,32(<myp=%rdx)
  205. movdqa %xmm0,32(%rdx)
  206. # qhasm: r2 = tmp2
  207. # asm 1: movdqa <tmp2=int6464#2,>r2=int6464#1
  208. # asm 2: movdqa <tmp2=%xmm1,>r2=%xmm0
  209. movdqa %xmm1,%xmm0
  210. # qhasm: r2 = unpack high double of r2 and r2
  211. # asm 1: unpckhpd <r2=int6464#1,<r2=int6464#1
  212. # asm 2: unpckhpd <r2=%xmm0,<r2=%xmm0
  213. unpckhpd %xmm0,%xmm0
  214. # qhasm: float6464 tmp2 += r2
  215. # asm 1: addpd <r2=int6464#1,<tmp2=int6464#2
  216. # asm 2: addpd <r2=%xmm0,<tmp2=%xmm1
  217. addpd %xmm0,%xmm1
  218. # qhasm: *(int128 *)(myp + 224) = tmp2
  219. # asm 1: movdqa <tmp2=int6464#2,224(<myp=int64#3)
  220. # asm 2: movdqa <tmp2=%xmm1,224(<myp=%rdx)
  221. movdqa %xmm1,224(%rdx)
  222. # qhasm: r3 = *(int128 *)(op + 48)
  223. # asm 1: movdqa 48(<op=int64#2),>r3=int6464#1
  224. # asm 2: movdqa 48(<op=%rsi),>r3=%xmm0
  225. movdqa 48(%rsi),%xmm0
  226. # qhasm: tmp3 = r3
  227. # asm 1: movdqa <r3=int6464#1,>tmp3=int6464#2
  228. # asm 2: movdqa <r3=%xmm0,>tmp3=%xmm1
  229. movdqa %xmm0,%xmm1
  230. # qhasm: tmp3 = shuffle float64 of tmp3 and tmp3 by 0x1
  231. # asm 1: shufpd $0x1,<tmp3=int6464#2,<tmp3=int6464#2
  232. # asm 2: shufpd $0x1,<tmp3=%xmm1,<tmp3=%xmm1
  233. shufpd $0x1,%xmm1,%xmm1
  234. # qhasm: float6464 r3[0] -= tmp3[0]
  235. # asm 1: subsd <tmp3=int6464#2,<r3=int6464#1
  236. # asm 2: subsd <tmp3=%xmm1,<r3=%xmm0
  237. subsd %xmm1,%xmm0
  238. # qhasm: *(int128 *)(myp + 48) = r3
  239. # asm 1: movdqa <r3=int6464#1,48(<myp=int64#3)
  240. # asm 2: movdqa <r3=%xmm0,48(<myp=%rdx)
  241. movdqa %xmm0,48(%rdx)
  242. # qhasm: r3 = tmp3
  243. # asm 1: movdqa <tmp3=int6464#2,>r3=int6464#1
  244. # asm 2: movdqa <tmp3=%xmm1,>r3=%xmm0
  245. movdqa %xmm1,%xmm0
  246. # qhasm: r3 = unpack high double of r3 and r3
  247. # asm 1: unpckhpd <r3=int6464#1,<r3=int6464#1
  248. # asm 2: unpckhpd <r3=%xmm0,<r3=%xmm0
  249. unpckhpd %xmm0,%xmm0
  250. # qhasm: float6464 tmp3 += r3
  251. # asm 1: addpd <r3=int6464#1,<tmp3=int6464#2
  252. # asm 2: addpd <r3=%xmm0,<tmp3=%xmm1
  253. addpd %xmm0,%xmm1
  254. # qhasm: *(int128 *)(myp + 240) = tmp3
  255. # asm 1: movdqa <tmp3=int6464#2,240(<myp=int64#3)
  256. # asm 2: movdqa <tmp3=%xmm1,240(<myp=%rdx)
  257. movdqa %xmm1,240(%rdx)
  258. # qhasm: r4 = *(int128 *)(op + 64)
  259. # asm 1: movdqa 64(<op=int64#2),>r4=int6464#1
  260. # asm 2: movdqa 64(<op=%rsi),>r4=%xmm0
  261. movdqa 64(%rsi),%xmm0
  262. # qhasm: tmp4 = r4
  263. # asm 1: movdqa <r4=int6464#1,>tmp4=int6464#2
  264. # asm 2: movdqa <r4=%xmm0,>tmp4=%xmm1
  265. movdqa %xmm0,%xmm1
  266. # qhasm: tmp4 = shuffle float64 of tmp4 and tmp4 by 0x1
  267. # asm 1: shufpd $0x1,<tmp4=int6464#2,<tmp4=int6464#2
  268. # asm 2: shufpd $0x1,<tmp4=%xmm1,<tmp4=%xmm1
  269. shufpd $0x1,%xmm1,%xmm1
  270. # qhasm: float6464 r4[0] -= tmp4[0]
  271. # asm 1: subsd <tmp4=int6464#2,<r4=int6464#1
  272. # asm 2: subsd <tmp4=%xmm1,<r4=%xmm0
  273. subsd %xmm1,%xmm0
  274. # qhasm: *(int128 *)(myp + 64) = r4
  275. # asm 1: movdqa <r4=int6464#1,64(<myp=int64#3)
  276. # asm 2: movdqa <r4=%xmm0,64(<myp=%rdx)
  277. movdqa %xmm0,64(%rdx)
  278. # qhasm: r4 = tmp4
  279. # asm 1: movdqa <tmp4=int6464#2,>r4=int6464#1
  280. # asm 2: movdqa <tmp4=%xmm1,>r4=%xmm0
  281. movdqa %xmm1,%xmm0
  282. # qhasm: r4 = unpack high double of r4 and r4
  283. # asm 1: unpckhpd <r4=int6464#1,<r4=int6464#1
  284. # asm 2: unpckhpd <r4=%xmm0,<r4=%xmm0
  285. unpckhpd %xmm0,%xmm0
  286. # qhasm: float6464 tmp4 += r4
  287. # asm 1: addpd <r4=int6464#1,<tmp4=int6464#2
  288. # asm 2: addpd <r4=%xmm0,<tmp4=%xmm1
  289. addpd %xmm0,%xmm1
  290. # qhasm: *(int128 *)(myp + 256) = tmp4
  291. # asm 1: movdqa <tmp4=int6464#2,256(<myp=int64#3)
  292. # asm 2: movdqa <tmp4=%xmm1,256(<myp=%rdx)
  293. movdqa %xmm1,256(%rdx)
  294. # qhasm: r5 = *(int128 *)(op + 80)
  295. # asm 1: movdqa 80(<op=int64#2),>r5=int6464#1
  296. # asm 2: movdqa 80(<op=%rsi),>r5=%xmm0
  297. movdqa 80(%rsi),%xmm0
  298. # qhasm: tmp5 = r5
  299. # asm 1: movdqa <r5=int6464#1,>tmp5=int6464#2
  300. # asm 2: movdqa <r5=%xmm0,>tmp5=%xmm1
  301. movdqa %xmm0,%xmm1
  302. # qhasm: tmp5 = shuffle float64 of tmp5 and tmp5 by 0x1
  303. # asm 1: shufpd $0x1,<tmp5=int6464#2,<tmp5=int6464#2
  304. # asm 2: shufpd $0x1,<tmp5=%xmm1,<tmp5=%xmm1
  305. shufpd $0x1,%xmm1,%xmm1
  306. # qhasm: float6464 r5[0] -= tmp5[0]
  307. # asm 1: subsd <tmp5=int6464#2,<r5=int6464#1
  308. # asm 2: subsd <tmp5=%xmm1,<r5=%xmm0
  309. subsd %xmm1,%xmm0
  310. # qhasm: *(int128 *)(myp + 80) = r5
  311. # asm 1: movdqa <r5=int6464#1,80(<myp=int64#3)
  312. # asm 2: movdqa <r5=%xmm0,80(<myp=%rdx)
  313. movdqa %xmm0,80(%rdx)
  314. # qhasm: r5 = tmp5
  315. # asm 1: movdqa <tmp5=int6464#2,>r5=int6464#1
  316. # asm 2: movdqa <tmp5=%xmm1,>r5=%xmm0
  317. movdqa %xmm1,%xmm0
  318. # qhasm: r5 = unpack high double of r5 and r5
  319. # asm 1: unpckhpd <r5=int6464#1,<r5=int6464#1
  320. # asm 2: unpckhpd <r5=%xmm0,<r5=%xmm0
  321. unpckhpd %xmm0,%xmm0
  322. # qhasm: float6464 tmp5 += r5
  323. # asm 1: addpd <r5=int6464#1,<tmp5=int6464#2
  324. # asm 2: addpd <r5=%xmm0,<tmp5=%xmm1
  325. addpd %xmm0,%xmm1
  326. # qhasm: *(int128 *)(myp + 272) = tmp5
  327. # asm 1: movdqa <tmp5=int6464#2,272(<myp=int64#3)
  328. # asm 2: movdqa <tmp5=%xmm1,272(<myp=%rdx)
  329. movdqa %xmm1,272(%rdx)
  330. # qhasm: r6 = *(int128 *)(op + 96)
  331. # asm 1: movdqa 96(<op=int64#2),>r6=int6464#1
  332. # asm 2: movdqa 96(<op=%rsi),>r6=%xmm0
  333. movdqa 96(%rsi),%xmm0
  334. # qhasm: tmp6 = r6
  335. # asm 1: movdqa <r6=int6464#1,>tmp6=int6464#2
  336. # asm 2: movdqa <r6=%xmm0,>tmp6=%xmm1
  337. movdqa %xmm0,%xmm1
  338. # qhasm: tmp6 = shuffle float64 of tmp6 and tmp6 by 0x1
  339. # asm 1: shufpd $0x1,<tmp6=int6464#2,<tmp6=int6464#2
  340. # asm 2: shufpd $0x1,<tmp6=%xmm1,<tmp6=%xmm1
  341. shufpd $0x1,%xmm1,%xmm1
  342. # qhasm: float6464 r6[0] -= tmp6[0]
  343. # asm 1: subsd <tmp6=int6464#2,<r6=int6464#1
  344. # asm 2: subsd <tmp6=%xmm1,<r6=%xmm0
  345. subsd %xmm1,%xmm0
  346. # qhasm: *(int128 *)(myp + 96) = r6
  347. # asm 1: movdqa <r6=int6464#1,96(<myp=int64#3)
  348. # asm 2: movdqa <r6=%xmm0,96(<myp=%rdx)
  349. movdqa %xmm0,96(%rdx)
  350. # qhasm: r6 = tmp6
  351. # asm 1: movdqa <tmp6=int6464#2,>r6=int6464#1
  352. # asm 2: movdqa <tmp6=%xmm1,>r6=%xmm0
  353. movdqa %xmm1,%xmm0
  354. # qhasm: r6 = unpack high double of r6 and r6
  355. # asm 1: unpckhpd <r6=int6464#1,<r6=int6464#1
  356. # asm 2: unpckhpd <r6=%xmm0,<r6=%xmm0
  357. unpckhpd %xmm0,%xmm0
  358. # qhasm: float6464 tmp6 += r6
  359. # asm 1: addpd <r6=int6464#1,<tmp6=int6464#2
  360. # asm 2: addpd <r6=%xmm0,<tmp6=%xmm1
  361. addpd %xmm0,%xmm1
  362. # qhasm: *(int128 *)(myp + 288) = tmp6
  363. # asm 1: movdqa <tmp6=int6464#2,288(<myp=int64#3)
  364. # asm 2: movdqa <tmp6=%xmm1,288(<myp=%rdx)
  365. movdqa %xmm1,288(%rdx)
  366. # qhasm: r7 = *(int128 *)(op + 112)
  367. # asm 1: movdqa 112(<op=int64#2),>r7=int6464#1
  368. # asm 2: movdqa 112(<op=%rsi),>r7=%xmm0
  369. movdqa 112(%rsi),%xmm0
  370. # qhasm: tmp7 = r7
  371. # asm 1: movdqa <r7=int6464#1,>tmp7=int6464#2
  372. # asm 2: movdqa <r7=%xmm0,>tmp7=%xmm1
  373. movdqa %xmm0,%xmm1
  374. # qhasm: tmp7 = shuffle float64 of tmp7 and tmp7 by 0x1
  375. # asm 1: shufpd $0x1,<tmp7=int6464#2,<tmp7=int6464#2
  376. # asm 2: shufpd $0x1,<tmp7=%xmm1,<tmp7=%xmm1
  377. shufpd $0x1,%xmm1,%xmm1
  378. # qhasm: float6464 r7[0] -= tmp7[0]
  379. # asm 1: subsd <tmp7=int6464#2,<r7=int6464#1
  380. # asm 2: subsd <tmp7=%xmm1,<r7=%xmm0
  381. subsd %xmm1,%xmm0
  382. # qhasm: *(int128 *)(myp + 112) = r7
  383. # asm 1: movdqa <r7=int6464#1,112(<myp=int64#3)
  384. # asm 2: movdqa <r7=%xmm0,112(<myp=%rdx)
  385. movdqa %xmm0,112(%rdx)
  386. # qhasm: r7 = tmp7
  387. # asm 1: movdqa <tmp7=int6464#2,>r7=int6464#1
  388. # asm 2: movdqa <tmp7=%xmm1,>r7=%xmm0
  389. movdqa %xmm1,%xmm0
  390. # qhasm: r7 = unpack high double of r7 and r7
  391. # asm 1: unpckhpd <r7=int6464#1,<r7=int6464#1
  392. # asm 2: unpckhpd <r7=%xmm0,<r7=%xmm0
  393. unpckhpd %xmm0,%xmm0
  394. # qhasm: float6464 tmp7 += r7
  395. # asm 1: addpd <r7=int6464#1,<tmp7=int6464#2
  396. # asm 2: addpd <r7=%xmm0,<tmp7=%xmm1
  397. addpd %xmm0,%xmm1
  398. # qhasm: *(int128 *)(myp + 304) = tmp7
  399. # asm 1: movdqa <tmp7=int6464#2,304(<myp=int64#3)
  400. # asm 2: movdqa <tmp7=%xmm1,304(<myp=%rdx)
  401. movdqa %xmm1,304(%rdx)
  402. # qhasm: r8 = *(int128 *)(op + 128)
  403. # asm 1: movdqa 128(<op=int64#2),>r8=int6464#1
  404. # asm 2: movdqa 128(<op=%rsi),>r8=%xmm0
  405. movdqa 128(%rsi),%xmm0
  406. # qhasm: tmp8 = r8
  407. # asm 1: movdqa <r8=int6464#1,>tmp8=int6464#2
  408. # asm 2: movdqa <r8=%xmm0,>tmp8=%xmm1
  409. movdqa %xmm0,%xmm1
  410. # qhasm: tmp8 = shuffle float64 of tmp8 and tmp8 by 0x1
  411. # asm 1: shufpd $0x1,<tmp8=int6464#2,<tmp8=int6464#2
  412. # asm 2: shufpd $0x1,<tmp8=%xmm1,<tmp8=%xmm1
  413. shufpd $0x1,%xmm1,%xmm1
  414. # qhasm: float6464 r8[0] -= tmp8[0]
  415. # asm 1: subsd <tmp8=int6464#2,<r8=int6464#1
  416. # asm 2: subsd <tmp8=%xmm1,<r8=%xmm0
  417. subsd %xmm1,%xmm0
  418. # qhasm: *(int128 *)(myp + 128) = r8
  419. # asm 1: movdqa <r8=int6464#1,128(<myp=int64#3)
  420. # asm 2: movdqa <r8=%xmm0,128(<myp=%rdx)
  421. movdqa %xmm0,128(%rdx)
  422. # qhasm: r8 = tmp8
  423. # asm 1: movdqa <tmp8=int6464#2,>r8=int6464#1
  424. # asm 2: movdqa <tmp8=%xmm1,>r8=%xmm0
  425. movdqa %xmm1,%xmm0
  426. # qhasm: r8 = unpack high double of r8 and r8
  427. # asm 1: unpckhpd <r8=int6464#1,<r8=int6464#1
  428. # asm 2: unpckhpd <r8=%xmm0,<r8=%xmm0
  429. unpckhpd %xmm0,%xmm0
  430. # qhasm: float6464 tmp8 += r8
  431. # asm 1: addpd <r8=int6464#1,<tmp8=int6464#2
  432. # asm 2: addpd <r8=%xmm0,<tmp8=%xmm1
  433. addpd %xmm0,%xmm1
  434. # qhasm: *(int128 *)(myp + 320) = tmp8
  435. # asm 1: movdqa <tmp8=int6464#2,320(<myp=int64#3)
  436. # asm 2: movdqa <tmp8=%xmm1,320(<myp=%rdx)
  437. movdqa %xmm1,320(%rdx)
  438. # qhasm: r9 = *(int128 *)(op + 144)
  439. # asm 1: movdqa 144(<op=int64#2),>r9=int6464#1
  440. # asm 2: movdqa 144(<op=%rsi),>r9=%xmm0
  441. movdqa 144(%rsi),%xmm0
  442. # qhasm: tmp9 = r9
  443. # asm 1: movdqa <r9=int6464#1,>tmp9=int6464#2
  444. # asm 2: movdqa <r9=%xmm0,>tmp9=%xmm1
  445. movdqa %xmm0,%xmm1
  446. # qhasm: tmp9 = shuffle float64 of tmp9 and tmp9 by 0x1
  447. # asm 1: shufpd $0x1,<tmp9=int6464#2,<tmp9=int6464#2
  448. # asm 2: shufpd $0x1,<tmp9=%xmm1,<tmp9=%xmm1
  449. shufpd $0x1,%xmm1,%xmm1
  450. # qhasm: float6464 r9[0] -= tmp9[0]
  451. # asm 1: subsd <tmp9=int6464#2,<r9=int6464#1
  452. # asm 2: subsd <tmp9=%xmm1,<r9=%xmm0
  453. subsd %xmm1,%xmm0
  454. # qhasm: *(int128 *)(myp + 144) = r9
  455. # asm 1: movdqa <r9=int6464#1,144(<myp=int64#3)
  456. # asm 2: movdqa <r9=%xmm0,144(<myp=%rdx)
  457. movdqa %xmm0,144(%rdx)
  458. # qhasm: r9 = tmp9
  459. # asm 1: movdqa <tmp9=int6464#2,>r9=int6464#1
  460. # asm 2: movdqa <tmp9=%xmm1,>r9=%xmm0
  461. movdqa %xmm1,%xmm0
  462. # qhasm: r9 = unpack high double of r9 and r9
  463. # asm 1: unpckhpd <r9=int6464#1,<r9=int6464#1
  464. # asm 2: unpckhpd <r9=%xmm0,<r9=%xmm0
  465. unpckhpd %xmm0,%xmm0
  466. # qhasm: float6464 tmp9 += r9
  467. # asm 1: addpd <r9=int6464#1,<tmp9=int6464#2
  468. # asm 2: addpd <r9=%xmm0,<tmp9=%xmm1
  469. addpd %xmm0,%xmm1
  470. # qhasm: *(int128 *)(myp + 336) = tmp9
  471. # asm 1: movdqa <tmp9=int6464#2,336(<myp=int64#3)
  472. # asm 2: movdqa <tmp9=%xmm1,336(<myp=%rdx)
  473. movdqa %xmm1,336(%rdx)
  474. # qhasm: r10 = *(int128 *)(op + 160)
  475. # asm 1: movdqa 160(<op=int64#2),>r10=int6464#1
  476. # asm 2: movdqa 160(<op=%rsi),>r10=%xmm0
  477. movdqa 160(%rsi),%xmm0
  478. # qhasm: tmp10 = r10
  479. # asm 1: movdqa <r10=int6464#1,>tmp10=int6464#2
  480. # asm 2: movdqa <r10=%xmm0,>tmp10=%xmm1
  481. movdqa %xmm0,%xmm1
  482. # qhasm: tmp10 = shuffle float64 of tmp10 and tmp10 by 0x1
  483. # asm 1: shufpd $0x1,<tmp10=int6464#2,<tmp10=int6464#2
  484. # asm 2: shufpd $0x1,<tmp10=%xmm1,<tmp10=%xmm1
  485. shufpd $0x1,%xmm1,%xmm1
  486. # qhasm: float6464 r10[0] -= tmp10[0]
  487. # asm 1: subsd <tmp10=int6464#2,<r10=int6464#1
  488. # asm 2: subsd <tmp10=%xmm1,<r10=%xmm0
  489. subsd %xmm1,%xmm0
  490. # qhasm: *(int128 *)(myp + 160) = r10
  491. # asm 1: movdqa <r10=int6464#1,160(<myp=int64#3)
  492. # asm 2: movdqa <r10=%xmm0,160(<myp=%rdx)
  493. movdqa %xmm0,160(%rdx)
  494. # qhasm: r10 = tmp10
  495. # asm 1: movdqa <tmp10=int6464#2,>r10=int6464#1
  496. # asm 2: movdqa <tmp10=%xmm1,>r10=%xmm0
  497. movdqa %xmm1,%xmm0
  498. # qhasm: r10 = unpack high double of r10 and r10
  499. # asm 1: unpckhpd <r10=int6464#1,<r10=int6464#1
  500. # asm 2: unpckhpd <r10=%xmm0,<r10=%xmm0
  501. unpckhpd %xmm0,%xmm0
  502. # qhasm: float6464 tmp10 += r10
  503. # asm 1: addpd <r10=int6464#1,<tmp10=int6464#2
  504. # asm 2: addpd <r10=%xmm0,<tmp10=%xmm1
  505. addpd %xmm0,%xmm1
  506. # qhasm: *(int128 *)(myp + 352) = tmp10
  507. # asm 1: movdqa <tmp10=int6464#2,352(<myp=int64#3)
  508. # asm 2: movdqa <tmp10=%xmm1,352(<myp=%rdx)
  509. movdqa %xmm1,352(%rdx)
  510. # qhasm: r11 = *(int128 *)(op + 176)
  511. # asm 1: movdqa 176(<op=int64#2),>r11=int6464#1
  512. # asm 2: movdqa 176(<op=%rsi),>r11=%xmm0
  513. movdqa 176(%rsi),%xmm0
  514. # qhasm: tmp11 = r11
  515. # asm 1: movdqa <r11=int6464#1,>tmp11=int6464#2
  516. # asm 2: movdqa <r11=%xmm0,>tmp11=%xmm1
  517. movdqa %xmm0,%xmm1
  518. # qhasm: tmp11 = shuffle float64 of tmp11 and tmp11 by 0x1
  519. # asm 1: shufpd $0x1,<tmp11=int6464#2,<tmp11=int6464#2
  520. # asm 2: shufpd $0x1,<tmp11=%xmm1,<tmp11=%xmm1
  521. shufpd $0x1,%xmm1,%xmm1
  522. # qhasm: float6464 r11[0] -= tmp11[0]
  523. # asm 1: subsd <tmp11=int6464#2,<r11=int6464#1
  524. # asm 2: subsd <tmp11=%xmm1,<r11=%xmm0
  525. subsd %xmm1,%xmm0
  526. # qhasm: *(int128 *)(myp + 176) = r11
  527. # asm 1: movdqa <r11=int6464#1,176(<myp=int64#3)
  528. # asm 2: movdqa <r11=%xmm0,176(<myp=%rdx)
  529. movdqa %xmm0,176(%rdx)
  530. # qhasm: r11 = tmp11
  531. # asm 1: movdqa <tmp11=int6464#2,>r11=int6464#1
  532. # asm 2: movdqa <tmp11=%xmm1,>r11=%xmm0
  533. movdqa %xmm1,%xmm0
  534. # qhasm: r11 = unpack high double of r11 and r11
  535. # asm 1: unpckhpd <r11=int6464#1,<r11=int6464#1
  536. # asm 2: unpckhpd <r11=%xmm0,<r11=%xmm0
  537. unpckhpd %xmm0,%xmm0
  538. # qhasm: float6464 tmp11 += r11
  539. # asm 1: addpd <r11=int6464#1,<tmp11=int6464#2
  540. # asm 2: addpd <r11=%xmm0,<tmp11=%xmm1
  541. addpd %xmm0,%xmm1
  542. # qhasm: *(int128 *)(myp + 368) = tmp11
  543. # asm 1: movdqa <tmp11=int6464#2,368(<myp=int64#3)
  544. # asm 2: movdqa <tmp11=%xmm1,368(<myp=%rdx)
  545. movdqa %xmm1,368(%rdx)
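# Editorial summary of the block above: for each of the 12 coefficient
# pairs (c0, c1) loaded from op, with c0 in the low lane and c1 in the high
# lane, the code stores (c0 - c1, c1) at myp + 16*i and (c0 + c1, 2*c0) at
# myp + 192 + 16*i. Multiplying the two buffers lane by lane then yields
# (c0^2 - c1^2, 2*c0*c1), i.e. both components of the Fp2 square
# (c0 + c1*i)^2 = (c0^2 - c1^2) + 2*c0*c1*i, with i^2 = -1.
# A rough C sketch of the same precomputation (hypothetical names, assuming
# the interleaved per-coefficient layout of dclxvi's fp2e type):
#
#   for (int i = 0; i < 12; i++) {
#       double c0 = op[2*i], c1 = op[2*i + 1];       /* one coefficient pair */
#       t1[2*i] = c0 - c1;  t1[2*i + 1] = c1;        /* myp +   0 .. 191     */
#       t2[2*i] = c0 + c1;  t2[2*i + 1] = 2.0 * c0;  /* myp + 192 .. 383     */
#   }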
  546. # qhasm: t1p = myp
  547. # asm 1: mov <myp=int64#3,>t1p=int64#2
  548. # asm 2: mov <myp=%rdx,>t1p=%rsi
  549. mov %rdx,%rsi
  550. # qhasm: t2p = myp + 192
  551. # asm 1: lea 192(<myp=int64#3),>t2p=int64#4
  552. # asm 2: lea 192(<myp=%rdx),>t2p=%rcx
  553. lea 192(%rdx),%rcx
  554. # qhasm: rp = myp + 384
  555. # asm 1: lea 384(<myp=int64#3),>rp=int64#3
  556. # asm 2: lea 384(<myp=%rdx),>rp=%rdx
  557. lea 384(%rdx),%rdx
  558. # qhasm: ab0 = *(int128 *)(t1p + 0)
  559. # asm 1: movdqa 0(<t1p=int64#2),>ab0=int6464#1
  560. # asm 2: movdqa 0(<t1p=%rsi),>ab0=%xmm0
  561. movdqa 0(%rsi),%xmm0
  562. # qhasm: t0 = ab0
  563. # asm 1: movdqa <ab0=int6464#1,>t0=int6464#2
  564. # asm 2: movdqa <ab0=%xmm0,>t0=%xmm1
  565. movdqa %xmm0,%xmm1
  566. # qhasm: float6464 t0 *= *(int128 *)(t2p + 0)
  567. # asm 1: mulpd 0(<t2p=int64#4),<t0=int6464#2
  568. # asm 2: mulpd 0(<t2p=%rcx),<t0=%xmm1
  569. mulpd 0(%rcx),%xmm1
  570. # qhasm: r0 =t0
  571. # asm 1: movdqa <t0=int6464#2,>r0=int6464#2
  572. # asm 2: movdqa <t0=%xmm1,>r0=%xmm1
  573. movdqa %xmm1,%xmm1
  574. # qhasm: t1 = ab0
  575. # asm 1: movdqa <ab0=int6464#1,>t1=int6464#3
  576. # asm 2: movdqa <ab0=%xmm0,>t1=%xmm2
  577. movdqa %xmm0,%xmm2
  578. # qhasm: float6464 t1 *= *(int128 *)(t2p + 16)
  579. # asm 1: mulpd 16(<t2p=int64#4),<t1=int6464#3
  580. # asm 2: mulpd 16(<t2p=%rcx),<t1=%xmm2
  581. mulpd 16(%rcx),%xmm2
  582. # qhasm: r1 =t1
  583. # asm 1: movdqa <t1=int6464#3,>r1=int6464#3
  584. # asm 2: movdqa <t1=%xmm2,>r1=%xmm2
  585. movdqa %xmm2,%xmm2
  586. # qhasm: t2 = ab0
  587. # asm 1: movdqa <ab0=int6464#1,>t2=int6464#4
  588. # asm 2: movdqa <ab0=%xmm0,>t2=%xmm3
  589. movdqa %xmm0,%xmm3
  590. # qhasm: float6464 t2 *= *(int128 *)(t2p + 32)
  591. # asm 1: mulpd 32(<t2p=int64#4),<t2=int6464#4
  592. # asm 2: mulpd 32(<t2p=%rcx),<t2=%xmm3
  593. mulpd 32(%rcx),%xmm3
  594. # qhasm: r2 =t2
  595. # asm 1: movdqa <t2=int6464#4,>r2=int6464#4
  596. # asm 2: movdqa <t2=%xmm3,>r2=%xmm3
  597. movdqa %xmm3,%xmm3
  598. # qhasm: t3 = ab0
  599. # asm 1: movdqa <ab0=int6464#1,>t3=int6464#5
  600. # asm 2: movdqa <ab0=%xmm0,>t3=%xmm4
  601. movdqa %xmm0,%xmm4
  602. # qhasm: float6464 t3 *= *(int128 *)(t2p + 48)
  603. # asm 1: mulpd 48(<t2p=int64#4),<t3=int6464#5
  604. # asm 2: mulpd 48(<t2p=%rcx),<t3=%xmm4
  605. mulpd 48(%rcx),%xmm4
  606. # qhasm: r3 =t3
  607. # asm 1: movdqa <t3=int6464#5,>r3=int6464#5
  608. # asm 2: movdqa <t3=%xmm4,>r3=%xmm4
  609. movdqa %xmm4,%xmm4
  610. # qhasm: t4 = ab0
  611. # asm 1: movdqa <ab0=int6464#1,>t4=int6464#6
  612. # asm 2: movdqa <ab0=%xmm0,>t4=%xmm5
  613. movdqa %xmm0,%xmm5
  614. # qhasm: float6464 t4 *= *(int128 *)(t2p + 64)
  615. # asm 1: mulpd 64(<t2p=int64#4),<t4=int6464#6
  616. # asm 2: mulpd 64(<t2p=%rcx),<t4=%xmm5
  617. mulpd 64(%rcx),%xmm5
  618. # qhasm: r4 =t4
  619. # asm 1: movdqa <t4=int6464#6,>r4=int6464#6
  620. # asm 2: movdqa <t4=%xmm5,>r4=%xmm5
  621. movdqa %xmm5,%xmm5
  622. # qhasm: t5 = ab0
  623. # asm 1: movdqa <ab0=int6464#1,>t5=int6464#7
  624. # asm 2: movdqa <ab0=%xmm0,>t5=%xmm6
  625. movdqa %xmm0,%xmm6
  626. # qhasm: float6464 t5 *= *(int128 *)(t2p + 80)
  627. # asm 1: mulpd 80(<t2p=int64#4),<t5=int6464#7
  628. # asm 2: mulpd 80(<t2p=%rcx),<t5=%xmm6
  629. mulpd 80(%rcx),%xmm6
  630. # qhasm: r5 =t5
  631. # asm 1: movdqa <t5=int6464#7,>r5=int6464#7
  632. # asm 2: movdqa <t5=%xmm6,>r5=%xmm6
  633. movdqa %xmm6,%xmm6
  634. # qhasm: t6 = ab0
  635. # asm 1: movdqa <ab0=int6464#1,>t6=int6464#8
  636. # asm 2: movdqa <ab0=%xmm0,>t6=%xmm7
  637. movdqa %xmm0,%xmm7
  638. # qhasm: float6464 t6 *= *(int128 *)(t2p + 96)
  639. # asm 1: mulpd 96(<t2p=int64#4),<t6=int6464#8
  640. # asm 2: mulpd 96(<t2p=%rcx),<t6=%xmm7
  641. mulpd 96(%rcx),%xmm7
  642. # qhasm: r6 =t6
  643. # asm 1: movdqa <t6=int6464#8,>r6=int6464#8
  644. # asm 2: movdqa <t6=%xmm7,>r6=%xmm7
  645. movdqa %xmm7,%xmm7
  646. # qhasm: t7 = ab0
  647. # asm 1: movdqa <ab0=int6464#1,>t7=int6464#9
  648. # asm 2: movdqa <ab0=%xmm0,>t7=%xmm8
  649. movdqa %xmm0,%xmm8
  650. # qhasm: float6464 t7 *= *(int128 *)(t2p + 112)
  651. # asm 1: mulpd 112(<t2p=int64#4),<t7=int6464#9
  652. # asm 2: mulpd 112(<t2p=%rcx),<t7=%xmm8
  653. mulpd 112(%rcx),%xmm8
  654. # qhasm: r7 =t7
  655. # asm 1: movdqa <t7=int6464#9,>r7=int6464#9
  656. # asm 2: movdqa <t7=%xmm8,>r7=%xmm8
  657. movdqa %xmm8,%xmm8
  658. # qhasm: t8 = ab0
  659. # asm 1: movdqa <ab0=int6464#1,>t8=int6464#10
  660. # asm 2: movdqa <ab0=%xmm0,>t8=%xmm9
  661. movdqa %xmm0,%xmm9
  662. # qhasm: float6464 t8 *= *(int128 *)(t2p + 128)
  663. # asm 1: mulpd 128(<t2p=int64#4),<t8=int6464#10
  664. # asm 2: mulpd 128(<t2p=%rcx),<t8=%xmm9
  665. mulpd 128(%rcx),%xmm9
  666. # qhasm: r8 =t8
  667. # asm 1: movdqa <t8=int6464#10,>r8=int6464#10
  668. # asm 2: movdqa <t8=%xmm9,>r8=%xmm9
  669. movdqa %xmm9,%xmm9
  670. # qhasm: t9 = ab0
  671. # asm 1: movdqa <ab0=int6464#1,>t9=int6464#11
  672. # asm 2: movdqa <ab0=%xmm0,>t9=%xmm10
  673. movdqa %xmm0,%xmm10
  674. # qhasm: float6464 t9 *= *(int128 *)(t2p + 144)
  675. # asm 1: mulpd 144(<t2p=int64#4),<t9=int6464#11
  676. # asm 2: mulpd 144(<t2p=%rcx),<t9=%xmm10
  677. mulpd 144(%rcx),%xmm10
  678. # qhasm: r9 =t9
  679. # asm 1: movdqa <t9=int6464#11,>r9=int6464#11
  680. # asm 2: movdqa <t9=%xmm10,>r9=%xmm10
  681. movdqa %xmm10,%xmm10
  682. # qhasm: t10 = ab0
  683. # asm 1: movdqa <ab0=int6464#1,>t10=int6464#12
  684. # asm 2: movdqa <ab0=%xmm0,>t10=%xmm11
  685. movdqa %xmm0,%xmm11
  686. # qhasm: float6464 t10 *= *(int128 *)(t2p + 160)
  687. # asm 1: mulpd 160(<t2p=int64#4),<t10=int6464#12
  688. # asm 2: mulpd 160(<t2p=%rcx),<t10=%xmm11
  689. mulpd 160(%rcx),%xmm11
  690. # qhasm: r10 =t10
  691. # asm 1: movdqa <t10=int6464#12,>r10=int6464#12
  692. # asm 2: movdqa <t10=%xmm11,>r10=%xmm11
  693. movdqa %xmm11,%xmm11
  694. # qhasm: r11 = ab0
  695. # asm 1: movdqa <ab0=int6464#1,>r11=int6464#1
  696. # asm 2: movdqa <ab0=%xmm0,>r11=%xmm0
  697. movdqa %xmm0,%xmm0
  698. # qhasm: float6464 r11 *= *(int128 *)(t2p + 176)
  699. # asm 1: mulpd 176(<t2p=int64#4),<r11=int6464#1
  700. # asm 2: mulpd 176(<t2p=%rcx),<r11=%xmm0
  701. mulpd 176(%rcx),%xmm0
  702. # qhasm: *(int128 *)(rp + 0) = r0
  703. # asm 1: movdqa <r0=int6464#2,0(<rp=int64#3)
  704. # asm 2: movdqa <r0=%xmm1,0(<rp=%rdx)
  705. movdqa %xmm1,0(%rdx)
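# Editorial note: from here the double-length product is accumulated column
# by column (schoolbook multiplication of the two 12-coefficient buffers
# prepared above). Each column multiplies one coefficient of t1p by all
# twelve coefficients of t2p; result coefficient r_k receives contributions
# only from columns 0..k, so it is final after column k and is written out
# to rp there, freeing its register.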
  706. # qhasm: ab1 = *(int128 *)(t1p + 16)
  707. # asm 1: movdqa 16(<t1p=int64#2),>ab1=int6464#2
  708. # asm 2: movdqa 16(<t1p=%rsi),>ab1=%xmm1
  709. movdqa 16(%rsi),%xmm1
  710. # qhasm: ab1six = ab1
  711. # asm 1: movdqa <ab1=int6464#2,>ab1six=int6464#13
  712. # asm 2: movdqa <ab1=%xmm1,>ab1six=%xmm12
  713. movdqa %xmm1,%xmm12
  714. # qhasm: float6464 ab1six *= SIX_SIX
  715. # asm 1: mulpd SIX_SIX,<ab1six=int6464#13
  716. # asm 2: mulpd SIX_SIX,<ab1six=%xmm12
  717. mulpd SIX_SIX,%xmm12
  718. # qhasm: t1 = ab1
  719. # asm 1: movdqa <ab1=int6464#2,>t1=int6464#14
  720. # asm 2: movdqa <ab1=%xmm1,>t1=%xmm13
  721. movdqa %xmm1,%xmm13
  722. # qhasm: float6464 t1 *= *(int128 *)(t2p + 0)
  723. # asm 1: mulpd 0(<t2p=int64#4),<t1=int6464#14
  724. # asm 2: mulpd 0(<t2p=%rcx),<t1=%xmm13
  725. mulpd 0(%rcx),%xmm13
  726. # qhasm: float6464 r1 +=t1
  727. # asm 1: addpd <t1=int6464#14,<r1=int6464#3
  728. # asm 2: addpd <t1=%xmm13,<r1=%xmm2
  729. addpd %xmm13,%xmm2
  730. # qhasm: t7 = ab1
  731. # asm 1: movdqa <ab1=int6464#2,>t7=int6464#2
  732. # asm 2: movdqa <ab1=%xmm1,>t7=%xmm1
  733. movdqa %xmm1,%xmm1
  734. # qhasm: float6464 t7 *= *(int128 *)(t2p + 96)
  735. # asm 1: mulpd 96(<t2p=int64#4),<t7=int6464#2
  736. # asm 2: mulpd 96(<t2p=%rcx),<t7=%xmm1
  737. mulpd 96(%rcx),%xmm1
  738. # qhasm: float6464 r7 +=t7
  739. # asm 1: addpd <t7=int6464#2,<r7=int6464#9
  740. # asm 2: addpd <t7=%xmm1,<r7=%xmm8
  741. addpd %xmm1,%xmm8
  742. # qhasm: t2 = ab1six
  743. # asm 1: movdqa <ab1six=int6464#13,>t2=int6464#2
  744. # asm 2: movdqa <ab1six=%xmm12,>t2=%xmm1
  745. movdqa %xmm12,%xmm1
  746. # qhasm: float6464 t2 *= *(int128 *)(t2p + 16)
  747. # asm 1: mulpd 16(<t2p=int64#4),<t2=int6464#2
  748. # asm 2: mulpd 16(<t2p=%rcx),<t2=%xmm1
  749. mulpd 16(%rcx),%xmm1
  750. # qhasm: float6464 r2 +=t2
  751. # asm 1: addpd <t2=int6464#2,<r2=int6464#4
  752. # asm 2: addpd <t2=%xmm1,<r2=%xmm3
  753. addpd %xmm1,%xmm3
  754. # qhasm: t3 = ab1six
  755. # asm 1: movdqa <ab1six=int6464#13,>t3=int6464#2
  756. # asm 2: movdqa <ab1six=%xmm12,>t3=%xmm1
  757. movdqa %xmm12,%xmm1
  758. # qhasm: float6464 t3 *= *(int128 *)(t2p + 32)
  759. # asm 1: mulpd 32(<t2p=int64#4),<t3=int6464#2
  760. # asm 2: mulpd 32(<t2p=%rcx),<t3=%xmm1
  761. mulpd 32(%rcx),%xmm1
  762. # qhasm: float6464 r3 +=t3
  763. # asm 1: addpd <t3=int6464#2,<r3=int6464#5
  764. # asm 2: addpd <t3=%xmm1,<r3=%xmm4
  765. addpd %xmm1,%xmm4
  766. # qhasm: t4 = ab1six
  767. # asm 1: movdqa <ab1six=int6464#13,>t4=int6464#2
  768. # asm 2: movdqa <ab1six=%xmm12,>t4=%xmm1
  769. movdqa %xmm12,%xmm1
  770. # qhasm: float6464 t4 *= *(int128 *)(t2p + 48)
  771. # asm 1: mulpd 48(<t2p=int64#4),<t4=int6464#2
  772. # asm 2: mulpd 48(<t2p=%rcx),<t4=%xmm1
  773. mulpd 48(%rcx),%xmm1
  774. # qhasm: float6464 r4 +=t4
  775. # asm 1: addpd <t4=int6464#2,<r4=int6464#6
  776. # asm 2: addpd <t4=%xmm1,<r4=%xmm5
  777. addpd %xmm1,%xmm5
  778. # qhasm: t5 = ab1six
  779. # asm 1: movdqa <ab1six=int6464#13,>t5=int6464#2
  780. # asm 2: movdqa <ab1six=%xmm12,>t5=%xmm1
  781. movdqa %xmm12,%xmm1
  782. # qhasm: float6464 t5 *= *(int128 *)(t2p + 64)
  783. # asm 1: mulpd 64(<t2p=int64#4),<t5=int6464#2
  784. # asm 2: mulpd 64(<t2p=%rcx),<t5=%xmm1
  785. mulpd 64(%rcx),%xmm1
  786. # qhasm: float6464 r5 +=t5
  787. # asm 1: addpd <t5=int6464#2,<r5=int6464#7
  788. # asm 2: addpd <t5=%xmm1,<r5=%xmm6
  789. addpd %xmm1,%xmm6
  790. # qhasm: t6 = ab1six
  791. # asm 1: movdqa <ab1six=int6464#13,>t6=int6464#2
  792. # asm 2: movdqa <ab1six=%xmm12,>t6=%xmm1
  793. movdqa %xmm12,%xmm1
  794. # qhasm: float6464 t6 *= *(int128 *)(t2p + 80)
  795. # asm 1: mulpd 80(<t2p=int64#4),<t6=int6464#2
  796. # asm 2: mulpd 80(<t2p=%rcx),<t6=%xmm1
  797. mulpd 80(%rcx),%xmm1
  798. # qhasm: float6464 r6 +=t6
  799. # asm 1: addpd <t6=int6464#2,<r6=int6464#8
  800. # asm 2: addpd <t6=%xmm1,<r6=%xmm7
  801. addpd %xmm1,%xmm7
  802. # qhasm: t8 = ab1six
  803. # asm 1: movdqa <ab1six=int6464#13,>t8=int6464#2
  804. # asm 2: movdqa <ab1six=%xmm12,>t8=%xmm1
  805. movdqa %xmm12,%xmm1
  806. # qhasm: float6464 t8 *= *(int128 *)(t2p + 112)
  807. # asm 1: mulpd 112(<t2p=int64#4),<t8=int6464#2
  808. # asm 2: mulpd 112(<t2p=%rcx),<t8=%xmm1
  809. mulpd 112(%rcx),%xmm1
  810. # qhasm: float6464 r8 +=t8
  811. # asm 1: addpd <t8=int6464#2,<r8=int6464#10
  812. # asm 2: addpd <t8=%xmm1,<r8=%xmm9
  813. addpd %xmm1,%xmm9
  814. # qhasm: t9 = ab1six
  815. # asm 1: movdqa <ab1six=int6464#13,>t9=int6464#2
  816. # asm 2: movdqa <ab1six=%xmm12,>t9=%xmm1
  817. movdqa %xmm12,%xmm1
  818. # qhasm: float6464 t9 *= *(int128 *)(t2p + 128)
  819. # asm 1: mulpd 128(<t2p=int64#4),<t9=int6464#2
  820. # asm 2: mulpd 128(<t2p=%rcx),<t9=%xmm1
  821. mulpd 128(%rcx),%xmm1
  822. # qhasm: float6464 r9 +=t9
  823. # asm 1: addpd <t9=int6464#2,<r9=int6464#11
  824. # asm 2: addpd <t9=%xmm1,<r9=%xmm10
  825. addpd %xmm1,%xmm10
  826. # qhasm: t10 = ab1six
  827. # asm 1: movdqa <ab1six=int6464#13,>t10=int6464#2
  828. # asm 2: movdqa <ab1six=%xmm12,>t10=%xmm1
  829. movdqa %xmm12,%xmm1
  830. # qhasm: float6464 t10 *= *(int128 *)(t2p + 144)
  831. # asm 1: mulpd 144(<t2p=int64#4),<t10=int6464#2
  832. # asm 2: mulpd 144(<t2p=%rcx),<t10=%xmm1
  833. mulpd 144(%rcx),%xmm1
  834. # qhasm: float6464 r10 +=t10
  835. # asm 1: addpd <t10=int6464#2,<r10=int6464#12
  836. # asm 2: addpd <t10=%xmm1,<r10=%xmm11
  837. addpd %xmm1,%xmm11
  838. # qhasm: t11 = ab1six
  839. # asm 1: movdqa <ab1six=int6464#13,>t11=int6464#2
  840. # asm 2: movdqa <ab1six=%xmm12,>t11=%xmm1
  841. movdqa %xmm12,%xmm1
  842. # qhasm: float6464 t11 *= *(int128 *)(t2p + 160)
  843. # asm 1: mulpd 160(<t2p=int64#4),<t11=int6464#2
  844. # asm 2: mulpd 160(<t2p=%rcx),<t11=%xmm1
  845. mulpd 160(%rcx),%xmm1
  846. # qhasm: float6464 r11 +=t11
  847. # asm 1: addpd <t11=int6464#2,<r11=int6464#1
  848. # asm 2: addpd <t11=%xmm1,<r11=%xmm0
  849. addpd %xmm1,%xmm0
  850. # qhasm: r12 = ab1six
  851. # asm 1: movdqa <ab1six=int6464#13,>r12=int6464#2
  852. # asm 2: movdqa <ab1six=%xmm12,>r12=%xmm1
  853. movdqa %xmm12,%xmm1
  854. # qhasm: float6464 r12 *= *(int128 *)(t2p + 176)
  855. # asm 1: mulpd 176(<t2p=int64#4),<r12=int6464#2
  856. # asm 2: mulpd 176(<t2p=%rcx),<r12=%xmm1
  857. mulpd 176(%rcx),%xmm1
  858. # qhasm: *(int128 *)(rp + 16) = r1
  859. # asm 1: movdqa <r1=int6464#3,16(<rp=int64#3)
  860. # asm 2: movdqa <r1=%xmm2,16(<rp=%rdx)
  861. movdqa %xmm2,16(%rdx)
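# Editorial note on SIX_SIX: ab1six = 6*ab1 is used for most partial
# products in the column above, while the products accumulated into r1 and
# r7 use the unscaled ab1. Presumably this matches the mixed-radix
# weighting of dclxvi's 12-coefficient representation, in which certain
# cross products pick up an extra factor of 6; the exact pattern follows
# the qhasm source this file was generated from.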
  862. # qhasm: ab2 = *(int128 *)(t1p + 32)
  863. # asm 1: movdqa 32(<t1p=int64#2),>ab2=int6464#3
  864. # asm 2: movdqa 32(<t1p=%rsi),>ab2=%xmm2
  865. movdqa 32(%rsi),%xmm2
  866. # qhasm: ab2six = ab2
  867. # asm 1: movdqa <ab2=int6464#3,>ab2six=int6464#13
  868. # asm 2: movdqa <ab2=%xmm2,>ab2six=%xmm12
  869. movdqa %xmm2,%xmm12
  870. # qhasm: float6464 ab2six *= SIX_SIX
  871. # asm 1: mulpd SIX_SIX,<ab2six=int6464#13
  872. # asm 2: mulpd SIX_SIX,<ab2six=%xmm12
  873. mulpd SIX_SIX,%xmm12
  874. # qhasm: t2 = ab2
  875. # asm 1: movdqa <ab2=int6464#3,>t2=int6464#14
  876. # asm 2: movdqa <ab2=%xmm2,>t2=%xmm13
  877. movdqa %xmm2,%xmm13
  878. # qhasm: float6464 t2 *= *(int128 *)(t2p + 0)
  879. # asm 1: mulpd 0(<t2p=int64#4),<t2=int6464#14
  880. # asm 2: mulpd 0(<t2p=%rcx),<t2=%xmm13
  881. mulpd 0(%rcx),%xmm13
  882. # qhasm: float6464 r2 +=t2
  883. # asm 1: addpd <t2=int6464#14,<r2=int6464#4
  884. # asm 2: addpd <t2=%xmm13,<r2=%xmm3
  885. addpd %xmm13,%xmm3
  886. # qhasm: t7 = ab2
  887. # asm 1: movdqa <ab2=int6464#3,>t7=int6464#14
  888. # asm 2: movdqa <ab2=%xmm2,>t7=%xmm13
  889. movdqa %xmm2,%xmm13
  890. # qhasm: float6464 t7 *= *(int128 *)(t2p + 80)
  891. # asm 1: mulpd 80(<t2p=int64#4),<t7=int6464#14
  892. # asm 2: mulpd 80(<t2p=%rcx),<t7=%xmm13
  893. mulpd 80(%rcx),%xmm13
  894. # qhasm: float6464 r7 +=t7
  895. # asm 1: addpd <t7=int6464#14,<r7=int6464#9
  896. # asm 2: addpd <t7=%xmm13,<r7=%xmm8
  897. addpd %xmm13,%xmm8
  898. # qhasm: t8 = ab2
  899. # asm 1: movdqa <ab2=int6464#3,>t8=int6464#14
  900. # asm 2: movdqa <ab2=%xmm2,>t8=%xmm13
  901. movdqa %xmm2,%xmm13
  902. # qhasm: float6464 t8 *= *(int128 *)(t2p + 96)
  903. # asm 1: mulpd 96(<t2p=int64#4),<t8=int6464#14
  904. # asm 2: mulpd 96(<t2p=%rcx),<t8=%xmm13
  905. mulpd 96(%rcx),%xmm13
  906. # qhasm: float6464 r8 +=t8
  907. # asm 1: addpd <t8=int6464#14,<r8=int6464#10
  908. # asm 2: addpd <t8=%xmm13,<r8=%xmm9
  909. addpd %xmm13,%xmm9
  910. # qhasm: r13 = ab2
  911. # asm 1: movdqa <ab2=int6464#3,>r13=int6464#3
  912. # asm 2: movdqa <ab2=%xmm2,>r13=%xmm2
  913. movdqa %xmm2,%xmm2
  914. # qhasm: float6464 r13 *= *(int128 *)(t2p + 176)
  915. # asm 1: mulpd 176(<t2p=int64#4),<r13=int6464#3
  916. # asm 2: mulpd 176(<t2p=%rcx),<r13=%xmm2
  917. mulpd 176(%rcx),%xmm2
  918. # qhasm: t3 = ab2six
  919. # asm 1: movdqa <ab2six=int6464#13,>t3=int6464#14
  920. # asm 2: movdqa <ab2six=%xmm12,>t3=%xmm13
  921. movdqa %xmm12,%xmm13
  922. # qhasm: float6464 t3 *= *(int128 *)(t2p + 16)
  923. # asm 1: mulpd 16(<t2p=int64#4),<t3=int6464#14
  924. # asm 2: mulpd 16(<t2p=%rcx),<t3=%xmm13
  925. mulpd 16(%rcx),%xmm13
  926. # qhasm: float6464 r3 +=t3
  927. # asm 1: addpd <t3=int6464#14,<r3=int6464#5
  928. # asm 2: addpd <t3=%xmm13,<r3=%xmm4
  929. addpd %xmm13,%xmm4
  930. # qhasm: t4 = ab2six
  931. # asm 1: movdqa <ab2six=int6464#13,>t4=int6464#14
  932. # asm 2: movdqa <ab2six=%xmm12,>t4=%xmm13
  933. movdqa %xmm12,%xmm13
  934. # qhasm: float6464 t4 *= *(int128 *)(t2p + 32)
  935. # asm 1: mulpd 32(<t2p=int64#4),<t4=int6464#14
  936. # asm 2: mulpd 32(<t2p=%rcx),<t4=%xmm13
  937. mulpd 32(%rcx),%xmm13
  938. # qhasm: float6464 r4 +=t4
  939. # asm 1: addpd <t4=int6464#14,<r4=int6464#6
  940. # asm 2: addpd <t4=%xmm13,<r4=%xmm5
  941. addpd %xmm13,%xmm5
  942. # qhasm: t5 = ab2six
  943. # asm 1: movdqa <ab2six=int6464#13,>t5=int6464#14
  944. # asm 2: movdqa <ab2six=%xmm12,>t5=%xmm13
  945. movdqa %xmm12,%xmm13
  946. # qhasm: float6464 t5 *= *(int128 *)(t2p + 48)
  947. # asm 1: mulpd 48(<t2p=int64#4),<t5=int6464#14
  948. # asm 2: mulpd 48(<t2p=%rcx),<t5=%xmm13
  949. mulpd 48(%rcx),%xmm13
  950. # qhasm: float6464 r5 +=t5
  951. # asm 1: addpd <t5=int6464#14,<r5=int6464#7
  952. # asm 2: addpd <t5=%xmm13,<r5=%xmm6
  953. addpd %xmm13,%xmm6
  954. # qhasm: t6 = ab2six
  955. # asm 1: movdqa <ab2six=int6464#13,>t6=int6464#14
  956. # asm 2: movdqa <ab2six=%xmm12,>t6=%xmm13
  957. movdqa %xmm12,%xmm13
  958. # qhasm: float6464 t6 *= *(int128 *)(t2p + 64)
  959. # asm 1: mulpd 64(<t2p=int64#4),<t6=int6464#14
  960. # asm 2: mulpd 64(<t2p=%rcx),<t6=%xmm13
  961. mulpd 64(%rcx),%xmm13
  962. # qhasm: float6464 r6 +=t6
  963. # asm 1: addpd <t6=int6464#14,<r6=int6464#8
  964. # asm 2: addpd <t6=%xmm13,<r6=%xmm7
  965. addpd %xmm13,%xmm7
  966. # qhasm: t9 = ab2six
  967. # asm 1: movdqa <ab2six=int6464#13,>t9=int6464#14
  968. # asm 2: movdqa <ab2six=%xmm12,>t9=%xmm13
  969. movdqa %xmm12,%xmm13
  970. # qhasm: float6464 t9 *= *(int128 *)(t2p + 112)
  971. # asm 1: mulpd 112(<t2p=int64#4),<t9=int6464#14
  972. # asm 2: mulpd 112(<t2p=%rcx),<t9=%xmm13
  973. mulpd 112(%rcx),%xmm13
  974. # qhasm: float6464 r9 +=t9
  975. # asm 1: addpd <t9=int6464#14,<r9=int6464#11
  976. # asm 2: addpd <t9=%xmm13,<r9=%xmm10
  977. addpd %xmm13,%xmm10
  978. # qhasm: t10 = ab2six
  979. # asm 1: movdqa <ab2six=int6464#13,>t10=int6464#14
  980. # asm 2: movdqa <ab2six=%xmm12,>t10=%xmm13
  981. movdqa %xmm12,%xmm13
  982. # qhasm: float6464 t10 *= *(int128 *)(t2p + 128)
  983. # asm 1: mulpd 128(<t2p=int64#4),<t10=int6464#14
  984. # asm 2: mulpd 128(<t2p=%rcx),<t10=%xmm13
  985. mulpd 128(%rcx),%xmm13
  986. # qhasm: float6464 r10 +=t10
  987. # asm 1: addpd <t10=int6464#14,<r10=int6464#12
  988. # asm 2: addpd <t10=%xmm13,<r10=%xmm11
  989. addpd %xmm13,%xmm11
  990. # qhasm: t11 = ab2six
  991. # asm 1: movdqa <ab2six=int6464#13,>t11=int6464#14
  992. # asm 2: movdqa <ab2six=%xmm12,>t11=%xmm13
  993. movdqa %xmm12,%xmm13
  994. # qhasm: float6464 t11 *= *(int128 *)(t2p + 144)
  995. # asm 1: mulpd 144(<t2p=int64#4),<t11=int6464#14
  996. # asm 2: mulpd 144(<t2p=%rcx),<t11=%xmm13
  997. mulpd 144(%rcx),%xmm13
  998. # qhasm: float6464 r11 +=t11
  999. # asm 1: addpd <t11=int6464#14,<r11=int6464#1
  1000. # asm 2: addpd <t11=%xmm13,<r11=%xmm0
  1001. addpd %xmm13,%xmm0
  1002. # qhasm: t12 = ab2six
  1003. # asm 1: movdqa <ab2six=int6464#13,>t12=int6464#13
  1004. # asm 2: movdqa <ab2six=%xmm12,>t12=%xmm12
  1005. movdqa %xmm12,%xmm12
  1006. # qhasm: float6464 t12 *= *(int128 *)(t2p + 160)
  1007. # asm 1: mulpd 160(<t2p=int64#4),<t12=int6464#13
  1008. # asm 2: mulpd 160(<t2p=%rcx),<t12=%xmm12
  1009. mulpd 160(%rcx),%xmm12
  1010. # qhasm: float6464 r12 +=t12
  1011. # asm 1: addpd <t12=int6464#13,<r12=int6464#2
  1012. # asm 2: addpd <t12=%xmm12,<r12=%xmm1
  1013. addpd %xmm12,%xmm1
  1014. # qhasm: *(int128 *)(rp + 32) = r2
  1015. # asm 1: movdqa <r2=int6464#4,32(<rp=int64#3)
  1016. # asm 2: movdqa <r2=%xmm3,32(<rp=%rdx)
  1017. movdqa %xmm3,32(%rdx)
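# Editorial orientation comment; the pseudocode below is an illustrative
# sketch and is not part of the qhasm-generated output.  Each of the
# remaining blocks handles one input limb abk = *(t1p + 16*k) and folds
# its twelve products with *(t2p + 16*j) into the output limbs r(k+j):
# some of the products use the SIX_SIX-scaled copy abksix instead of abk,
# and the j = 11 product creates the new limb r(k+11) rather than adding
# to it.  In C-like form (pick(k,j) is shorthand, not a symbol in this
# file, for whichever of abk/abksix the block uses at position j):
#
#     abk = t1p[k];  abksix = abk * SIX_SIX;
#     for (j = 0; j < 11; j++) r[k+j] += pick(k,j) * t2p[j];
#     r[k+11] = pick(k,11) * t2p[11];
#     rp[k]   = r[k];        /* r[k] receives no further contributions */
#
# ab3 block: r3..r13 accumulate, r14 is created; r3 is stored to rp + 48
# at the end of the block.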
  1018. # qhasm: ab3 = *(int128 *)(t1p + 48)
  1019. # asm 1: movdqa 48(<t1p=int64#2),>ab3=int6464#4
  1020. # asm 2: movdqa 48(<t1p=%rsi),>ab3=%xmm3
  1021. movdqa 48(%rsi),%xmm3
  1022. # qhasm: ab3six = ab3
  1023. # asm 1: movdqa <ab3=int6464#4,>ab3six=int6464#13
  1024. # asm 2: movdqa <ab3=%xmm3,>ab3six=%xmm12
  1025. movdqa %xmm3,%xmm12
  1026. # qhasm: float6464 ab3six *= SIX_SIX
  1027. # asm 1: mulpd SIX_SIX,<ab3six=int6464#13
  1028. # asm 2: mulpd SIX_SIX,<ab3six=%xmm12
  1029. mulpd SIX_SIX,%xmm12
  1030. # qhasm: t3 = ab3
  1031. # asm 1: movdqa <ab3=int6464#4,>t3=int6464#14
  1032. # asm 2: movdqa <ab3=%xmm3,>t3=%xmm13
  1033. movdqa %xmm3,%xmm13
  1034. # qhasm: float6464 t3 *= *(int128 *)(t2p + 0)
  1035. # asm 1: mulpd 0(<t2p=int64#4),<t3=int6464#14
  1036. # asm 2: mulpd 0(<t2p=%rcx),<t3=%xmm13
  1037. mulpd 0(%rcx),%xmm13
  1038. # qhasm: float6464 r3 +=t3
  1039. # asm 1: addpd <t3=int6464#14,<r3=int6464#5
  1040. # asm 2: addpd <t3=%xmm13,<r3=%xmm4
  1041. addpd %xmm13,%xmm4
  1042. # qhasm: t7 = ab3
  1043. # asm 1: movdqa <ab3=int6464#4,>t7=int6464#14
  1044. # asm 2: movdqa <ab3=%xmm3,>t7=%xmm13
  1045. movdqa %xmm3,%xmm13
  1046. # qhasm: float6464 t7 *= *(int128 *)(t2p + 64)
  1047. # asm 1: mulpd 64(<t2p=int64#4),<t7=int6464#14
  1048. # asm 2: mulpd 64(<t2p=%rcx),<t7=%xmm13
  1049. mulpd 64(%rcx),%xmm13
  1050. # qhasm: float6464 r7 +=t7
  1051. # asm 1: addpd <t7=int6464#14,<r7=int6464#9
  1052. # asm 2: addpd <t7=%xmm13,<r7=%xmm8
  1053. addpd %xmm13,%xmm8
  1054. # qhasm: t8 = ab3
  1055. # asm 1: movdqa <ab3=int6464#4,>t8=int6464#14
  1056. # asm 2: movdqa <ab3=%xmm3,>t8=%xmm13
  1057. movdqa %xmm3,%xmm13
  1058. # qhasm: float6464 t8 *= *(int128 *)(t2p + 80)
  1059. # asm 1: mulpd 80(<t2p=int64#4),<t8=int6464#14
  1060. # asm 2: mulpd 80(<t2p=%rcx),<t8=%xmm13
  1061. mulpd 80(%rcx),%xmm13
  1062. # qhasm: float6464 r8 +=t8
  1063. # asm 1: addpd <t8=int6464#14,<r8=int6464#10
  1064. # asm 2: addpd <t8=%xmm13,<r8=%xmm9
  1065. addpd %xmm13,%xmm9
  1066. # qhasm: t9 = ab3
  1067. # asm 1: movdqa <ab3=int6464#4,>t9=int6464#14
  1068. # asm 2: movdqa <ab3=%xmm3,>t9=%xmm13
  1069. movdqa %xmm3,%xmm13
  1070. # qhasm: float6464 t9 *= *(int128 *)(t2p + 96)
  1071. # asm 1: mulpd 96(<t2p=int64#4),<t9=int6464#14
  1072. # asm 2: mulpd 96(<t2p=%rcx),<t9=%xmm13
  1073. mulpd 96(%rcx),%xmm13
  1074. # qhasm: float6464 r9 +=t9
  1075. # asm 1: addpd <t9=int6464#14,<r9=int6464#11
  1076. # asm 2: addpd <t9=%xmm13,<r9=%xmm10
  1077. addpd %xmm13,%xmm10
  1078. # qhasm: t13 = ab3
  1079. # asm 1: movdqa <ab3=int6464#4,>t13=int6464#14
  1080. # asm 2: movdqa <ab3=%xmm3,>t13=%xmm13
  1081. movdqa %xmm3,%xmm13
  1082. # qhasm: float6464 t13 *= *(int128 *)(t2p + 160)
  1083. # asm 1: mulpd 160(<t2p=int64#4),<t13=int6464#14
  1084. # asm 2: mulpd 160(<t2p=%rcx),<t13=%xmm13
  1085. mulpd 160(%rcx),%xmm13
  1086. # qhasm: float6464 r13 +=t13
  1087. # asm 1: addpd <t13=int6464#14,<r13=int6464#3
  1088. # asm 2: addpd <t13=%xmm13,<r13=%xmm2
  1089. addpd %xmm13,%xmm2
  1090. # qhasm: r14 = ab3
  1091. # asm 1: movdqa <ab3=int6464#4,>r14=int6464#4
  1092. # asm 2: movdqa <ab3=%xmm3,>r14=%xmm3
  1093. movdqa %xmm3,%xmm3
  1094. # qhasm: float6464 r14 *= *(int128 *)(t2p + 176)
  1095. # asm 1: mulpd 176(<t2p=int64#4),<r14=int6464#4
  1096. # asm 2: mulpd 176(<t2p=%rcx),<r14=%xmm3
  1097. mulpd 176(%rcx),%xmm3
  1098. # qhasm: t4 = ab3six
  1099. # asm 1: movdqa <ab3six=int6464#13,>t4=int6464#14
  1100. # asm 2: movdqa <ab3six=%xmm12,>t4=%xmm13
  1101. movdqa %xmm12,%xmm13
  1102. # qhasm: float6464 t4 *= *(int128 *)(t2p + 16)
  1103. # asm 1: mulpd 16(<t2p=int64#4),<t4=int6464#14
  1104. # asm 2: mulpd 16(<t2p=%rcx),<t4=%xmm13
  1105. mulpd 16(%rcx),%xmm13
  1106. # qhasm: float6464 r4 +=t4
  1107. # asm 1: addpd <t4=int6464#14,<r4=int6464#6
  1108. # asm 2: addpd <t4=%xmm13,<r4=%xmm5
  1109. addpd %xmm13,%xmm5
  1110. # qhasm: t5 = ab3six
  1111. # asm 1: movdqa <ab3six=int6464#13,>t5=int6464#14
  1112. # asm 2: movdqa <ab3six=%xmm12,>t5=%xmm13
  1113. movdqa %xmm12,%xmm13
  1114. # qhasm: float6464 t5 *= *(int128 *)(t2p + 32)
  1115. # asm 1: mulpd 32(<t2p=int64#4),<t5=int6464#14
  1116. # asm 2: mulpd 32(<t2p=%rcx),<t5=%xmm13
  1117. mulpd 32(%rcx),%xmm13
  1118. # qhasm: float6464 r5 +=t5
  1119. # asm 1: addpd <t5=int6464#14,<r5=int6464#7
  1120. # asm 2: addpd <t5=%xmm13,<r5=%xmm6
  1121. addpd %xmm13,%xmm6
  1122. # qhasm: t6 = ab3six
  1123. # asm 1: movdqa <ab3six=int6464#13,>t6=int6464#14
  1124. # asm 2: movdqa <ab3six=%xmm12,>t6=%xmm13
  1125. movdqa %xmm12,%xmm13
  1126. # qhasm: float6464 t6 *= *(int128 *)(t2p + 48)
  1127. # asm 1: mulpd 48(<t2p=int64#4),<t6=int6464#14
  1128. # asm 2: mulpd 48(<t2p=%rcx),<t6=%xmm13
  1129. mulpd 48(%rcx),%xmm13
  1130. # qhasm: float6464 r6 +=t6
  1131. # asm 1: addpd <t6=int6464#14,<r6=int6464#8
  1132. # asm 2: addpd <t6=%xmm13,<r6=%xmm7
  1133. addpd %xmm13,%xmm7
  1134. # qhasm: t10 = ab3six
  1135. # asm 1: movdqa <ab3six=int6464#13,>t10=int6464#14
  1136. # asm 2: movdqa <ab3six=%xmm12,>t10=%xmm13
  1137. movdqa %xmm12,%xmm13
  1138. # qhasm: float6464 t10 *= *(int128 *)(t2p + 112)
  1139. # asm 1: mulpd 112(<t2p=int64#4),<t10=int6464#14
  1140. # asm 2: mulpd 112(<t2p=%rcx),<t10=%xmm13
  1141. mulpd 112(%rcx),%xmm13
  1142. # qhasm: float6464 r10 +=t10
  1143. # asm 1: addpd <t10=int6464#14,<r10=int6464#12
  1144. # asm 2: addpd <t10=%xmm13,<r10=%xmm11
  1145. addpd %xmm13,%xmm11
  1146. # qhasm: t11 = ab3six
  1147. # asm 1: movdqa <ab3six=int6464#13,>t11=int6464#14
  1148. # asm 2: movdqa <ab3six=%xmm12,>t11=%xmm13
  1149. movdqa %xmm12,%xmm13
  1150. # qhasm: float6464 t11 *= *(int128 *)(t2p + 128)
  1151. # asm 1: mulpd 128(<t2p=int64#4),<t11=int6464#14
  1152. # asm 2: mulpd 128(<t2p=%rcx),<t11=%xmm13
  1153. mulpd 128(%rcx),%xmm13
  1154. # qhasm: float6464 r11 +=t11
  1155. # asm 1: addpd <t11=int6464#14,<r11=int6464#1
  1156. # asm 2: addpd <t11=%xmm13,<r11=%xmm0
  1157. addpd %xmm13,%xmm0
  1158. # qhasm: t12 = ab3six
  1159. # asm 1: movdqa <ab3six=int6464#13,>t12=int6464#13
  1160. # asm 2: movdqa <ab3six=%xmm12,>t12=%xmm12
  1161. movdqa %xmm12,%xmm12
  1162. # qhasm: float6464 t12 *= *(int128 *)(t2p + 144)
  1163. # asm 1: mulpd 144(<t2p=int64#4),<t12=int6464#13
  1164. # asm 2: mulpd 144(<t2p=%rcx),<t12=%xmm12
  1165. mulpd 144(%rcx),%xmm12
  1166. # qhasm: float6464 r12 +=t12
  1167. # asm 1: addpd <t12=int6464#13,<r12=int6464#2
  1168. # asm 2: addpd <t12=%xmm12,<r12=%xmm1
  1169. addpd %xmm12,%xmm1
  1170. # qhasm: *(int128 *)(rp + 48) = r3
  1171. # asm 1: movdqa <r3=int6464#5,48(<rp=int64#3)
  1172. # asm 2: movdqa <r3=%xmm4,48(<rp=%rdx)
  1173. movdqa %xmm4,48(%rdx)
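# ab4 block: ab4 = *(t1p + 64); r4..r14 accumulate, r15 is created;
# r4 is stored to rp + 64 at the end of the block.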
  1174. # qhasm: ab4 = *(int128 *)(t1p + 64)
  1175. # asm 1: movdqa 64(<t1p=int64#2),>ab4=int6464#5
  1176. # asm 2: movdqa 64(<t1p=%rsi),>ab4=%xmm4
  1177. movdqa 64(%rsi),%xmm4
  1178. # qhasm: ab4six = ab4
  1179. # asm 1: movdqa <ab4=int6464#5,>ab4six=int6464#13
  1180. # asm 2: movdqa <ab4=%xmm4,>ab4six=%xmm12
  1181. movdqa %xmm4,%xmm12
  1182. # qhasm: float6464 ab4six *= SIX_SIX
  1183. # asm 1: mulpd SIX_SIX,<ab4six=int6464#13
  1184. # asm 2: mulpd SIX_SIX,<ab4six=%xmm12
  1185. mulpd SIX_SIX,%xmm12
  1186. # qhasm: t4 = ab4
  1187. # asm 1: movdqa <ab4=int6464#5,>t4=int6464#14
  1188. # asm 2: movdqa <ab4=%xmm4,>t4=%xmm13
  1189. movdqa %xmm4,%xmm13
  1190. # qhasm: float6464 t4 *= *(int128 *)(t2p + 0)
  1191. # asm 1: mulpd 0(<t2p=int64#4),<t4=int6464#14
  1192. # asm 2: mulpd 0(<t2p=%rcx),<t4=%xmm13
  1193. mulpd 0(%rcx),%xmm13
  1194. # qhasm: float6464 r4 +=t4
  1195. # asm 1: addpd <t4=int6464#14,<r4=int6464#6
  1196. # asm 2: addpd <t4=%xmm13,<r4=%xmm5
  1197. addpd %xmm13,%xmm5
  1198. # qhasm: t7 = ab4
  1199. # asm 1: movdqa <ab4=int6464#5,>t7=int6464#14
  1200. # asm 2: movdqa <ab4=%xmm4,>t7=%xmm13
  1201. movdqa %xmm4,%xmm13
  1202. # qhasm: float6464 t7 *= *(int128 *)(t2p + 48)
  1203. # asm 1: mulpd 48(<t2p=int64#4),<t7=int6464#14
  1204. # asm 2: mulpd 48(<t2p=%rcx),<t7=%xmm13
  1205. mulpd 48(%rcx),%xmm13
  1206. # qhasm: float6464 r7 +=t7
  1207. # asm 1: addpd <t7=int6464#14,<r7=int6464#9
  1208. # asm 2: addpd <t7=%xmm13,<r7=%xmm8
  1209. addpd %xmm13,%xmm8
  1210. # qhasm: t8 = ab4
  1211. # asm 1: movdqa <ab4=int6464#5,>t8=int6464#14
  1212. # asm 2: movdqa <ab4=%xmm4,>t8=%xmm13
  1213. movdqa %xmm4,%xmm13
  1214. # qhasm: float6464 t8 *= *(int128 *)(t2p + 64)
  1215. # asm 1: mulpd 64(<t2p=int64#4),<t8=int6464#14
  1216. # asm 2: mulpd 64(<t2p=%rcx),<t8=%xmm13
  1217. mulpd 64(%rcx),%xmm13
  1218. # qhasm: float6464 r8 +=t8
  1219. # asm 1: addpd <t8=int6464#14,<r8=int6464#10
  1220. # asm 2: addpd <t8=%xmm13,<r8=%xmm9
  1221. addpd %xmm13,%xmm9
  1222. # qhasm: t9 = ab4
  1223. # asm 1: movdqa <ab4=int6464#5,>t9=int6464#14
  1224. # asm 2: movdqa <ab4=%xmm4,>t9=%xmm13
  1225. movdqa %xmm4,%xmm13
  1226. # qhasm: float6464 t9 *= *(int128 *)(t2p + 80)
  1227. # asm 1: mulpd 80(<t2p=int64#4),<t9=int6464#14
  1228. # asm 2: mulpd 80(<t2p=%rcx),<t9=%xmm13
  1229. mulpd 80(%rcx),%xmm13
  1230. # qhasm: float6464 r9 +=t9
  1231. # asm 1: addpd <t9=int6464#14,<r9=int6464#11
  1232. # asm 2: addpd <t9=%xmm13,<r9=%xmm10
  1233. addpd %xmm13,%xmm10
  1234. # qhasm: t10 = ab4
  1235. # asm 1: movdqa <ab4=int6464#5,>t10=int6464#14
  1236. # asm 2: movdqa <ab4=%xmm4,>t10=%xmm13
  1237. movdqa %xmm4,%xmm13
  1238. # qhasm: float6464 t10 *= *(int128 *)(t2p + 96)
  1239. # asm 1: mulpd 96(<t2p=int64#4),<t10=int6464#14
  1240. # asm 2: mulpd 96(<t2p=%rcx),<t10=%xmm13
  1241. mulpd 96(%rcx),%xmm13
  1242. # qhasm: float6464 r10 +=t10
  1243. # asm 1: addpd <t10=int6464#14,<r10=int6464#12
  1244. # asm 2: addpd <t10=%xmm13,<r10=%xmm11
  1245. addpd %xmm13,%xmm11
  1246. # qhasm: t13 = ab4
  1247. # asm 1: movdqa <ab4=int6464#5,>t13=int6464#14
  1248. # asm 2: movdqa <ab4=%xmm4,>t13=%xmm13
  1249. movdqa %xmm4,%xmm13
  1250. # qhasm: float6464 t13 *= *(int128 *)(t2p + 144)
  1251. # asm 1: mulpd 144(<t2p=int64#4),<t13=int6464#14
  1252. # asm 2: mulpd 144(<t2p=%rcx),<t13=%xmm13
  1253. mulpd 144(%rcx),%xmm13
  1254. # qhasm: float6464 r13 +=t13
  1255. # asm 1: addpd <t13=int6464#14,<r13=int6464#3
  1256. # asm 2: addpd <t13=%xmm13,<r13=%xmm2
  1257. addpd %xmm13,%xmm2
  1258. # qhasm: t14 = ab4
  1259. # asm 1: movdqa <ab4=int6464#5,>t14=int6464#14
  1260. # asm 2: movdqa <ab4=%xmm4,>t14=%xmm13
  1261. movdqa %xmm4,%xmm13
  1262. # qhasm: float6464 t14 *= *(int128 *)(t2p + 160)
  1263. # asm 1: mulpd 160(<t2p=int64#4),<t14=int6464#14
  1264. # asm 2: mulpd 160(<t2p=%rcx),<t14=%xmm13
  1265. mulpd 160(%rcx),%xmm13
  1266. # qhasm: float6464 r14 +=t14
  1267. # asm 1: addpd <t14=int6464#14,<r14=int6464#4
  1268. # asm 2: addpd <t14=%xmm13,<r14=%xmm3
  1269. addpd %xmm13,%xmm3
  1270. # qhasm: r15 = ab4
  1271. # asm 1: movdqa <ab4=int6464#5,>r15=int6464#5
  1272. # asm 2: movdqa <ab4=%xmm4,>r15=%xmm4
  1273. movdqa %xmm4,%xmm4
  1274. # qhasm: float6464 r15 *= *(int128 *)(t2p + 176)
  1275. # asm 1: mulpd 176(<t2p=int64#4),<r15=int6464#5
  1276. # asm 2: mulpd 176(<t2p=%rcx),<r15=%xmm4
  1277. mulpd 176(%rcx),%xmm4
  1278. # qhasm: t5 = ab4six
  1279. # asm 1: movdqa <ab4six=int6464#13,>t5=int6464#14
  1280. # asm 2: movdqa <ab4six=%xmm12,>t5=%xmm13
  1281. movdqa %xmm12,%xmm13
  1282. # qhasm: float6464 t5 *= *(int128 *)(t2p + 16)
  1283. # asm 1: mulpd 16(<t2p=int64#4),<t5=int6464#14
  1284. # asm 2: mulpd 16(<t2p=%rcx),<t5=%xmm13
  1285. mulpd 16(%rcx),%xmm13
  1286. # qhasm: float6464 r5 +=t5
  1287. # asm 1: addpd <t5=int6464#14,<r5=int6464#7
  1288. # asm 2: addpd <t5=%xmm13,<r5=%xmm6
  1289. addpd %xmm13,%xmm6
  1290. # qhasm: t6 = ab4six
  1291. # asm 1: movdqa <ab4six=int6464#13,>t6=int6464#14
  1292. # asm 2: movdqa <ab4six=%xmm12,>t6=%xmm13
  1293. movdqa %xmm12,%xmm13
  1294. # qhasm: float6464 t6 *= *(int128 *)(t2p + 32)
  1295. # asm 1: mulpd 32(<t2p=int64#4),<t6=int6464#14
  1296. # asm 2: mulpd 32(<t2p=%rcx),<t6=%xmm13
  1297. mulpd 32(%rcx),%xmm13
  1298. # qhasm: float6464 r6 +=t6
  1299. # asm 1: addpd <t6=int6464#14,<r6=int6464#8
  1300. # asm 2: addpd <t6=%xmm13,<r6=%xmm7
  1301. addpd %xmm13,%xmm7
  1302. # qhasm: t11 = ab4six
  1303. # asm 1: movdqa <ab4six=int6464#13,>t11=int6464#14
  1304. # asm 2: movdqa <ab4six=%xmm12,>t11=%xmm13
  1305. movdqa %xmm12,%xmm13
  1306. # qhasm: float6464 t11 *= *(int128 *)(t2p + 112)
  1307. # asm 1: mulpd 112(<t2p=int64#4),<t11=int6464#14
  1308. # asm 2: mulpd 112(<t2p=%rcx),<t11=%xmm13
  1309. mulpd 112(%rcx),%xmm13
  1310. # qhasm: float6464 r11 +=t11
  1311. # asm 1: addpd <t11=int6464#14,<r11=int6464#1
  1312. # asm 2: addpd <t11=%xmm13,<r11=%xmm0
  1313. addpd %xmm13,%xmm0
  1314. # qhasm: t12 = ab4six
  1315. # asm 1: movdqa <ab4six=int6464#13,>t12=int6464#13
  1316. # asm 2: movdqa <ab4six=%xmm12,>t12=%xmm12
  1317. movdqa %xmm12,%xmm12
  1318. # qhasm: float6464 t12 *= *(int128 *)(t2p + 128)
  1319. # asm 1: mulpd 128(<t2p=int64#4),<t12=int6464#13
  1320. # asm 2: mulpd 128(<t2p=%rcx),<t12=%xmm12
  1321. mulpd 128(%rcx),%xmm12
  1322. # qhasm: float6464 r12 +=t12
  1323. # asm 1: addpd <t12=int6464#13,<r12=int6464#2
  1324. # asm 2: addpd <t12=%xmm12,<r12=%xmm1
  1325. addpd %xmm12,%xmm1
  1326. # qhasm: *(int128 *)(rp + 64) = r4
  1327. # asm 1: movdqa <r4=int6464#6,64(<rp=int64#3)
  1328. # asm 2: movdqa <r4=%xmm5,64(<rp=%rdx)
  1329. movdqa %xmm5,64(%rdx)
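# ab5 block: r5..r15 accumulate, r16 is created; r5 is stored to rp + 80
# at the end of the block.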
  1330. # qhasm: ab5 = *(int128 *)(t1p + 80)
  1331. # asm 1: movdqa 80(<t1p=int64#2),>ab5=int6464#6
  1332. # asm 2: movdqa 80(<t1p=%rsi),>ab5=%xmm5
  1333. movdqa 80(%rsi),%xmm5
  1334. # qhasm: ab5six = ab5
  1335. # asm 1: movdqa <ab5=int6464#6,>ab5six=int6464#13
  1336. # asm 2: movdqa <ab5=%xmm5,>ab5six=%xmm12
  1337. movdqa %xmm5,%xmm12
  1338. # qhasm: float6464 ab5six *= SIX_SIX
  1339. # asm 1: mulpd SIX_SIX,<ab5six=int6464#13
  1340. # asm 2: mulpd SIX_SIX,<ab5six=%xmm12
  1341. mulpd SIX_SIX,%xmm12
  1342. # qhasm: t5 = ab5
  1343. # asm 1: movdqa <ab5=int6464#6,>t5=int6464#14
  1344. # asm 2: movdqa <ab5=%xmm5,>t5=%xmm13
  1345. movdqa %xmm5,%xmm13
  1346. # qhasm: float6464 t5 *= *(int128 *)(t2p + 0)
  1347. # asm 1: mulpd 0(<t2p=int64#4),<t5=int6464#14
  1348. # asm 2: mulpd 0(<t2p=%rcx),<t5=%xmm13
  1349. mulpd 0(%rcx),%xmm13
  1350. # qhasm: float6464 r5 +=t5
  1351. # asm 1: addpd <t5=int6464#14,<r5=int6464#7
  1352. # asm 2: addpd <t5=%xmm13,<r5=%xmm6
  1353. addpd %xmm13,%xmm6
  1354. # qhasm: t7 = ab5
  1355. # asm 1: movdqa <ab5=int6464#6,>t7=int6464#14
  1356. # asm 2: movdqa <ab5=%xmm5,>t7=%xmm13
  1357. movdqa %xmm5,%xmm13
  1358. # qhasm: float6464 t7 *= *(int128 *)(t2p + 32)
  1359. # asm 1: mulpd 32(<t2p=int64#4),<t7=int6464#14
  1360. # asm 2: mulpd 32(<t2p=%rcx),<t7=%xmm13
  1361. mulpd 32(%rcx),%xmm13
  1362. # qhasm: float6464 r7 +=t7
  1363. # asm 1: addpd <t7=int6464#14,<r7=int6464#9
  1364. # asm 2: addpd <t7=%xmm13,<r7=%xmm8
  1365. addpd %xmm13,%xmm8
  1366. # qhasm: t8 = ab5
  1367. # asm 1: movdqa <ab5=int6464#6,>t8=int6464#14
  1368. # asm 2: movdqa <ab5=%xmm5,>t8=%xmm13
  1369. movdqa %xmm5,%xmm13
  1370. # qhasm: float6464 t8 *= *(int128 *)(t2p + 48)
  1371. # asm 1: mulpd 48(<t2p=int64#4),<t8=int6464#14
  1372. # asm 2: mulpd 48(<t2p=%rcx),<t8=%xmm13
  1373. mulpd 48(%rcx),%xmm13
  1374. # qhasm: float6464 r8 +=t8
  1375. # asm 1: addpd <t8=int6464#14,<r8=int6464#10
  1376. # asm 2: addpd <t8=%xmm13,<r8=%xmm9
  1377. addpd %xmm13,%xmm9
  1378. # qhasm: t9 = ab5
  1379. # asm 1: movdqa <ab5=int6464#6,>t9=int6464#14
  1380. # asm 2: movdqa <ab5=%xmm5,>t9=%xmm13
  1381. movdqa %xmm5,%xmm13
  1382. # qhasm: float6464 t9 *= *(int128 *)(t2p + 64)
  1383. # asm 1: mulpd 64(<t2p=int64#4),<t9=int6464#14
  1384. # asm 2: mulpd 64(<t2p=%rcx),<t9=%xmm13
  1385. mulpd 64(%rcx),%xmm13
  1386. # qhasm: float6464 r9 +=t9
  1387. # asm 1: addpd <t9=int6464#14,<r9=int6464#11
  1388. # asm 2: addpd <t9=%xmm13,<r9=%xmm10
  1389. addpd %xmm13,%xmm10
  1390. # qhasm: t10 = ab5
  1391. # asm 1: movdqa <ab5=int6464#6,>t10=int6464#14
  1392. # asm 2: movdqa <ab5=%xmm5,>t10=%xmm13
  1393. movdqa %xmm5,%xmm13
  1394. # qhasm: float6464 t10 *= *(int128 *)(t2p + 80)
  1395. # asm 1: mulpd 80(<t2p=int64#4),<t10=int6464#14
  1396. # asm 2: mulpd 80(<t2p=%rcx),<t10=%xmm13
  1397. mulpd 80(%rcx),%xmm13
  1398. # qhasm: float6464 r10 +=t10
  1399. # asm 1: addpd <t10=int6464#14,<r10=int6464#12
  1400. # asm 2: addpd <t10=%xmm13,<r10=%xmm11
  1401. addpd %xmm13,%xmm11
  1402. # qhasm: t11 = ab5
  1403. # asm 1: movdqa <ab5=int6464#6,>t11=int6464#14
  1404. # asm 2: movdqa <ab5=%xmm5,>t11=%xmm13
  1405. movdqa %xmm5,%xmm13
  1406. # qhasm: float6464 t11 *= *(int128 *)(t2p + 96)
  1407. # asm 1: mulpd 96(<t2p=int64#4),<t11=int6464#14
  1408. # asm 2: mulpd 96(<t2p=%rcx),<t11=%xmm13
  1409. mulpd 96(%rcx),%xmm13
  1410. # qhasm: float6464 r11 +=t11
  1411. # asm 1: addpd <t11=int6464#14,<r11=int6464#1
  1412. # asm 2: addpd <t11=%xmm13,<r11=%xmm0
  1413. addpd %xmm13,%xmm0
  1414. # qhasm: t13 = ab5
  1415. # asm 1: movdqa <ab5=int6464#6,>t13=int6464#14
  1416. # asm 2: movdqa <ab5=%xmm5,>t13=%xmm13
  1417. movdqa %xmm5,%xmm13
  1418. # qhasm: float6464 t13 *= *(int128 *)(t2p + 128)
  1419. # asm 1: mulpd 128(<t2p=int64#4),<t13=int6464#14
  1420. # asm 2: mulpd 128(<t2p=%rcx),<t13=%xmm13
  1421. mulpd 128(%rcx),%xmm13
  1422. # qhasm: float6464 r13 +=t13
  1423. # asm 1: addpd <t13=int6464#14,<r13=int6464#3
  1424. # asm 2: addpd <t13=%xmm13,<r13=%xmm2
  1425. addpd %xmm13,%xmm2
  1426. # qhasm: t14 = ab5
  1427. # asm 1: movdqa <ab5=int6464#6,>t14=int6464#14
  1428. # asm 2: movdqa <ab5=%xmm5,>t14=%xmm13
  1429. movdqa %xmm5,%xmm13
  1430. # qhasm: float6464 t14 *= *(int128 *)(t2p + 144)
  1431. # asm 1: mulpd 144(<t2p=int64#4),<t14=int6464#14
  1432. # asm 2: mulpd 144(<t2p=%rcx),<t14=%xmm13
  1433. mulpd 144(%rcx),%xmm13
  1434. # qhasm: float6464 r14 +=t14
  1435. # asm 1: addpd <t14=int6464#14,<r14=int6464#4
  1436. # asm 2: addpd <t14=%xmm13,<r14=%xmm3
  1437. addpd %xmm13,%xmm3
  1438. # qhasm: t15 = ab5
  1439. # asm 1: movdqa <ab5=int6464#6,>t15=int6464#14
  1440. # asm 2: movdqa <ab5=%xmm5,>t15=%xmm13
  1441. movdqa %xmm5,%xmm13
  1442. # qhasm: float6464 t15 *= *(int128 *)(t2p + 160)
  1443. # asm 1: mulpd 160(<t2p=int64#4),<t15=int6464#14
  1444. # asm 2: mulpd 160(<t2p=%rcx),<t15=%xmm13
  1445. mulpd 160(%rcx),%xmm13
  1446. # qhasm: float6464 r15 +=t15
  1447. # asm 1: addpd <t15=int6464#14,<r15=int6464#5
  1448. # asm 2: addpd <t15=%xmm13,<r15=%xmm4
  1449. addpd %xmm13,%xmm4
  1450. # qhasm: r16 = ab5
  1451. # asm 1: movdqa <ab5=int6464#6,>r16=int6464#6
  1452. # asm 2: movdqa <ab5=%xmm5,>r16=%xmm5
  1453. movdqa %xmm5,%xmm5
  1454. # qhasm: float6464 r16 *= *(int128 *)(t2p + 176)
  1455. # asm 1: mulpd 176(<t2p=int64#4),<r16=int6464#6
  1456. # asm 2: mulpd 176(<t2p=%rcx),<r16=%xmm5
  1457. mulpd 176(%rcx),%xmm5
  1458. # qhasm: t6 = ab5six
  1459. # asm 1: movdqa <ab5six=int6464#13,>t6=int6464#14
  1460. # asm 2: movdqa <ab5six=%xmm12,>t6=%xmm13
  1461. movdqa %xmm12,%xmm13
  1462. # qhasm: float6464 t6 *= *(int128 *)(t2p + 16)
  1463. # asm 1: mulpd 16(<t2p=int64#4),<t6=int6464#14
  1464. # asm 2: mulpd 16(<t2p=%rcx),<t6=%xmm13
  1465. mulpd 16(%rcx),%xmm13
  1466. # qhasm: float6464 r6 +=t6
  1467. # asm 1: addpd <t6=int6464#14,<r6=int6464#8
  1468. # asm 2: addpd <t6=%xmm13,<r6=%xmm7
  1469. addpd %xmm13,%xmm7
  1470. # qhasm: t12 = ab5six
  1471. # asm 1: movdqa <ab5six=int6464#13,>t12=int6464#13
  1472. # asm 2: movdqa <ab5six=%xmm12,>t12=%xmm12
  1473. movdqa %xmm12,%xmm12
  1474. # qhasm: float6464 t12 *= *(int128 *)(t2p + 112)
  1475. # asm 1: mulpd 112(<t2p=int64#4),<t12=int6464#13
  1476. # asm 2: mulpd 112(<t2p=%rcx),<t12=%xmm12
  1477. mulpd 112(%rcx),%xmm12
  1478. # qhasm: float6464 r12 +=t12
  1479. # asm 1: addpd <t12=int6464#13,<r12=int6464#2
  1480. # asm 2: addpd <t12=%xmm12,<r12=%xmm1
  1481. addpd %xmm12,%xmm1
  1482. # qhasm: *(int128 *)(rp + 80) = r5
  1483. # asm 1: movdqa <r5=int6464#7,80(<rp=int64#3)
  1484. # asm 2: movdqa <r5=%xmm6,80(<rp=%rdx)
  1485. movdqa %xmm6,80(%rdx)
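# ab6 block: ab6 has no SIX_SIX-scaled copy (all twelve products use ab6
# directly); r6..r16 accumulate, r17 is created; r6 is stored to rp + 96.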
  1486. # qhasm: ab6 = *(int128 *)(t1p + 96)
  1487. # asm 1: movdqa 96(<t1p=int64#2),>ab6=int6464#7
  1488. # asm 2: movdqa 96(<t1p=%rsi),>ab6=%xmm6
  1489. movdqa 96(%rsi),%xmm6
  1490. # qhasm: t6 = ab6
  1491. # asm 1: movdqa <ab6=int6464#7,>t6=int6464#13
  1492. # asm 2: movdqa <ab6=%xmm6,>t6=%xmm12
  1493. movdqa %xmm6,%xmm12
  1494. # qhasm: float6464 t6 *= *(int128 *)(t2p + 0)
  1495. # asm 1: mulpd 0(<t2p=int64#4),<t6=int6464#13
  1496. # asm 2: mulpd 0(<t2p=%rcx),<t6=%xmm12
  1497. mulpd 0(%rcx),%xmm12
  1498. # qhasm: float6464 r6 +=t6
  1499. # asm 1: addpd <t6=int6464#13,<r6=int6464#8
  1500. # asm 2: addpd <t6=%xmm12,<r6=%xmm7
  1501. addpd %xmm12,%xmm7
  1502. # qhasm: t7 = ab6
  1503. # asm 1: movdqa <ab6=int6464#7,>t7=int6464#13
  1504. # asm 2: movdqa <ab6=%xmm6,>t7=%xmm12
  1505. movdqa %xmm6,%xmm12
  1506. # qhasm: float6464 t7 *= *(int128 *)(t2p + 16)
  1507. # asm 1: mulpd 16(<t2p=int64#4),<t7=int6464#13
  1508. # asm 2: mulpd 16(<t2p=%rcx),<t7=%xmm12
  1509. mulpd 16(%rcx),%xmm12
  1510. # qhasm: float6464 r7 +=t7
  1511. # asm 1: addpd <t7=int6464#13,<r7=int6464#9
  1512. # asm 2: addpd <t7=%xmm12,<r7=%xmm8
  1513. addpd %xmm12,%xmm8
  1514. # qhasm: t8 = ab6
  1515. # asm 1: movdqa <ab6=int6464#7,>t8=int6464#13
  1516. # asm 2: movdqa <ab6=%xmm6,>t8=%xmm12
  1517. movdqa %xmm6,%xmm12
  1518. # qhasm: float6464 t8 *= *(int128 *)(t2p + 32)
  1519. # asm 1: mulpd 32(<t2p=int64#4),<t8=int6464#13
  1520. # asm 2: mulpd 32(<t2p=%rcx),<t8=%xmm12
  1521. mulpd 32(%rcx),%xmm12
  1522. # qhasm: float6464 r8 +=t8
  1523. # asm 1: addpd <t8=int6464#13,<r8=int6464#10
  1524. # asm 2: addpd <t8=%xmm12,<r8=%xmm9
  1525. addpd %xmm12,%xmm9
  1526. # qhasm: t9 = ab6
  1527. # asm 1: movdqa <ab6=int6464#7,>t9=int6464#13
  1528. # asm 2: movdqa <ab6=%xmm6,>t9=%xmm12
  1529. movdqa %xmm6,%xmm12
  1530. # qhasm: float6464 t9 *= *(int128 *)(t2p + 48)
  1531. # asm 1: mulpd 48(<t2p=int64#4),<t9=int6464#13
  1532. # asm 2: mulpd 48(<t2p=%rcx),<t9=%xmm12
  1533. mulpd 48(%rcx),%xmm12
  1534. # qhasm: float6464 r9 +=t9
  1535. # asm 1: addpd <t9=int6464#13,<r9=int6464#11
  1536. # asm 2: addpd <t9=%xmm12,<r9=%xmm10
  1537. addpd %xmm12,%xmm10
  1538. # qhasm: t10 = ab6
  1539. # asm 1: movdqa <ab6=int6464#7,>t10=int6464#13
  1540. # asm 2: movdqa <ab6=%xmm6,>t10=%xmm12
  1541. movdqa %xmm6,%xmm12
  1542. # qhasm: float6464 t10 *= *(int128 *)(t2p + 64)
  1543. # asm 1: mulpd 64(<t2p=int64#4),<t10=int6464#13
  1544. # asm 2: mulpd 64(<t2p=%rcx),<t10=%xmm12
  1545. mulpd 64(%rcx),%xmm12
  1546. # qhasm: float6464 r10 +=t10
  1547. # asm 1: addpd <t10=int6464#13,<r10=int6464#12
  1548. # asm 2: addpd <t10=%xmm12,<r10=%xmm11
  1549. addpd %xmm12,%xmm11
  1550. # qhasm: t11 = ab6
  1551. # asm 1: movdqa <ab6=int6464#7,>t11=int6464#13
  1552. # asm 2: movdqa <ab6=%xmm6,>t11=%xmm12
  1553. movdqa %xmm6,%xmm12
  1554. # qhasm: float6464 t11 *= *(int128 *)(t2p + 80)
  1555. # asm 1: mulpd 80(<t2p=int64#4),<t11=int6464#13
  1556. # asm 2: mulpd 80(<t2p=%rcx),<t11=%xmm12
  1557. mulpd 80(%rcx),%xmm12
  1558. # qhasm: float6464 r11 +=t11
  1559. # asm 1: addpd <t11=int6464#13,<r11=int6464#1
  1560. # asm 2: addpd <t11=%xmm12,<r11=%xmm0
  1561. addpd %xmm12,%xmm0
  1562. # qhasm: t12 = ab6
  1563. # asm 1: movdqa <ab6=int6464#7,>t12=int6464#13
  1564. # asm 2: movdqa <ab6=%xmm6,>t12=%xmm12
  1565. movdqa %xmm6,%xmm12
  1566. # qhasm: float6464 t12 *= *(int128 *)(t2p + 96)
  1567. # asm 1: mulpd 96(<t2p=int64#4),<t12=int6464#13
  1568. # asm 2: mulpd 96(<t2p=%rcx),<t12=%xmm12
  1569. mulpd 96(%rcx),%xmm12
  1570. # qhasm: float6464 r12 +=t12
  1571. # asm 1: addpd <t12=int6464#13,<r12=int6464#2
  1572. # asm 2: addpd <t12=%xmm12,<r12=%xmm1
  1573. addpd %xmm12,%xmm1
  1574. # qhasm: t13 = ab6
  1575. # asm 1: movdqa <ab6=int6464#7,>t13=int6464#13
  1576. # asm 2: movdqa <ab6=%xmm6,>t13=%xmm12
  1577. movdqa %xmm6,%xmm12
  1578. # qhasm: float6464 t13 *= *(int128 *)(t2p + 112)
  1579. # asm 1: mulpd 112(<t2p=int64#4),<t13=int6464#13
  1580. # asm 2: mulpd 112(<t2p=%rcx),<t13=%xmm12
  1581. mulpd 112(%rcx),%xmm12
  1582. # qhasm: float6464 r13 +=t13
  1583. # asm 1: addpd <t13=int6464#13,<r13=int6464#3
  1584. # asm 2: addpd <t13=%xmm12,<r13=%xmm2
  1585. addpd %xmm12,%xmm2
  1586. # qhasm: t14 = ab6
  1587. # asm 1: movdqa <ab6=int6464#7,>t14=int6464#13
  1588. # asm 2: movdqa <ab6=%xmm6,>t14=%xmm12
  1589. movdqa %xmm6,%xmm12
  1590. # qhasm: float6464 t14 *= *(int128 *)(t2p + 128)
  1591. # asm 1: mulpd 128(<t2p=int64#4),<t14=int6464#13
  1592. # asm 2: mulpd 128(<t2p=%rcx),<t14=%xmm12
  1593. mulpd 128(%rcx),%xmm12
  1594. # qhasm: float6464 r14 +=t14
  1595. # asm 1: addpd <t14=int6464#13,<r14=int6464#4
  1596. # asm 2: addpd <t14=%xmm12,<r14=%xmm3
  1597. addpd %xmm12,%xmm3
  1598. # qhasm: t15 = ab6
  1599. # asm 1: movdqa <ab6=int6464#7,>t15=int6464#13
  1600. # asm 2: movdqa <ab6=%xmm6,>t15=%xmm12
  1601. movdqa %xmm6,%xmm12
  1602. # qhasm: float6464 t15 *= *(int128 *)(t2p + 144)
  1603. # asm 1: mulpd 144(<t2p=int64#4),<t15=int6464#13
  1604. # asm 2: mulpd 144(<t2p=%rcx),<t15=%xmm12
  1605. mulpd 144(%rcx),%xmm12
  1606. # qhasm: float6464 r15 +=t15
  1607. # asm 1: addpd <t15=int6464#13,<r15=int6464#5
  1608. # asm 2: addpd <t15=%xmm12,<r15=%xmm4
  1609. addpd %xmm12,%xmm4
  1610. # qhasm: t16 = ab6
  1611. # asm 1: movdqa <ab6=int6464#7,>t16=int6464#13
  1612. # asm 2: movdqa <ab6=%xmm6,>t16=%xmm12
  1613. movdqa %xmm6,%xmm12
  1614. # qhasm: float6464 t16 *= *(int128 *)(t2p + 160)
  1615. # asm 1: mulpd 160(<t2p=int64#4),<t16=int6464#13
  1616. # asm 2: mulpd 160(<t2p=%rcx),<t16=%xmm12
  1617. mulpd 160(%rcx),%xmm12
  1618. # qhasm: float6464 r16 +=t16
  1619. # asm 1: addpd <t16=int6464#13,<r16=int6464#6
  1620. # asm 2: addpd <t16=%xmm12,<r16=%xmm5
  1621. addpd %xmm12,%xmm5
  1622. # qhasm: r17 = ab6
  1623. # asm 1: movdqa <ab6=int6464#7,>r17=int6464#7
  1624. # asm 2: movdqa <ab6=%xmm6,>r17=%xmm6
  1625. movdqa %xmm6,%xmm6
  1626. # qhasm: float6464 r17 *= *(int128 *)(t2p + 176)
  1627. # asm 1: mulpd 176(<t2p=int64#4),<r17=int6464#7
  1628. # asm 2: mulpd 176(<t2p=%rcx),<r17=%xmm6
  1629. mulpd 176(%rcx),%xmm6
  1630. # qhasm: *(int128 *)(rp + 96) = r6
  1631. # asm 1: movdqa <r6=int6464#8,96(<rp=int64#3)
  1632. # asm 2: movdqa <r6=%xmm7,96(<rp=%rdx)
  1633. movdqa %xmm7,96(%rdx)
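# ab7 block: only the t2p+0 and t2p+96 products use ab7 directly, the
# other ten use ab7six; r7..r17 accumulate, r18 is created; r7 is stored
# to rp + 112.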
  1634. # qhasm: ab7 = *(int128 *)(t1p + 112)
  1635. # asm 1: movdqa 112(<t1p=int64#2),>ab7=int6464#8
  1636. # asm 2: movdqa 112(<t1p=%rsi),>ab7=%xmm7
  1637. movdqa 112(%rsi),%xmm7
  1638. # qhasm: ab7six = ab7
  1639. # asm 1: movdqa <ab7=int6464#8,>ab7six=int6464#13
  1640. # asm 2: movdqa <ab7=%xmm7,>ab7six=%xmm12
  1641. movdqa %xmm7,%xmm12
  1642. # qhasm: float6464 ab7six *= SIX_SIX
  1643. # asm 1: mulpd SIX_SIX,<ab7six=int6464#13
  1644. # asm 2: mulpd SIX_SIX,<ab7six=%xmm12
  1645. mulpd SIX_SIX,%xmm12
  1646. # qhasm: t7 = ab7
  1647. # asm 1: movdqa <ab7=int6464#8,>t7=int6464#14
  1648. # asm 2: movdqa <ab7=%xmm7,>t7=%xmm13
  1649. movdqa %xmm7,%xmm13
  1650. # qhasm: float6464 t7 *= *(int128 *)(t2p + 0)
  1651. # asm 1: mulpd 0(<t2p=int64#4),<t7=int6464#14
  1652. # asm 2: mulpd 0(<t2p=%rcx),<t7=%xmm13
  1653. mulpd 0(%rcx),%xmm13
  1654. # qhasm: float6464 r7 +=t7
  1655. # asm 1: addpd <t7=int6464#14,<r7=int6464#9
  1656. # asm 2: addpd <t7=%xmm13,<r7=%xmm8
  1657. addpd %xmm13,%xmm8
  1658. # qhasm: t13 = ab7
  1659. # asm 1: movdqa <ab7=int6464#8,>t13=int6464#8
  1660. # asm 2: movdqa <ab7=%xmm7,>t13=%xmm7
  1661. movdqa %xmm7,%xmm7
  1662. # qhasm: float6464 t13 *= *(int128 *)(t2p + 96)
  1663. # asm 1: mulpd 96(<t2p=int64#4),<t13=int6464#8
  1664. # asm 2: mulpd 96(<t2p=%rcx),<t13=%xmm7
  1665. mulpd 96(%rcx),%xmm7
  1666. # qhasm: float6464 r13 +=t13
  1667. # asm 1: addpd <t13=int6464#8,<r13=int6464#3
  1668. # asm 2: addpd <t13=%xmm7,<r13=%xmm2
  1669. addpd %xmm7,%xmm2
  1670. # qhasm: t8 = ab7six
  1671. # asm 1: movdqa <ab7six=int6464#13,>t8=int6464#8
  1672. # asm 2: movdqa <ab7six=%xmm12,>t8=%xmm7
  1673. movdqa %xmm12,%xmm7
  1674. # qhasm: float6464 t8 *= *(int128 *)(t2p + 16)
  1675. # asm 1: mulpd 16(<t2p=int64#4),<t8=int6464#8
  1676. # asm 2: mulpd 16(<t2p=%rcx),<t8=%xmm7
  1677. mulpd 16(%rcx),%xmm7
  1678. # qhasm: float6464 r8 +=t8
  1679. # asm 1: addpd <t8=int6464#8,<r8=int6464#10
  1680. # asm 2: addpd <t8=%xmm7,<r8=%xmm9
  1681. addpd %xmm7,%xmm9
  1682. # qhasm: t9 = ab7six
  1683. # asm 1: movdqa <ab7six=int6464#13,>t9=int6464#8
  1684. # asm 2: movdqa <ab7six=%xmm12,>t9=%xmm7
  1685. movdqa %xmm12,%xmm7
  1686. # qhasm: float6464 t9 *= *(int128 *)(t2p + 32)
  1687. # asm 1: mulpd 32(<t2p=int64#4),<t9=int6464#8
  1688. # asm 2: mulpd 32(<t2p=%rcx),<t9=%xmm7
  1689. mulpd 32(%rcx),%xmm7
  1690. # qhasm: float6464 r9 +=t9
  1691. # asm 1: addpd <t9=int6464#8,<r9=int6464#11
  1692. # asm 2: addpd <t9=%xmm7,<r9=%xmm10
  1693. addpd %xmm7,%xmm10
  1694. # qhasm: t10 = ab7six
  1695. # asm 1: movdqa <ab7six=int6464#13,>t10=int6464#8
  1696. # asm 2: movdqa <ab7six=%xmm12,>t10=%xmm7
  1697. movdqa %xmm12,%xmm7
  1698. # qhasm: float6464 t10 *= *(int128 *)(t2p + 48)
  1699. # asm 1: mulpd 48(<t2p=int64#4),<t10=int6464#8
  1700. # asm 2: mulpd 48(<t2p=%rcx),<t10=%xmm7
  1701. mulpd 48(%rcx),%xmm7
  1702. # qhasm: float6464 r10 +=t10
  1703. # asm 1: addpd <t10=int6464#8,<r10=int6464#12
  1704. # asm 2: addpd <t10=%xmm7,<r10=%xmm11
  1705. addpd %xmm7,%xmm11
  1706. # qhasm: t11 = ab7six
  1707. # asm 1: movdqa <ab7six=int6464#13,>t11=int6464#8
  1708. # asm 2: movdqa <ab7six=%xmm12,>t11=%xmm7
  1709. movdqa %xmm12,%xmm7
  1710. # qhasm: float6464 t11 *= *(int128 *)(t2p + 64)
  1711. # asm 1: mulpd 64(<t2p=int64#4),<t11=int6464#8
  1712. # asm 2: mulpd 64(<t2p=%rcx),<t11=%xmm7
  1713. mulpd 64(%rcx),%xmm7
  1714. # qhasm: float6464 r11 +=t11
  1715. # asm 1: addpd <t11=int6464#8,<r11=int6464#1
  1716. # asm 2: addpd <t11=%xmm7,<r11=%xmm0
  1717. addpd %xmm7,%xmm0
  1718. # qhasm: t12 = ab7six
  1719. # asm 1: movdqa <ab7six=int6464#13,>t12=int6464#8
  1720. # asm 2: movdqa <ab7six=%xmm12,>t12=%xmm7
  1721. movdqa %xmm12,%xmm7
  1722. # qhasm: float6464 t12 *= *(int128 *)(t2p + 80)
  1723. # asm 1: mulpd 80(<t2p=int64#4),<t12=int6464#8
  1724. # asm 2: mulpd 80(<t2p=%rcx),<t12=%xmm7
  1725. mulpd 80(%rcx),%xmm7
  1726. # qhasm: float6464 r12 +=t12
  1727. # asm 1: addpd <t12=int6464#8,<r12=int6464#2
  1728. # asm 2: addpd <t12=%xmm7,<r12=%xmm1
  1729. addpd %xmm7,%xmm1
  1730. # qhasm: t14 = ab7six
  1731. # asm 1: movdqa <ab7six=int6464#13,>t14=int6464#8
  1732. # asm 2: movdqa <ab7six=%xmm12,>t14=%xmm7
  1733. movdqa %xmm12,%xmm7
  1734. # qhasm: float6464 t14 *= *(int128 *)(t2p + 112)
  1735. # asm 1: mulpd 112(<t2p=int64#4),<t14=int6464#8
  1736. # asm 2: mulpd 112(<t2p=%rcx),<t14=%xmm7
  1737. mulpd 112(%rcx),%xmm7
  1738. # qhasm: float6464 r14 +=t14
  1739. # asm 1: addpd <t14=int6464#8,<r14=int6464#4
  1740. # asm 2: addpd <t14=%xmm7,<r14=%xmm3
  1741. addpd %xmm7,%xmm3
  1742. # qhasm: t15 = ab7six
  1743. # asm 1: movdqa <ab7six=int6464#13,>t15=int6464#8
  1744. # asm 2: movdqa <ab7six=%xmm12,>t15=%xmm7
  1745. movdqa %xmm12,%xmm7
  1746. # qhasm: float6464 t15 *= *(int128 *)(t2p + 128)
  1747. # asm 1: mulpd 128(<t2p=int64#4),<t15=int6464#8
  1748. # asm 2: mulpd 128(<t2p=%rcx),<t15=%xmm7
  1749. mulpd 128(%rcx),%xmm7
  1750. # qhasm: float6464 r15 +=t15
  1751. # asm 1: addpd <t15=int6464#8,<r15=int6464#5
  1752. # asm 2: addpd <t15=%xmm7,<r15=%xmm4
  1753. addpd %xmm7,%xmm4
  1754. # qhasm: t16 = ab7six
  1755. # asm 1: movdqa <ab7six=int6464#13,>t16=int6464#8
  1756. # asm 2: movdqa <ab7six=%xmm12,>t16=%xmm7
  1757. movdqa %xmm12,%xmm7
  1758. # qhasm: float6464 t16 *= *(int128 *)(t2p + 144)
  1759. # asm 1: mulpd 144(<t2p=int64#4),<t16=int6464#8
  1760. # asm 2: mulpd 144(<t2p=%rcx),<t16=%xmm7
  1761. mulpd 144(%rcx),%xmm7
  1762. # qhasm: float6464 r16 +=t16
  1763. # asm 1: addpd <t16=int6464#8,<r16=int6464#6
  1764. # asm 2: addpd <t16=%xmm7,<r16=%xmm5
  1765. addpd %xmm7,%xmm5
  1766. # qhasm: t17 = ab7six
  1767. # asm 1: movdqa <ab7six=int6464#13,>t17=int6464#8
  1768. # asm 2: movdqa <ab7six=%xmm12,>t17=%xmm7
  1769. movdqa %xmm12,%xmm7
  1770. # qhasm: float6464 t17 *= *(int128 *)(t2p + 160)
  1771. # asm 1: mulpd 160(<t2p=int64#4),<t17=int6464#8
  1772. # asm 2: mulpd 160(<t2p=%rcx),<t17=%xmm7
  1773. mulpd 160(%rcx),%xmm7
  1774. # qhasm: float6464 r17 +=t17
  1775. # asm 1: addpd <t17=int6464#8,<r17=int6464#7
  1776. # asm 2: addpd <t17=%xmm7,<r17=%xmm6
  1777. addpd %xmm7,%xmm6
  1778. # qhasm: r18 = ab7six
  1779. # asm 1: movdqa <ab7six=int6464#13,>r18=int6464#8
  1780. # asm 2: movdqa <ab7six=%xmm12,>r18=%xmm7
  1781. movdqa %xmm12,%xmm7
  1782. # qhasm: float6464 r18 *= *(int128 *)(t2p + 176)
  1783. # asm 1: mulpd 176(<t2p=int64#4),<r18=int6464#8
  1784. # asm 2: mulpd 176(<t2p=%rcx),<r18=%xmm7
  1785. mulpd 176(%rcx),%xmm7
  1786. # qhasm: *(int128 *)(rp + 112) = r7
  1787. # asm 1: movdqa <r7=int6464#9,112(<rp=int64#3)
  1788. # asm 2: movdqa <r7=%xmm8,112(<rp=%rdx)
  1789. movdqa %xmm8,112(%rdx)
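# ab8 block: r8..r18 accumulate, r19 is created; r8 is stored to rp + 128.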
  1790. # qhasm: ab8 = *(int128 *)(t1p + 128)
  1791. # asm 1: movdqa 128(<t1p=int64#2),>ab8=int6464#9
  1792. # asm 2: movdqa 128(<t1p=%rsi),>ab8=%xmm8
  1793. movdqa 128(%rsi),%xmm8
  1794. # qhasm: ab8six = ab8
  1795. # asm 1: movdqa <ab8=int6464#9,>ab8six=int6464#13
  1796. # asm 2: movdqa <ab8=%xmm8,>ab8six=%xmm12
  1797. movdqa %xmm8,%xmm12
  1798. # qhasm: float6464 ab8six *= SIX_SIX
  1799. # asm 1: mulpd SIX_SIX,<ab8six=int6464#13
  1800. # asm 2: mulpd SIX_SIX,<ab8six=%xmm12
  1801. mulpd SIX_SIX,%xmm12
  1802. # qhasm: t8 = ab8
  1803. # asm 1: movdqa <ab8=int6464#9,>t8=int6464#14
  1804. # asm 2: movdqa <ab8=%xmm8,>t8=%xmm13
  1805. movdqa %xmm8,%xmm13
  1806. # qhasm: float6464 t8 *= *(int128 *)(t2p + 0)
  1807. # asm 1: mulpd 0(<t2p=int64#4),<t8=int6464#14
  1808. # asm 2: mulpd 0(<t2p=%rcx),<t8=%xmm13
  1809. mulpd 0(%rcx),%xmm13
  1810. # qhasm: float6464 r8 +=t8
  1811. # asm 1: addpd <t8=int6464#14,<r8=int6464#10
  1812. # asm 2: addpd <t8=%xmm13,<r8=%xmm9
  1813. addpd %xmm13,%xmm9
  1814. # qhasm: t13 = ab8
  1815. # asm 1: movdqa <ab8=int6464#9,>t13=int6464#14
  1816. # asm 2: movdqa <ab8=%xmm8,>t13=%xmm13
  1817. movdqa %xmm8,%xmm13
  1818. # qhasm: float6464 t13 *= *(int128 *)(t2p + 80)
  1819. # asm 1: mulpd 80(<t2p=int64#4),<t13=int6464#14
  1820. # asm 2: mulpd 80(<t2p=%rcx),<t13=%xmm13
  1821. mulpd 80(%rcx),%xmm13
  1822. # qhasm: float6464 r13 +=t13
  1823. # asm 1: addpd <t13=int6464#14,<r13=int6464#3
  1824. # asm 2: addpd <t13=%xmm13,<r13=%xmm2
  1825. addpd %xmm13,%xmm2
  1826. # qhasm: t14 = ab8
  1827. # asm 1: movdqa <ab8=int6464#9,>t14=int6464#14
  1828. # asm 2: movdqa <ab8=%xmm8,>t14=%xmm13
  1829. movdqa %xmm8,%xmm13
  1830. # qhasm: float6464 t14 *= *(int128 *)(t2p + 96)
  1831. # asm 1: mulpd 96(<t2p=int64#4),<t14=int6464#14
  1832. # asm 2: mulpd 96(<t2p=%rcx),<t14=%xmm13
  1833. mulpd 96(%rcx),%xmm13
  1834. # qhasm: float6464 r14 +=t14
  1835. # asm 1: addpd <t14=int6464#14,<r14=int6464#4
  1836. # asm 2: addpd <t14=%xmm13,<r14=%xmm3
  1837. addpd %xmm13,%xmm3
  1838. # qhasm: r19 = ab8
  1839. # asm 1: movdqa <ab8=int6464#9,>r19=int6464#9
  1840. # asm 2: movdqa <ab8=%xmm8,>r19=%xmm8
  1841. movdqa %xmm8,%xmm8
  1842. # qhasm: float6464 r19 *= *(int128 *)(t2p + 176)
  1843. # asm 1: mulpd 176(<t2p=int64#4),<r19=int6464#9
  1844. # asm 2: mulpd 176(<t2p=%rcx),<r19=%xmm8
  1845. mulpd 176(%rcx),%xmm8
  1846. # qhasm: t9 = ab8six
  1847. # asm 1: movdqa <ab8six=int6464#13,>t9=int6464#14
  1848. # asm 2: movdqa <ab8six=%xmm12,>t9=%xmm13
  1849. movdqa %xmm12,%xmm13
  1850. # qhasm: float6464 t9 *= *(int128 *)(t2p + 16)
  1851. # asm 1: mulpd 16(<t2p=int64#4),<t9=int6464#14
  1852. # asm 2: mulpd 16(<t2p=%rcx),<t9=%xmm13
  1853. mulpd 16(%rcx),%xmm13
  1854. # qhasm: float6464 r9 +=t9
  1855. # asm 1: addpd <t9=int6464#14,<r9=int6464#11
  1856. # asm 2: addpd <t9=%xmm13,<r9=%xmm10
  1857. addpd %xmm13,%xmm10
  1858. # qhasm: t10 = ab8six
  1859. # asm 1: movdqa <ab8six=int6464#13,>t10=int6464#14
  1860. # asm 2: movdqa <ab8six=%xmm12,>t10=%xmm13
  1861. movdqa %xmm12,%xmm13
  1862. # qhasm: float6464 t10 *= *(int128 *)(t2p + 32)
  1863. # asm 1: mulpd 32(<t2p=int64#4),<t10=int6464#14
  1864. # asm 2: mulpd 32(<t2p=%rcx),<t10=%xmm13
  1865. mulpd 32(%rcx),%xmm13
  1866. # qhasm: float6464 r10 +=t10
  1867. # asm 1: addpd <t10=int6464#14,<r10=int6464#12
  1868. # asm 2: addpd <t10=%xmm13,<r10=%xmm11
  1869. addpd %xmm13,%xmm11
  1870. # qhasm: t11 = ab8six
  1871. # asm 1: movdqa <ab8six=int6464#13,>t11=int6464#14
  1872. # asm 2: movdqa <ab8six=%xmm12,>t11=%xmm13
  1873. movdqa %xmm12,%xmm13
  1874. # qhasm: float6464 t11 *= *(int128 *)(t2p + 48)
  1875. # asm 1: mulpd 48(<t2p=int64#4),<t11=int6464#14
  1876. # asm 2: mulpd 48(<t2p=%rcx),<t11=%xmm13
  1877. mulpd 48(%rcx),%xmm13
  1878. # qhasm: float6464 r11 +=t11
  1879. # asm 1: addpd <t11=int6464#14,<r11=int6464#1
  1880. # asm 2: addpd <t11=%xmm13,<r11=%xmm0
  1881. addpd %xmm13,%xmm0
  1882. # qhasm: t12 = ab8six
  1883. # asm 1: movdqa <ab8six=int6464#13,>t12=int6464#14
  1884. # asm 2: movdqa <ab8six=%xmm12,>t12=%xmm13
  1885. movdqa %xmm12,%xmm13
  1886. # qhasm: float6464 t12 *= *(int128 *)(t2p + 64)
  1887. # asm 1: mulpd 64(<t2p=int64#4),<t12=int6464#14
  1888. # asm 2: mulpd 64(<t2p=%rcx),<t12=%xmm13
  1889. mulpd 64(%rcx),%xmm13
  1890. # qhasm: float6464 r12 +=t12
  1891. # asm 1: addpd <t12=int6464#14,<r12=int6464#2
  1892. # asm 2: addpd <t12=%xmm13,<r12=%xmm1
  1893. addpd %xmm13,%xmm1
  1894. # qhasm: t15 = ab8six
  1895. # asm 1: movdqa <ab8six=int6464#13,>t15=int6464#14
  1896. # asm 2: movdqa <ab8six=%xmm12,>t15=%xmm13
  1897. movdqa %xmm12,%xmm13
  1898. # qhasm: float6464 t15 *= *(int128 *)(t2p + 112)
  1899. # asm 1: mulpd 112(<t2p=int64#4),<t15=int6464#14
  1900. # asm 2: mulpd 112(<t2p=%rcx),<t15=%xmm13
  1901. mulpd 112(%rcx),%xmm13
  1902. # qhasm: float6464 r15 +=t15
  1903. # asm 1: addpd <t15=int6464#14,<r15=int6464#5
  1904. # asm 2: addpd <t15=%xmm13,<r15=%xmm4
  1905. addpd %xmm13,%xmm4
  1906. # qhasm: t16 = ab8six
  1907. # asm 1: movdqa <ab8six=int6464#13,>t16=int6464#14
  1908. # asm 2: movdqa <ab8six=%xmm12,>t16=%xmm13
  1909. movdqa %xmm12,%xmm13
  1910. # qhasm: float6464 t16 *= *(int128 *)(t2p + 128)
  1911. # asm 1: mulpd 128(<t2p=int64#4),<t16=int6464#14
  1912. # asm 2: mulpd 128(<t2p=%rcx),<t16=%xmm13
  1913. mulpd 128(%rcx),%xmm13
  1914. # qhasm: float6464 r16 +=t16
  1915. # asm 1: addpd <t16=int6464#14,<r16=int6464#6
  1916. # asm 2: addpd <t16=%xmm13,<r16=%xmm5
  1917. addpd %xmm13,%xmm5
  1918. # qhasm: t17 = ab8six
  1919. # asm 1: movdqa <ab8six=int6464#13,>t17=int6464#14
  1920. # asm 2: movdqa <ab8six=%xmm12,>t17=%xmm13
  1921. movdqa %xmm12,%xmm13
  1922. # qhasm: float6464 t17 *= *(int128 *)(t2p + 144)
  1923. # asm 1: mulpd 144(<t2p=int64#4),<t17=int6464#14
  1924. # asm 2: mulpd 144(<t2p=%rcx),<t17=%xmm13
  1925. mulpd 144(%rcx),%xmm13
  1926. # qhasm: float6464 r17 +=t17
  1927. # asm 1: addpd <t17=int6464#14,<r17=int6464#7
  1928. # asm 2: addpd <t17=%xmm13,<r17=%xmm6
  1929. addpd %xmm13,%xmm6
  1930. # qhasm: t18 = ab8six
  1931. # asm 1: movdqa <ab8six=int6464#13,>t18=int6464#13
  1932. # asm 2: movdqa <ab8six=%xmm12,>t18=%xmm12
  1933. movdqa %xmm12,%xmm12
  1934. # qhasm: float6464 t18 *= *(int128 *)(t2p + 160)
  1935. # asm 1: mulpd 160(<t2p=int64#4),<t18=int6464#13
  1936. # asm 2: mulpd 160(<t2p=%rcx),<t18=%xmm12
  1937. mulpd 160(%rcx),%xmm12
  1938. # qhasm: float6464 r18 +=t18
  1939. # asm 1: addpd <t18=int6464#13,<r18=int6464#8
  1940. # asm 2: addpd <t18=%xmm12,<r18=%xmm7
  1941. addpd %xmm12,%xmm7
  1942. # qhasm: *(int128 *)(rp + 128) = r8
  1943. # asm 1: movdqa <r8=int6464#10,128(<rp=int64#3)
  1944. # asm 2: movdqa <r8=%xmm9,128(<rp=%rdx)
  1945. movdqa %xmm9,128(%rdx)
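# ab9 block: r9..r19 accumulate, r20 is created; r9 is stored to rp + 144.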
  1946. # qhasm: ab9 = *(int128 *)(t1p + 144)
  1947. # asm 1: movdqa 144(<t1p=int64#2),>ab9=int6464#10
  1948. # asm 2: movdqa 144(<t1p=%rsi),>ab9=%xmm9
  1949. movdqa 144(%rsi),%xmm9
  1950. # qhasm: ab9six = ab9
  1951. # asm 1: movdqa <ab9=int6464#10,>ab9six=int6464#13
  1952. # asm 2: movdqa <ab9=%xmm9,>ab9six=%xmm12
  1953. movdqa %xmm9,%xmm12
  1954. # qhasm: float6464 ab9six *= SIX_SIX
  1955. # asm 1: mulpd SIX_SIX,<ab9six=int6464#13
  1956. # asm 2: mulpd SIX_SIX,<ab9six=%xmm12
  1957. mulpd SIX_SIX,%xmm12
  1958. # qhasm: t9 = ab9
  1959. # asm 1: movdqa <ab9=int6464#10,>t9=int6464#14
  1960. # asm 2: movdqa <ab9=%xmm9,>t9=%xmm13
  1961. movdqa %xmm9,%xmm13
  1962. # qhasm: float6464 t9 *= *(int128 *)(t2p + 0)
  1963. # asm 1: mulpd 0(<t2p=int64#4),<t9=int6464#14
  1964. # asm 2: mulpd 0(<t2p=%rcx),<t9=%xmm13
  1965. mulpd 0(%rcx),%xmm13
  1966. # qhasm: float6464 r9 +=t9
  1967. # asm 1: addpd <t9=int6464#14,<r9=int6464#11
  1968. # asm 2: addpd <t9=%xmm13,<r9=%xmm10
  1969. addpd %xmm13,%xmm10
  1970. # qhasm: t13 = ab9
  1971. # asm 1: movdqa <ab9=int6464#10,>t13=int6464#14
  1972. # asm 2: movdqa <ab9=%xmm9,>t13=%xmm13
  1973. movdqa %xmm9,%xmm13
  1974. # qhasm: float6464 t13 *= *(int128 *)(t2p + 64)
  1975. # asm 1: mulpd 64(<t2p=int64#4),<t13=int6464#14
  1976. # asm 2: mulpd 64(<t2p=%rcx),<t13=%xmm13
  1977. mulpd 64(%rcx),%xmm13
  1978. # qhasm: float6464 r13 +=t13
  1979. # asm 1: addpd <t13=int6464#14,<r13=int6464#3
  1980. # asm 2: addpd <t13=%xmm13,<r13=%xmm2
  1981. addpd %xmm13,%xmm2
  1982. # qhasm: t14 = ab9
  1983. # asm 1: movdqa <ab9=int6464#10,>t14=int6464#14
  1984. # asm 2: movdqa <ab9=%xmm9,>t14=%xmm13
  1985. movdqa %xmm9,%xmm13
  1986. # qhasm: float6464 t14 *= *(int128 *)(t2p + 80)
  1987. # asm 1: mulpd 80(<t2p=int64#4),<t14=int6464#14
  1988. # asm 2: mulpd 80(<t2p=%rcx),<t14=%xmm13
  1989. mulpd 80(%rcx),%xmm13
  1990. # qhasm: float6464 r14 +=t14
  1991. # asm 1: addpd <t14=int6464#14,<r14=int6464#4
  1992. # asm 2: addpd <t14=%xmm13,<r14=%xmm3
  1993. addpd %xmm13,%xmm3
  1994. # qhasm: t15 = ab9
  1995. # asm 1: movdqa <ab9=int6464#10,>t15=int6464#14
  1996. # asm 2: movdqa <ab9=%xmm9,>t15=%xmm13
  1997. movdqa %xmm9,%xmm13
  1998. # qhasm: float6464 t15 *= *(int128 *)(t2p + 96)
  1999. # asm 1: mulpd 96(<t2p=int64#4),<t15=int6464#14
  2000. # asm 2: mulpd 96(<t2p=%rcx),<t15=%xmm13
  2001. mulpd 96(%rcx),%xmm13
  2002. # qhasm: float6464 r15 +=t15
  2003. # asm 1: addpd <t15=int6464#14,<r15=int6464#5
  2004. # asm 2: addpd <t15=%xmm13,<r15=%xmm4
  2005. addpd %xmm13,%xmm4
  2006. # qhasm: t19 = ab9
  2007. # asm 1: movdqa <ab9=int6464#10,>t19=int6464#14
  2008. # asm 2: movdqa <ab9=%xmm9,>t19=%xmm13
  2009. movdqa %xmm9,%xmm13
  2010. # qhasm: float6464 t19 *= *(int128 *)(t2p + 160)
  2011. # asm 1: mulpd 160(<t2p=int64#4),<t19=int6464#14
  2012. # asm 2: mulpd 160(<t2p=%rcx),<t19=%xmm13
  2013. mulpd 160(%rcx),%xmm13
  2014. # qhasm: float6464 r19 +=t19
  2015. # asm 1: addpd <t19=int6464#14,<r19=int6464#9
  2016. # asm 2: addpd <t19=%xmm13,<r19=%xmm8
  2017. addpd %xmm13,%xmm8
  2018. # qhasm: r20 = ab9
  2019. # asm 1: movdqa <ab9=int6464#10,>r20=int6464#10
  2020. # asm 2: movdqa <ab9=%xmm9,>r20=%xmm9
  2021. movdqa %xmm9,%xmm9
  2022. # qhasm: float6464 r20 *= *(int128 *)(t2p + 176)
  2023. # asm 1: mulpd 176(<t2p=int64#4),<r20=int6464#10
  2024. # asm 2: mulpd 176(<t2p=%rcx),<r20=%xmm9
  2025. mulpd 176(%rcx),%xmm9
  2026. # qhasm: t10 = ab9six
  2027. # asm 1: movdqa <ab9six=int6464#13,>t10=int6464#14
  2028. # asm 2: movdqa <ab9six=%xmm12,>t10=%xmm13
  2029. movdqa %xmm12,%xmm13
  2030. # qhasm: float6464 t10 *= *(int128 *)(t2p + 16)
  2031. # asm 1: mulpd 16(<t2p=int64#4),<t10=int6464#14
  2032. # asm 2: mulpd 16(<t2p=%rcx),<t10=%xmm13
  2033. mulpd 16(%rcx),%xmm13
  2034. # qhasm: float6464 r10 +=t10
  2035. # asm 1: addpd <t10=int6464#14,<r10=int6464#12
  2036. # asm 2: addpd <t10=%xmm13,<r10=%xmm11
  2037. addpd %xmm13,%xmm11
  2038. # qhasm: t11 = ab9six
  2039. # asm 1: movdqa <ab9six=int6464#13,>t11=int6464#14
  2040. # asm 2: movdqa <ab9six=%xmm12,>t11=%xmm13
  2041. movdqa %xmm12,%xmm13
  2042. # qhasm: float6464 t11 *= *(int128 *)(t2p + 32)
  2043. # asm 1: mulpd 32(<t2p=int64#4),<t11=int6464#14
  2044. # asm 2: mulpd 32(<t2p=%rcx),<t11=%xmm13
  2045. mulpd 32(%rcx),%xmm13
  2046. # qhasm: float6464 r11 +=t11
  2047. # asm 1: addpd <t11=int6464#14,<r11=int6464#1
  2048. # asm 2: addpd <t11=%xmm13,<r11=%xmm0
  2049. addpd %xmm13,%xmm0
  2050. # qhasm: t12 = ab9six
  2051. # asm 1: movdqa <ab9six=int6464#13,>t12=int6464#14
  2052. # asm 2: movdqa <ab9six=%xmm12,>t12=%xmm13
  2053. movdqa %xmm12,%xmm13
  2054. # qhasm: float6464 t12 *= *(int128 *)(t2p + 48)
  2055. # asm 1: mulpd 48(<t2p=int64#4),<t12=int6464#14
  2056. # asm 2: mulpd 48(<t2p=%rcx),<t12=%xmm13
  2057. mulpd 48(%rcx),%xmm13
  2058. # qhasm: float6464 r12 +=t12
  2059. # asm 1: addpd <t12=int6464#14,<r12=int6464#2
  2060. # asm 2: addpd <t12=%xmm13,<r12=%xmm1
  2061. addpd %xmm13,%xmm1
  2062. # qhasm: t16 = ab9six
  2063. # asm 1: movdqa <ab9six=int6464#13,>t16=int6464#14
  2064. # asm 2: movdqa <ab9six=%xmm12,>t16=%xmm13
  2065. movdqa %xmm12,%xmm13
  2066. # qhasm: float6464 t16 *= *(int128 *)(t2p + 112)
  2067. # asm 1: mulpd 112(<t2p=int64#4),<t16=int6464#14
  2068. # asm 2: mulpd 112(<t2p=%rcx),<t16=%xmm13
  2069. mulpd 112(%rcx),%xmm13
  2070. # qhasm: float6464 r16 +=t16
  2071. # asm 1: addpd <t16=int6464#14,<r16=int6464#6
  2072. # asm 2: addpd <t16=%xmm13,<r16=%xmm5
  2073. addpd %xmm13,%xmm5
  2074. # qhasm: t17 = ab9six
  2075. # asm 1: movdqa <ab9six=int6464#13,>t17=int6464#14
  2076. # asm 2: movdqa <ab9six=%xmm12,>t17=%xmm13
  2077. movdqa %xmm12,%xmm13
  2078. # qhasm: float6464 t17 *= *(int128 *)(t2p + 128)
  2079. # asm 1: mulpd 128(<t2p=int64#4),<t17=int6464#14
  2080. # asm 2: mulpd 128(<t2p=%rcx),<t17=%xmm13
  2081. mulpd 128(%rcx),%xmm13
  2082. # qhasm: float6464 r17 +=t17
  2083. # asm 1: addpd <t17=int6464#14,<r17=int6464#7
  2084. # asm 2: addpd <t17=%xmm13,<r17=%xmm6
  2085. addpd %xmm13,%xmm6
  2086. # qhasm: t18 = ab9six
  2087. # asm 1: movdqa <ab9six=int6464#13,>t18=int6464#13
  2088. # asm 2: movdqa <ab9six=%xmm12,>t18=%xmm12
  2089. movdqa %xmm12,%xmm12
  2090. # qhasm: float6464 t18 *= *(int128 *)(t2p + 144)
  2091. # asm 1: mulpd 144(<t2p=int64#4),<t18=int6464#13
  2092. # asm 2: mulpd 144(<t2p=%rcx),<t18=%xmm12
  2093. mulpd 144(%rcx),%xmm12
  2094. # qhasm: float6464 r18 +=t18
  2095. # asm 1: addpd <t18=int6464#13,<r18=int6464#8
  2096. # asm 2: addpd <t18=%xmm12,<r18=%xmm7
  2097. addpd %xmm12,%xmm7
  2098. # qhasm: *(int128 *)(rp + 144) = r9
  2099. # asm 1: movdqa <r9=int6464#11,144(<rp=int64#3)
  2100. # asm 2: movdqa <r9=%xmm10,144(<rp=%rdx)
  2101. movdqa %xmm10,144(%rdx)
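# ab10 block: r10..r20 accumulate, r21 is created; r10 is stored to
# rp + 160.  (Note the t2p+96 product, t16, is folded in before the
# t2p+80 product, t15.)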
  2102. # qhasm: ab10 = *(int128 *)(t1p + 160)
  2103. # asm 1: movdqa 160(<t1p=int64#2),>ab10=int6464#11
  2104. # asm 2: movdqa 160(<t1p=%rsi),>ab10=%xmm10
  2105. movdqa 160(%rsi),%xmm10
  2106. # qhasm: ab10six = ab10
  2107. # asm 1: movdqa <ab10=int6464#11,>ab10six=int6464#13
  2108. # asm 2: movdqa <ab10=%xmm10,>ab10six=%xmm12
  2109. movdqa %xmm10,%xmm12
  2110. # qhasm: float6464 ab10six *= SIX_SIX
  2111. # asm 1: mulpd SIX_SIX,<ab10six=int6464#13
  2112. # asm 2: mulpd SIX_SIX,<ab10six=%xmm12
  2113. mulpd SIX_SIX,%xmm12
  2114. # qhasm: t10 = ab10
  2115. # asm 1: movdqa <ab10=int6464#11,>t10=int6464#14
  2116. # asm 2: movdqa <ab10=%xmm10,>t10=%xmm13
  2117. movdqa %xmm10,%xmm13
  2118. # qhasm: float6464 t10 *= *(int128 *)(t2p + 0)
  2119. # asm 1: mulpd 0(<t2p=int64#4),<t10=int6464#14
  2120. # asm 2: mulpd 0(<t2p=%rcx),<t10=%xmm13
  2121. mulpd 0(%rcx),%xmm13
  2122. # qhasm: float6464 r10 +=t10
  2123. # asm 1: addpd <t10=int6464#14,<r10=int6464#12
  2124. # asm 2: addpd <t10=%xmm13,<r10=%xmm11
  2125. addpd %xmm13,%xmm11
  2126. # qhasm: t13 = ab10
  2127. # asm 1: movdqa <ab10=int6464#11,>t13=int6464#14
  2128. # asm 2: movdqa <ab10=%xmm10,>t13=%xmm13
  2129. movdqa %xmm10,%xmm13
  2130. # qhasm: float6464 t13 *= *(int128 *)(t2p + 48)
  2131. # asm 1: mulpd 48(<t2p=int64#4),<t13=int6464#14
  2132. # asm 2: mulpd 48(<t2p=%rcx),<t13=%xmm13
  2133. mulpd 48(%rcx),%xmm13
  2134. # qhasm: float6464 r13 +=t13
  2135. # asm 1: addpd <t13=int6464#14,<r13=int6464#3
  2136. # asm 2: addpd <t13=%xmm13,<r13=%xmm2
  2137. addpd %xmm13,%xmm2
  2138. # qhasm: t14 = ab10
  2139. # asm 1: movdqa <ab10=int6464#11,>t14=int6464#14
  2140. # asm 2: movdqa <ab10=%xmm10,>t14=%xmm13
  2141. movdqa %xmm10,%xmm13
  2142. # qhasm: float6464 t14 *= *(int128 *)(t2p + 64)
  2143. # asm 1: mulpd 64(<t2p=int64#4),<t14=int6464#14
  2144. # asm 2: mulpd 64(<t2p=%rcx),<t14=%xmm13
  2145. mulpd 64(%rcx),%xmm13
  2146. # qhasm: float6464 r14 +=t14
  2147. # asm 1: addpd <t14=int6464#14,<r14=int6464#4
  2148. # asm 2: addpd <t14=%xmm13,<r14=%xmm3
  2149. addpd %xmm13,%xmm3
  2150. # qhasm: t16 = ab10
  2151. # asm 1: movdqa <ab10=int6464#11,>t16=int6464#14
  2152. # asm 2: movdqa <ab10=%xmm10,>t16=%xmm13
  2153. movdqa %xmm10,%xmm13
  2154. # qhasm: float6464 t16 *= *(int128 *)(t2p + 96)
  2155. # asm 1: mulpd 96(<t2p=int64#4),<t16=int6464#14
  2156. # asm 2: mulpd 96(<t2p=%rcx),<t16=%xmm13
  2157. mulpd 96(%rcx),%xmm13
  2158. # qhasm: float6464 r16 +=t16
  2159. # asm 1: addpd <t16=int6464#14,<r16=int6464#6
  2160. # asm 2: addpd <t16=%xmm13,<r16=%xmm5
  2161. addpd %xmm13,%xmm5
  2162. # qhasm: t15 = ab10
  2163. # asm 1: movdqa <ab10=int6464#11,>t15=int6464#14
  2164. # asm 2: movdqa <ab10=%xmm10,>t15=%xmm13
  2165. movdqa %xmm10,%xmm13
  2166. # qhasm: float6464 t15 *= *(int128 *)(t2p + 80)
  2167. # asm 1: mulpd 80(<t2p=int64#4),<t15=int6464#14
  2168. # asm 2: mulpd 80(<t2p=%rcx),<t15=%xmm13
  2169. mulpd 80(%rcx),%xmm13
  2170. # qhasm: float6464 r15 +=t15
  2171. # asm 1: addpd <t15=int6464#14,<r15=int6464#5
  2172. # asm 2: addpd <t15=%xmm13,<r15=%xmm4
  2173. addpd %xmm13,%xmm4
  2174. # qhasm: t19 = ab10
  2175. # asm 1: movdqa <ab10=int6464#11,>t19=int6464#14
  2176. # asm 2: movdqa <ab10=%xmm10,>t19=%xmm13
  2177. movdqa %xmm10,%xmm13
  2178. # qhasm: float6464 t19 *= *(int128 *)(t2p + 144)
  2179. # asm 1: mulpd 144(<t2p=int64#4),<t19=int6464#14
  2180. # asm 2: mulpd 144(<t2p=%rcx),<t19=%xmm13
  2181. mulpd 144(%rcx),%xmm13
  2182. # qhasm: float6464 r19 +=t19
  2183. # asm 1: addpd <t19=int6464#14,<r19=int6464#9
  2184. # asm 2: addpd <t19=%xmm13,<r19=%xmm8
  2185. addpd %xmm13,%xmm8
  2186. # qhasm: t20 = ab10
  2187. # asm 1: movdqa <ab10=int6464#11,>t20=int6464#14
  2188. # asm 2: movdqa <ab10=%xmm10,>t20=%xmm13
  2189. movdqa %xmm10,%xmm13
  2190. # qhasm: float6464 t20 *= *(int128 *)(t2p + 160)
  2191. # asm 1: mulpd 160(<t2p=int64#4),<t20=int6464#14
  2192. # asm 2: mulpd 160(<t2p=%rcx),<t20=%xmm13
  2193. mulpd 160(%rcx),%xmm13
  2194. # qhasm: float6464 r20 +=t20
  2195. # asm 1: addpd <t20=int6464#14,<r20=int6464#10
  2196. # asm 2: addpd <t20=%xmm13,<r20=%xmm9
  2197. addpd %xmm13,%xmm9
  2198. # qhasm: r21 = ab10
  2199. # asm 1: movdqa <ab10=int6464#11,>r21=int6464#11
  2200. # asm 2: movdqa <ab10=%xmm10,>r21=%xmm10
  2201. movdqa %xmm10,%xmm10
  2202. # qhasm: float6464 r21 *= *(int128 *)(t2p + 176)
  2203. # asm 1: mulpd 176(<t2p=int64#4),<r21=int6464#11
  2204. # asm 2: mulpd 176(<t2p=%rcx),<r21=%xmm10
  2205. mulpd 176(%rcx),%xmm10
  2206. # qhasm: t11 = ab10six
  2207. # asm 1: movdqa <ab10six=int6464#13,>t11=int6464#14
  2208. # asm 2: movdqa <ab10six=%xmm12,>t11=%xmm13
  2209. movdqa %xmm12,%xmm13
  2210. # qhasm: float6464 t11 *= *(int128 *)(t2p + 16)
  2211. # asm 1: mulpd 16(<t2p=int64#4),<t11=int6464#14
  2212. # asm 2: mulpd 16(<t2p=%rcx),<t11=%xmm13
  2213. mulpd 16(%rcx),%xmm13
  2214. # qhasm: float6464 r11 +=t11
  2215. # asm 1: addpd <t11=int6464#14,<r11=int6464#1
  2216. # asm 2: addpd <t11=%xmm13,<r11=%xmm0
  2217. addpd %xmm13,%xmm0
  2218. # qhasm: t12 = ab10six
  2219. # asm 1: movdqa <ab10six=int6464#13,>t12=int6464#14
  2220. # asm 2: movdqa <ab10six=%xmm12,>t12=%xmm13
  2221. movdqa %xmm12,%xmm13
  2222. # qhasm: float6464 t12 *= *(int128 *)(t2p + 32)
  2223. # asm 1: mulpd 32(<t2p=int64#4),<t12=int6464#14
  2224. # asm 2: mulpd 32(<t2p=%rcx),<t12=%xmm13
  2225. mulpd 32(%rcx),%xmm13
  2226. # qhasm: float6464 r12 +=t12
  2227. # asm 1: addpd <t12=int6464#14,<r12=int6464#2
  2228. # asm 2: addpd <t12=%xmm13,<r12=%xmm1
  2229. addpd %xmm13,%xmm1
  2230. # qhasm: t17 = ab10six
  2231. # asm 1: movdqa <ab10six=int6464#13,>t17=int6464#14
  2232. # asm 2: movdqa <ab10six=%xmm12,>t17=%xmm13
  2233. movdqa %xmm12,%xmm13
  2234. # qhasm: float6464 t17 *= *(int128 *)(t2p + 112)
  2235. # asm 1: mulpd 112(<t2p=int64#4),<t17=int6464#14
  2236. # asm 2: mulpd 112(<t2p=%rcx),<t17=%xmm13
  2237. mulpd 112(%rcx),%xmm13
  2238. # qhasm: float6464 r17 +=t17
  2239. # asm 1: addpd <t17=int6464#14,<r17=int6464#7
  2240. # asm 2: addpd <t17=%xmm13,<r17=%xmm6
  2241. addpd %xmm13,%xmm6
  2242. # qhasm: t18 = ab10six
  2243. # asm 1: movdqa <ab10six=int6464#13,>t18=int6464#13
  2244. # asm 2: movdqa <ab10six=%xmm12,>t18=%xmm12
  2245. movdqa %xmm12,%xmm12
  2246. # qhasm: float6464 t18 *= *(int128 *)(t2p + 128)
  2247. # asm 1: mulpd 128(<t2p=int64#4),<t18=int6464#13
  2248. # asm 2: mulpd 128(<t2p=%rcx),<t18=%xmm12
  2249. mulpd 128(%rcx),%xmm12
  2250. # qhasm: float6464 r18 +=t18
  2251. # asm 1: addpd <t18=int6464#13,<r18=int6464#8
  2252. # asm 2: addpd <t18=%xmm12,<r18=%xmm7
  2253. addpd %xmm12,%xmm7
  2254. # qhasm: *(int128 *)(rp + 160) = r10
  2255. # asm 1: movdqa <r10=int6464#12,160(<rp=int64#3)
  2256. # asm 2: movdqa <r10=%xmm11,160(<rp=%rdx)
  2257. movdqa %xmm11,160(%rdx)
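# ab11 block (last input limb): r11..r21 accumulate, r22 is created;
# r11 is stored to rp + 176 at the end of the block.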
  2258. # qhasm: ab11 = *(int128 *)(t1p + 176)
  2259. # asm 1: movdqa 176(<t1p=int64#2),>ab11=int6464#12
  2260. # asm 2: movdqa 176(<t1p=%rsi),>ab11=%xmm11
  2261. movdqa 176(%rsi),%xmm11
  2262. # qhasm: ab11six = ab11
  2263. # asm 1: movdqa <ab11=int6464#12,>ab11six=int6464#13
  2264. # asm 2: movdqa <ab11=%xmm11,>ab11six=%xmm12
  2265. movdqa %xmm11,%xmm12
  2266. # qhasm: float6464 ab11six *= SIX_SIX
  2267. # asm 1: mulpd SIX_SIX,<ab11six=int6464#13
  2268. # asm 2: mulpd SIX_SIX,<ab11six=%xmm12
  2269. mulpd SIX_SIX,%xmm12
  2270. # qhasm: t11 = ab11
  2271. # asm 1: movdqa <ab11=int6464#12,>t11=int6464#14
  2272. # asm 2: movdqa <ab11=%xmm11,>t11=%xmm13
  2273. movdqa %xmm11,%xmm13
  2274. # qhasm: float6464 t11 *= *(int128 *)(t2p + 0)
  2275. # asm 1: mulpd 0(<t2p=int64#4),<t11=int6464#14
  2276. # asm 2: mulpd 0(<t2p=%rcx),<t11=%xmm13
  2277. mulpd 0(%rcx),%xmm13
  2278. # qhasm: float6464 r11 +=t11
  2279. # asm 1: addpd <t11=int6464#14,<r11=int6464#1
  2280. # asm 2: addpd <t11=%xmm13,<r11=%xmm0
  2281. addpd %xmm13,%xmm0
  2282. # qhasm: t13 = ab11
  2283. # asm 1: movdqa <ab11=int6464#12,>t13=int6464#14
  2284. # asm 2: movdqa <ab11=%xmm11,>t13=%xmm13
  2285. movdqa %xmm11,%xmm13
  2286. # qhasm: float6464 t13 *= *(int128 *)(t2p + 32)
  2287. # asm 1: mulpd 32(<t2p=int64#4),<t13=int6464#14
  2288. # asm 2: mulpd 32(<t2p=%rcx),<t13=%xmm13
  2289. mulpd 32(%rcx),%xmm13
  2290. # qhasm: float6464 r13 +=t13
  2291. # asm 1: addpd <t13=int6464#14,<r13=int6464#3
  2292. # asm 2: addpd <t13=%xmm13,<r13=%xmm2
  2293. addpd %xmm13,%xmm2
  2294. # qhasm: t14 = ab11
  2295. # asm 1: movdqa <ab11=int6464#12,>t14=int6464#14
  2296. # asm 2: movdqa <ab11=%xmm11,>t14=%xmm13
  2297. movdqa %xmm11,%xmm13
  2298. # qhasm: float6464 t14 *= *(int128 *)(t2p + 48)
  2299. # asm 1: mulpd 48(<t2p=int64#4),<t14=int6464#14
  2300. # asm 2: mulpd 48(<t2p=%rcx),<t14=%xmm13
  2301. mulpd 48(%rcx),%xmm13
  2302. # qhasm: float6464 r14 +=t14
  2303. # asm 1: addpd <t14=int6464#14,<r14=int6464#4
  2304. # asm 2: addpd <t14=%xmm13,<r14=%xmm3
  2305. addpd %xmm13,%xmm3
  2306. # qhasm: t15 = ab11
  2307. # asm 1: movdqa <ab11=int6464#12,>t15=int6464#14
  2308. # asm 2: movdqa <ab11=%xmm11,>t15=%xmm13
  2309. movdqa %xmm11,%xmm13
  2310. # qhasm: float6464 t15 *= *(int128 *)(t2p + 64)
  2311. # asm 1: mulpd 64(<t2p=int64#4),<t15=int6464#14
  2312. # asm 2: mulpd 64(<t2p=%rcx),<t15=%xmm13
  2313. mulpd 64(%rcx),%xmm13
  2314. # qhasm: float6464 r15 +=t15
  2315. # asm 1: addpd <t15=int6464#14,<r15=int6464#5
  2316. # asm 2: addpd <t15=%xmm13,<r15=%xmm4
  2317. addpd %xmm13,%xmm4
  2318. # qhasm: t16 = ab11
  2319. # asm 1: movdqa <ab11=int6464#12,>t16=int6464#14
  2320. # asm 2: movdqa <ab11=%xmm11,>t16=%xmm13
  2321. movdqa %xmm11,%xmm13
  2322. # qhasm: float6464 t16 *= *(int128 *)(t2p + 80)
  2323. # asm 1: mulpd 80(<t2p=int64#4),<t16=int6464#14
  2324. # asm 2: mulpd 80(<t2p=%rcx),<t16=%xmm13
  2325. mulpd 80(%rcx),%xmm13
  2326. # qhasm: float6464 r16 +=t16
  2327. # asm 1: addpd <t16=int6464#14,<r16=int6464#6
  2328. # asm 2: addpd <t16=%xmm13,<r16=%xmm5
  2329. addpd %xmm13,%xmm5
  2330. # qhasm: t17 = ab11
  2331. # asm 1: movdqa <ab11=int6464#12,>t17=int6464#14
  2332. # asm 2: movdqa <ab11=%xmm11,>t17=%xmm13
  2333. movdqa %xmm11,%xmm13
  2334. # qhasm: float6464 t17 *= *(int128 *)(t2p + 96)
  2335. # asm 1: mulpd 96(<t2p=int64#4),<t17=int6464#14
  2336. # asm 2: mulpd 96(<t2p=%rcx),<t17=%xmm13
  2337. mulpd 96(%rcx),%xmm13
  2338. # qhasm: float6464 r17 +=t17
  2339. # asm 1: addpd <t17=int6464#14,<r17=int6464#7
  2340. # asm 2: addpd <t17=%xmm13,<r17=%xmm6
  2341. addpd %xmm13,%xmm6
  2342. # qhasm: t19 = ab11
  2343. # asm 1: movdqa <ab11=int6464#12,>t19=int6464#14
  2344. # asm 2: movdqa <ab11=%xmm11,>t19=%xmm13
  2345. movdqa %xmm11,%xmm13
  2346. # qhasm: float6464 t19 *= *(int128 *)(t2p + 128)
  2347. # asm 1: mulpd 128(<t2p=int64#4),<t19=int6464#14
  2348. # asm 2: mulpd 128(<t2p=%rcx),<t19=%xmm13
  2349. mulpd 128(%rcx),%xmm13
  2350. # qhasm: float6464 r19 +=t19
  2351. # asm 1: addpd <t19=int6464#14,<r19=int6464#9
  2352. # asm 2: addpd <t19=%xmm13,<r19=%xmm8
  2353. addpd %xmm13,%xmm8
  2354. # qhasm: t20 = ab11
  2355. # asm 1: movdqa <ab11=int6464#12,>t20=int6464#14
  2356. # asm 2: movdqa <ab11=%xmm11,>t20=%xmm13
  2357. movdqa %xmm11,%xmm13
  2358. # qhasm: float6464 t20 *= *(int128 *)(t2p + 144)
  2359. # asm 1: mulpd 144(<t2p=int64#4),<t20=int6464#14
  2360. # asm 2: mulpd 144(<t2p=%rcx),<t20=%xmm13
  2361. mulpd 144(%rcx),%xmm13
  2362. # qhasm: float6464 r20 +=t20
  2363. # asm 1: addpd <t20=int6464#14,<r20=int6464#10
  2364. # asm 2: addpd <t20=%xmm13,<r20=%xmm9
  2365. addpd %xmm13,%xmm9
  2366. # qhasm: t21 = ab11
  2367. # asm 1: movdqa <ab11=int6464#12,>t21=int6464#14
  2368. # asm 2: movdqa <ab11=%xmm11,>t21=%xmm13
  2369. movdqa %xmm11,%xmm13
  2370. # qhasm: float6464 t21 *= *(int128 *)(t2p + 160)
  2371. # asm 1: mulpd 160(<t2p=int64#4),<t21=int6464#14
  2372. # asm 2: mulpd 160(<t2p=%rcx),<t21=%xmm13
  2373. mulpd 160(%rcx),%xmm13
  2374. # qhasm: float6464 r21 +=t21
  2375. # asm 1: addpd <t21=int6464#14,<r21=int6464#11
  2376. # asm 2: addpd <t21=%xmm13,<r21=%xmm10
  2377. addpd %xmm13,%xmm10
  2378. # qhasm: r22 = ab11
  2379. # asm 1: movdqa <ab11=int6464#12,>r22=int6464#12
  2380. # asm 2: movdqa <ab11=%xmm11,>r22=%xmm11
  2381. movdqa %xmm11,%xmm11
  2382. # qhasm: float6464 r22 *= *(int128 *)(t2p + 176)
  2383. # asm 1: mulpd 176(<t2p=int64#4),<r22=int6464#12
  2384. # asm 2: mulpd 176(<t2p=%rcx),<r22=%xmm11
  2385. mulpd 176(%rcx),%xmm11
  2386. # qhasm: t12 = ab11six
  2387. # asm 1: movdqa <ab11six=int6464#13,>t12=int6464#14
  2388. # asm 2: movdqa <ab11six=%xmm12,>t12=%xmm13
  2389. movdqa %xmm12,%xmm13
  2390. # qhasm: float6464 t12 *= *(int128 *)(t2p + 16)
  2391. # asm 1: mulpd 16(<t2p=int64#4),<t12=int6464#14
  2392. # asm 2: mulpd 16(<t2p=%rcx),<t12=%xmm13
  2393. mulpd 16(%rcx),%xmm13
  2394. # qhasm: float6464 r12 +=t12
  2395. # asm 1: addpd <t12=int6464#14,<r12=int6464#2
  2396. # asm 2: addpd <t12=%xmm13,<r12=%xmm1
  2397. addpd %xmm13,%xmm1
  2398. # qhasm: t18 = ab11six
  2399. # asm 1: movdqa <ab11six=int6464#13,>t18=int6464#13
  2400. # asm 2: movdqa <ab11six=%xmm12,>t18=%xmm12
  2401. movdqa %xmm12,%xmm12
  2402. # qhasm: float6464 t18 *= *(int128 *)(t2p + 112)
  2403. # asm 1: mulpd 112(<t2p=int64#4),<t18=int6464#13
  2404. # asm 2: mulpd 112(<t2p=%rcx),<t18=%xmm12
  2405. mulpd 112(%rcx),%xmm12
  2406. # qhasm: float6464 r18 +=t18
  2407. # asm 1: addpd <t18=int6464#13,<r18=int6464#8
  2408. # asm 2: addpd <t18=%xmm12,<r18=%xmm7
  2409. addpd %xmm12,%xmm7
  2410. # qhasm: *(int128 *)(rp + 176) = r11
  2411. # asm 1: movdqa <r11=int6464#1,176(<rp=int64#3)
  2412. # asm 2: movdqa <r11=%xmm0,176(<rp=%rdx)
  2413. movdqa %xmm0,176(%rdx)
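# At this point all 23 product coefficients exist: r0..r11 in memory at rp+0..rp+176 and
# r12..r22 in registers. The block below folds r12..r22 back into r0..r11 using the small
# integer weights spelled out in the qhasm comments (TWO_TWO through THIRTY_THIRTY).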
  2414. # qhasm: r0 = *(int128 *)(rp + 0)
  2415. # asm 1: movdqa 0(<rp=int64#3),>r0=int6464#1
  2416. # asm 2: movdqa 0(<rp=%rdx),>r0=%xmm0
  2417. movdqa 0(%rdx),%xmm0
  2418. # qhasm: float6464 r0 -= r12
  2419. # asm 1: subpd <r12=int6464#2,<r0=int6464#1
  2420. # asm 2: subpd <r12=%xmm1,<r0=%xmm0
  2421. subpd %xmm1,%xmm0
  2422. # qhasm: t15 = r15
  2423. # asm 1: movdqa <r15=int6464#5,>t15=int6464#13
  2424. # asm 2: movdqa <r15=%xmm4,>t15=%xmm12
  2425. movdqa %xmm4,%xmm12
  2426. # qhasm: float6464 t15 *= SIX_SIX
  2427. # asm 1: mulpd SIX_SIX,<t15=int6464#13
  2428. # asm 2: mulpd SIX_SIX,<t15=%xmm12
  2429. mulpd SIX_SIX,%xmm12
  2430. # qhasm: float6464 r0 += t15
  2431. # asm 1: addpd <t15=int6464#13,<r0=int6464#1
  2432. # asm 2: addpd <t15=%xmm12,<r0=%xmm0
  2433. addpd %xmm12,%xmm0
  2434. # qhasm: t18 = r18
  2435. # asm 1: movdqa <r18=int6464#8,>t18=int6464#13
  2436. # asm 2: movdqa <r18=%xmm7,>t18=%xmm12
  2437. movdqa %xmm7,%xmm12
  2438. # qhasm: float6464 t18 *= TWO_TWO
  2439. # asm 1: mulpd TWO_TWO,<t18=int6464#13
  2440. # asm 2: mulpd TWO_TWO,<t18=%xmm12
  2441. mulpd TWO_TWO,%xmm12
  2442. # qhasm: float6464 r0 -= t18
  2443. # asm 1: subpd <t18=int6464#13,<r0=int6464#1
  2444. # asm 2: subpd <t18=%xmm12,<r0=%xmm0
  2445. subpd %xmm12,%xmm0
  2446. # qhasm: t21 = r21
  2447. # asm 1: movdqa <r21=int6464#11,>t21=int6464#13
  2448. # asm 2: movdqa <r21=%xmm10,>t21=%xmm12
  2449. movdqa %xmm10,%xmm12
  2450. # qhasm: float6464 t21 *= SIX_SIX
  2451. # asm 1: mulpd SIX_SIX,<t21=int6464#13
  2452. # asm 2: mulpd SIX_SIX,<t21=%xmm12
  2453. mulpd SIX_SIX,%xmm12
  2454. # qhasm: float6464 r0 -= t21
  2455. # asm 1: subpd <t21=int6464#13,<r0=int6464#1
  2456. # asm 2: subpd <t21=%xmm12,<r0=%xmm0
  2457. subpd %xmm12,%xmm0
  2458. # qhasm: r3 = *(int128 *)(rp + 48)
  2459. # asm 1: movdqa 48(<rp=int64#3),>r3=int6464#13
  2460. # asm 2: movdqa 48(<rp=%rdx),>r3=%xmm12
  2461. movdqa 48(%rdx),%xmm12
  2462. # qhasm: float6464 r3 -= r12
  2463. # asm 1: subpd <r12=int6464#2,<r3=int6464#13
  2464. # asm 2: subpd <r12=%xmm1,<r3=%xmm12
  2465. subpd %xmm1,%xmm12
  2466. # qhasm: t15 = r15
  2467. # asm 1: movdqa <r15=int6464#5,>t15=int6464#14
  2468. # asm 2: movdqa <r15=%xmm4,>t15=%xmm13
  2469. movdqa %xmm4,%xmm13
  2470. # qhasm: float6464 t15 *= FIVE_FIVE
  2471. # asm 1: mulpd FIVE_FIVE,<t15=int6464#14
  2472. # asm 2: mulpd FIVE_FIVE,<t15=%xmm13
  2473. mulpd FIVE_FIVE,%xmm13
  2474. # qhasm: float6464 r3 += t15
  2475. # asm 1: addpd <t15=int6464#14,<r3=int6464#13
  2476. # asm 2: addpd <t15=%xmm13,<r3=%xmm12
  2477. addpd %xmm13,%xmm12
  2478. # qhasm: float6464 r3 -= r18
  2479. # asm 1: subpd <r18=int6464#8,<r3=int6464#13
  2480. # asm 2: subpd <r18=%xmm7,<r3=%xmm12
  2481. subpd %xmm7,%xmm12
  2482. # qhasm: t21 = r21
  2483. # asm 1: movdqa <r21=int6464#11,>t21=int6464#14
  2484. # asm 2: movdqa <r21=%xmm10,>t21=%xmm13
  2485. movdqa %xmm10,%xmm13
  2486. # qhasm: float6464 t21 *= EIGHT_EIGHT
  2487. # asm 1: mulpd EIGHT_EIGHT,<t21=int6464#14
  2488. # asm 2: mulpd EIGHT_EIGHT,<t21=%xmm13
  2489. mulpd EIGHT_EIGHT,%xmm13
  2490. # qhasm: float6464 r3 -= t21
  2491. # asm 1: subpd <t21=int6464#14,<r3=int6464#13
  2492. # asm 2: subpd <t21=%xmm13,<r3=%xmm12
  2493. subpd %xmm13,%xmm12
  2494. # qhasm: r6 = *(int128 *)(rp + 96)
  2495. # asm 1: movdqa 96(<rp=int64#3),>r6=int6464#14
  2496. # asm 2: movdqa 96(<rp=%rdx),>r6=%xmm13
  2497. movdqa 96(%rdx),%xmm13
  2498. # qhasm: t12 = r12
  2499. # asm 1: movdqa <r12=int6464#2,>t12=int6464#15
  2500. # asm 2: movdqa <r12=%xmm1,>t12=%xmm14
  2501. movdqa %xmm1,%xmm14
  2502. # qhasm: float6464 t12 *= FOUR_FOUR
  2503. # asm 1: mulpd FOUR_FOUR,<t12=int6464#15
  2504. # asm 2: mulpd FOUR_FOUR,<t12=%xmm14
  2505. mulpd FOUR_FOUR,%xmm14
  2506. # qhasm: float6464 r6 -= t12
  2507. # asm 1: subpd <t12=int6464#15,<r6=int6464#14
  2508. # asm 2: subpd <t12=%xmm14,<r6=%xmm13
  2509. subpd %xmm14,%xmm13
  2510. # qhasm: t15 = r15
  2511. # asm 1: movdqa <r15=int6464#5,>t15=int6464#15
  2512. # asm 2: movdqa <r15=%xmm4,>t15=%xmm14
  2513. movdqa %xmm4,%xmm14
  2514. # qhasm: float6464 t15 *= EIGHTEEN_EIGHTEEN
  2515. # asm 1: mulpd EIGHTEEN_EIGHTEEN,<t15=int6464#15
  2516. # asm 2: mulpd EIGHTEEN_EIGHTEEN,<t15=%xmm14
  2517. mulpd EIGHTEEN_EIGHTEEN,%xmm14
  2518. # qhasm: float6464 r6 += t15
  2519. # asm 1: addpd <t15=int6464#15,<r6=int6464#14
  2520. # asm 2: addpd <t15=%xmm14,<r6=%xmm13
  2521. addpd %xmm14,%xmm13
  2522. # qhasm: t18 = r18
  2523. # asm 1: movdqa <r18=int6464#8,>t18=int6464#15
  2524. # asm 2: movdqa <r18=%xmm7,>t18=%xmm14
  2525. movdqa %xmm7,%xmm14
  2526. # qhasm: float6464 t18 *= THREE_THREE
  2527. # asm 1: mulpd THREE_THREE,<t18=int6464#15
  2528. # asm 2: mulpd THREE_THREE,<t18=%xmm14
  2529. mulpd THREE_THREE,%xmm14
  2530. # qhasm: float6464 r6 -= t18
  2531. # asm 1: subpd <t18=int6464#15,<r6=int6464#14
  2532. # asm 2: subpd <t18=%xmm14,<r6=%xmm13
  2533. subpd %xmm14,%xmm13
  2534. # qhasm: t21 = r21
  2535. # asm 1: movdqa <r21=int6464#11,>t21=int6464#15
  2536. # asm 2: movdqa <r21=%xmm10,>t21=%xmm14
  2537. movdqa %xmm10,%xmm14
  2538. # qhasm: float6464 t21 *= THIRTY_THIRTY
  2539. # asm 1: mulpd THIRTY_THIRTY,<t21=int6464#15
  2540. # asm 2: mulpd THIRTY_THIRTY,<t21=%xmm14
  2541. mulpd THIRTY_THIRTY,%xmm14
  2542. # qhasm: float6464 r6 -= t21
  2543. # asm 1: subpd <t21=int6464#15,<r6=int6464#14
  2544. # asm 2: subpd <t21=%xmm14,<r6=%xmm13
  2545. subpd %xmm14,%xmm13
  2546. # qhasm: r9 = *(int128 *)(rp + 144)
  2547. # asm 1: movdqa 144(<rp=int64#3),>r9=int6464#15
  2548. # asm 2: movdqa 144(<rp=%rdx),>r9=%xmm14
  2549. movdqa 144(%rdx),%xmm14
  2550. # qhasm: float6464 r9 -= r12
  2551. # asm 1: subpd <r12=int6464#2,<r9=int6464#15
  2552. # asm 2: subpd <r12=%xmm1,<r9=%xmm14
  2553. subpd %xmm1,%xmm14
  2554. # qhasm: t15 = r15
  2555. # asm 1: movdqa <r15=int6464#5,>t15=int6464#2
  2556. # asm 2: movdqa <r15=%xmm4,>t15=%xmm1
  2557. movdqa %xmm4,%xmm1
  2558. # qhasm: float6464 t15 *= TWO_TWO
  2559. # asm 1: mulpd TWO_TWO,<t15=int6464#2
  2560. # asm 2: mulpd TWO_TWO,<t15=%xmm1
  2561. mulpd TWO_TWO,%xmm1
  2562. # qhasm: float6464 r9 += t15
  2563. # asm 1: addpd <t15=int6464#2,<r9=int6464#15
  2564. # asm 2: addpd <t15=%xmm1,<r9=%xmm14
  2565. addpd %xmm1,%xmm14
  2566. # qhasm: float6464 r9 += r18
  2567. # asm 1: addpd <r18=int6464#8,<r9=int6464#15
  2568. # asm 2: addpd <r18=%xmm7,<r9=%xmm14
  2569. addpd %xmm7,%xmm14
  2570. # qhasm: t21 = r21
  2571. # asm 1: movdqa <r21=int6464#11,>t21=int6464#2
  2572. # asm 2: movdqa <r21=%xmm10,>t21=%xmm1
  2573. movdqa %xmm10,%xmm1
  2574. # qhasm: float6464 t21 *= NINE_NINE
  2575. # asm 1: mulpd NINE_NINE,<t21=int6464#2
  2576. # asm 2: mulpd NINE_NINE,<t21=%xmm1
  2577. mulpd NINE_NINE,%xmm1
  2578. # qhasm: float6464 r9 -= t21
  2579. # asm 1: subpd <t21=int6464#2,<r9=int6464#15
  2580. # asm 2: subpd <t21=%xmm1,<r9=%xmm14
  2581. subpd %xmm1,%xmm14
  2582. # qhasm: r1 = *(int128 *)(rp + 16)
  2583. # asm 1: movdqa 16(<rp=int64#3),>r1=int6464#2
  2584. # asm 2: movdqa 16(<rp=%rdx),>r1=%xmm1
  2585. movdqa 16(%rdx),%xmm1
  2586. # qhasm: float6464 r1 -= r13
  2587. # asm 1: subpd <r13=int6464#3,<r1=int6464#2
  2588. # asm 2: subpd <r13=%xmm2,<r1=%xmm1
  2589. subpd %xmm2,%xmm1
  2590. # qhasm: float6464 r1 += r16
  2591. # asm 1: addpd <r16=int6464#6,<r1=int6464#2
  2592. # asm 2: addpd <r16=%xmm5,<r1=%xmm1
  2593. addpd %xmm5,%xmm1
  2594. # qhasm: t19 = r19
  2595. # asm 1: movdqa <r19=int6464#9,>t19=int6464#5
  2596. # asm 2: movdqa <r19=%xmm8,>t19=%xmm4
  2597. movdqa %xmm8,%xmm4
  2598. # qhasm: float6464 t19 *= TWO_TWO
  2599. # asm 1: mulpd TWO_TWO,<t19=int6464#5
  2600. # asm 2: mulpd TWO_TWO,<t19=%xmm4
  2601. mulpd TWO_TWO,%xmm4
  2602. # qhasm: float6464 r1 -= t19
  2603. # asm 1: subpd <t19=int6464#5,<r1=int6464#2
  2604. # asm 2: subpd <t19=%xmm4,<r1=%xmm1
  2605. subpd %xmm4,%xmm1
  2606. # qhasm: float6464 r1 -= r22
  2607. # asm 1: subpd <r22=int6464#12,<r1=int6464#2
  2608. # asm 2: subpd <r22=%xmm11,<r1=%xmm1
  2609. subpd %xmm11,%xmm1
  2610. # qhasm: r4 = *(int128 *)(rp + 64)
  2611. # asm 1: movdqa 64(<rp=int64#3),>r4=int6464#5
  2612. # asm 2: movdqa 64(<rp=%rdx),>r4=%xmm4
  2613. movdqa 64(%rdx),%xmm4
  2614. # qhasm: t13 = r13
  2615. # asm 1: movdqa <r13=int6464#3,>t13=int6464#8
  2616. # asm 2: movdqa <r13=%xmm2,>t13=%xmm7
  2617. movdqa %xmm2,%xmm7
  2618. # qhasm: float6464 t13 *= SIX_SIX
  2619. # asm 1: mulpd SIX_SIX,<t13=int6464#8
  2620. # asm 2: mulpd SIX_SIX,<t13=%xmm7
  2621. mulpd SIX_SIX,%xmm7
  2622. # qhasm: float6464 r4 -= t13
  2623. # asm 1: subpd <t13=int6464#8,<r4=int6464#5
  2624. # asm 2: subpd <t13=%xmm7,<r4=%xmm4
  2625. subpd %xmm7,%xmm4
  2626. # qhasm: t16 = r16
  2627. # asm 1: movdqa <r16=int6464#6,>t16=int6464#8
  2628. # asm 2: movdqa <r16=%xmm5,>t16=%xmm7
  2629. movdqa %xmm5,%xmm7
  2630. # qhasm: float6464 t16 *= FIVE_FIVE
  2631. # asm 1: mulpd FIVE_FIVE,<t16=int6464#8
  2632. # asm 2: mulpd FIVE_FIVE,<t16=%xmm7
  2633. mulpd FIVE_FIVE,%xmm7
  2634. # qhasm: float6464 r4 += t16
  2635. # asm 1: addpd <t16=int6464#8,<r4=int6464#5
  2636. # asm 2: addpd <t16=%xmm7,<r4=%xmm4
  2637. addpd %xmm7,%xmm4
  2638. # qhasm: t19 = r19
  2639. # asm 1: movdqa <r19=int6464#9,>t19=int6464#8
  2640. # asm 2: movdqa <r19=%xmm8,>t19=%xmm7
  2641. movdqa %xmm8,%xmm7
  2642. # qhasm: float6464 t19 *= SIX_SIX
  2643. # asm 1: mulpd SIX_SIX,<t19=int6464#8
  2644. # asm 2: mulpd SIX_SIX,<t19=%xmm7
  2645. mulpd SIX_SIX,%xmm7
  2646. # qhasm: float6464 r4 -= t19
  2647. # asm 1: subpd <t19=int6464#8,<r4=int6464#5
  2648. # asm 2: subpd <t19=%xmm7,<r4=%xmm4
  2649. subpd %xmm7,%xmm4
  2650. # qhasm: t22 = r22
  2651. # asm 1: movdqa <r22=int6464#12,>t22=int6464#8
  2652. # asm 2: movdqa <r22=%xmm11,>t22=%xmm7
  2653. movdqa %xmm11,%xmm7
  2654. # qhasm: float6464 t22 *= EIGHT_EIGHT
  2655. # asm 1: mulpd EIGHT_EIGHT,<t22=int6464#8
  2656. # asm 2: mulpd EIGHT_EIGHT,<t22=%xmm7
  2657. mulpd EIGHT_EIGHT,%xmm7
  2658. # qhasm: float6464 r4 -= t22
  2659. # asm 1: subpd <t22=int6464#8,<r4=int6464#5
  2660. # asm 2: subpd <t22=%xmm7,<r4=%xmm4
  2661. subpd %xmm7,%xmm4
  2662. # qhasm: r7 = *(int128 *)(rp + 112)
  2663. # asm 1: movdqa 112(<rp=int64#3),>r7=int6464#8
  2664. # asm 2: movdqa 112(<rp=%rdx),>r7=%xmm7
  2665. movdqa 112(%rdx),%xmm7
  2666. # qhasm: t13 = r13
  2667. # asm 1: movdqa <r13=int6464#3,>t13=int6464#11
  2668. # asm 2: movdqa <r13=%xmm2,>t13=%xmm10
  2669. movdqa %xmm2,%xmm10
  2670. # qhasm: float6464 t13 *= FOUR_FOUR
  2671. # asm 1: mulpd FOUR_FOUR,<t13=int6464#11
  2672. # asm 2: mulpd FOUR_FOUR,<t13=%xmm10
  2673. mulpd FOUR_FOUR,%xmm10
  2674. # qhasm: float6464 r7 -= t13
  2675. # asm 1: subpd <t13=int6464#11,<r7=int6464#8
  2676. # asm 2: subpd <t13=%xmm10,<r7=%xmm7
  2677. subpd %xmm10,%xmm7
  2678. # qhasm: t16 = r16
  2679. # asm 1: movdqa <r16=int6464#6,>t16=int6464#11
  2680. # asm 2: movdqa <r16=%xmm5,>t16=%xmm10
  2681. movdqa %xmm5,%xmm10
  2682. # qhasm: float6464 t16 *= THREE_THREE
  2683. # asm 1: mulpd THREE_THREE,<t16=int6464#11
  2684. # asm 2: mulpd THREE_THREE,<t16=%xmm10
  2685. mulpd THREE_THREE,%xmm10
  2686. # qhasm: float6464 r7 += t16
  2687. # asm 1: addpd <t16=int6464#11,<r7=int6464#8
  2688. # asm 2: addpd <t16=%xmm10,<r7=%xmm7
  2689. addpd %xmm10,%xmm7
  2690. # qhasm: t19 = r19
  2691. # asm 1: movdqa <r19=int6464#9,>t19=int6464#11
  2692. # asm 2: movdqa <r19=%xmm8,>t19=%xmm10
  2693. movdqa %xmm8,%xmm10
  2694. # qhasm: float6464 t19 *= THREE_THREE
  2695. # asm 1: mulpd THREE_THREE,<t19=int6464#11
  2696. # asm 2: mulpd THREE_THREE,<t19=%xmm10
  2697. mulpd THREE_THREE,%xmm10
  2698. # qhasm: float6464 r7 -= t19
  2699. # asm 1: subpd <t19=int6464#11,<r7=int6464#8
  2700. # asm 2: subpd <t19=%xmm10,<r7=%xmm7
  2701. subpd %xmm10,%xmm7
  2702. # qhasm: t22 = r22
  2703. # asm 1: movdqa <r22=int6464#12,>t22=int6464#11
  2704. # asm 2: movdqa <r22=%xmm11,>t22=%xmm10
  2705. movdqa %xmm11,%xmm10
  2706. # qhasm: float6464 t22 *= FIVE_FIVE
  2707. # asm 1: mulpd FIVE_FIVE,<t22=int6464#11
  2708. # asm 2: mulpd FIVE_FIVE,<t22=%xmm10
  2709. mulpd FIVE_FIVE,%xmm10
  2710. # qhasm: float6464 r7 -= t22
  2711. # asm 1: subpd <t22=int6464#11,<r7=int6464#8
  2712. # asm 2: subpd <t22=%xmm10,<r7=%xmm7
  2713. subpd %xmm10,%xmm7
  2714. # qhasm: r10 = *(int128 *)(rp + 160)
  2715. # asm 1: movdqa 160(<rp=int64#3),>r10=int6464#11
  2716. # asm 2: movdqa 160(<rp=%rdx),>r10=%xmm10
  2717. movdqa 160(%rdx),%xmm10
  2718. # qhasm: t13 = r13
  2719. # asm 1: movdqa <r13=int6464#3,>t13=int6464#3
  2720. # asm 2: movdqa <r13=%xmm2,>t13=%xmm2
  2721. movdqa %xmm2,%xmm2
  2722. # qhasm: float6464 t13 *= SIX_SIX
  2723. # asm 1: mulpd SIX_SIX,<t13=int6464#3
  2724. # asm 2: mulpd SIX_SIX,<t13=%xmm2
  2725. mulpd SIX_SIX,%xmm2
  2726. # qhasm: float6464 r10 -= t13
  2727. # asm 1: subpd <t13=int6464#3,<r10=int6464#11
  2728. # asm 2: subpd <t13=%xmm2,<r10=%xmm10
  2729. subpd %xmm2,%xmm10
  2730. # qhasm: t16 = r16
  2731. # asm 1: movdqa <r16=int6464#6,>t16=int6464#3
  2732. # asm 2: movdqa <r16=%xmm5,>t16=%xmm2
  2733. movdqa %xmm5,%xmm2
  2734. # qhasm: float6464 t16 *= TWO_TWO
  2735. # asm 1: mulpd TWO_TWO,<t16=int6464#3
  2736. # asm 2: mulpd TWO_TWO,<t16=%xmm2
  2737. mulpd TWO_TWO,%xmm2
  2738. # qhasm: float6464 r10 += t16
  2739. # asm 1: addpd <t16=int6464#3,<r10=int6464#11
  2740. # asm 2: addpd <t16=%xmm2,<r10=%xmm10
  2741. addpd %xmm2,%xmm10
  2742. # qhasm: t19 = r19
  2743. # asm 1: movdqa <r19=int6464#9,>t19=int6464#3
  2744. # asm 2: movdqa <r19=%xmm8,>t19=%xmm2
  2745. movdqa %xmm8,%xmm2
  2746. # qhasm: float6464 t19 *= SIX_SIX
  2747. # asm 1: mulpd SIX_SIX,<t19=int6464#3
  2748. # asm 2: mulpd SIX_SIX,<t19=%xmm2
  2749. mulpd SIX_SIX,%xmm2
  2750. # qhasm: float6464 r10 += t19
  2751. # asm 1: addpd <t19=int6464#3,<r10=int6464#11
  2752. # asm 2: addpd <t19=%xmm2,<r10=%xmm10
  2753. addpd %xmm2,%xmm10
  2754. # qhasm: t22 = r22
  2755. # asm 1: movdqa <r22=int6464#12,>t22=int6464#3
  2756. # asm 2: movdqa <r22=%xmm11,>t22=%xmm2
  2757. movdqa %xmm11,%xmm2
  2758. # qhasm: float6464 t22 *= NINE_NINE
  2759. # asm 1: mulpd NINE_NINE,<t22=int6464#3
  2760. # asm 2: mulpd NINE_NINE,<t22=%xmm2
  2761. mulpd NINE_NINE,%xmm2
  2762. # qhasm: float6464 r10 -= t22
  2763. # asm 1: subpd <t22=int6464#3,<r10=int6464#11
  2764. # asm 2: subpd <t22=%xmm2,<r10=%xmm10
  2765. subpd %xmm2,%xmm10
  2766. # qhasm: r2 = *(int128 *)(rp + 32)
  2767. # asm 1: movdqa 32(<rp=int64#3),>r2=int6464#3
  2768. # asm 2: movdqa 32(<rp=%rdx),>r2=%xmm2
  2769. movdqa 32(%rdx),%xmm2
  2770. # qhasm: float6464 r2 -= r14
  2771. # asm 1: subpd <r14=int6464#4,<r2=int6464#3
  2772. # asm 2: subpd <r14=%xmm3,<r2=%xmm2
  2773. subpd %xmm3,%xmm2
  2774. # qhasm: float6464 r2 += r17
  2775. # asm 1: addpd <r17=int6464#7,<r2=int6464#3
  2776. # asm 2: addpd <r17=%xmm6,<r2=%xmm2
  2777. addpd %xmm6,%xmm2
  2778. # qhasm: t20 = r20
  2779. # asm 1: movdqa <r20=int6464#10,>t20=int6464#6
  2780. # asm 2: movdqa <r20=%xmm9,>t20=%xmm5
  2781. movdqa %xmm9,%xmm5
  2782. # qhasm: float6464 t20 *= TWO_TWO
  2783. # asm 1: mulpd TWO_TWO,<t20=int6464#6
  2784. # asm 2: mulpd TWO_TWO,<t20=%xmm5
  2785. mulpd TWO_TWO,%xmm5
  2786. # qhasm: float6464 r2 -= t20
  2787. # asm 1: subpd <t20=int6464#6,<r2=int6464#3
  2788. # asm 2: subpd <t20=%xmm5,<r2=%xmm2
  2789. subpd %xmm5,%xmm2
  2790. # qhasm: r5 = *(int128 *)(rp + 80)
  2791. # asm 1: movdqa 80(<rp=int64#3),>r5=int6464#6
  2792. # asm 2: movdqa 80(<rp=%rdx),>r5=%xmm5
  2793. movdqa 80(%rdx),%xmm5
  2794. # qhasm: t14 = r14
  2795. # asm 1: movdqa <r14=int6464#4,>t14=int6464#9
  2796. # asm 2: movdqa <r14=%xmm3,>t14=%xmm8
  2797. movdqa %xmm3,%xmm8
  2798. # qhasm: float6464 t14 *= SIX_SIX
  2799. # asm 1: mulpd SIX_SIX,<t14=int6464#9
  2800. # asm 2: mulpd SIX_SIX,<t14=%xmm8
  2801. mulpd SIX_SIX,%xmm8
  2802. # qhasm: float6464 r5 -= t14
  2803. # asm 1: subpd <t14=int6464#9,<r5=int6464#6
  2804. # asm 2: subpd <t14=%xmm8,<r5=%xmm5
  2805. subpd %xmm8,%xmm5
  2806. # qhasm: t17 = r17
  2807. # asm 1: movdqa <r17=int6464#7,>t17=int6464#9
  2808. # asm 2: movdqa <r17=%xmm6,>t17=%xmm8
  2809. movdqa %xmm6,%xmm8
  2810. # qhasm: float6464 t17 *= FIVE_FIVE
  2811. # asm 1: mulpd FIVE_FIVE,<t17=int6464#9
  2812. # asm 2: mulpd FIVE_FIVE,<t17=%xmm8
  2813. mulpd FIVE_FIVE,%xmm8
  2814. # qhasm: float6464 r5 += t17
  2815. # asm 1: addpd <t17=int6464#9,<r5=int6464#6
  2816. # asm 2: addpd <t17=%xmm8,<r5=%xmm5
  2817. addpd %xmm8,%xmm5
  2818. # qhasm: t20 = r20
  2819. # asm 1: movdqa <r20=int6464#10,>t20=int6464#9
  2820. # asm 2: movdqa <r20=%xmm9,>t20=%xmm8
  2821. movdqa %xmm9,%xmm8
  2822. # qhasm: float6464 t20 *= SIX_SIX
  2823. # asm 1: mulpd SIX_SIX,<t20=int6464#9
  2824. # asm 2: mulpd SIX_SIX,<t20=%xmm8
  2825. mulpd SIX_SIX,%xmm8
  2826. # qhasm: float6464 r5 -= t20
  2827. # asm 1: subpd <t20=int6464#9,<r5=int6464#6
  2828. # asm 2: subpd <t20=%xmm8,<r5=%xmm5
  2829. subpd %xmm8,%xmm5
  2830. # qhasm: r8 = *(int128 *)(rp + 128)
  2831. # asm 1: movdqa 128(<rp=int64#3),>r8=int6464#9
  2832. # asm 2: movdqa 128(<rp=%rdx),>r8=%xmm8
  2833. movdqa 128(%rdx),%xmm8
  2834. # qhasm: t14 = r14
  2835. # asm 1: movdqa <r14=int6464#4,>t14=int6464#12
  2836. # asm 2: movdqa <r14=%xmm3,>t14=%xmm11
  2837. movdqa %xmm3,%xmm11
  2838. # qhasm: float6464 t14 *= FOUR_FOUR
  2839. # asm 1: mulpd FOUR_FOUR,<t14=int6464#12
  2840. # asm 2: mulpd FOUR_FOUR,<t14=%xmm11
  2841. mulpd FOUR_FOUR,%xmm11
  2842. # qhasm: float6464 r8 -= t14
  2843. # asm 1: subpd <t14=int6464#12,<r8=int6464#9
  2844. # asm 2: subpd <t14=%xmm11,<r8=%xmm8
  2845. subpd %xmm11,%xmm8
  2846. # qhasm: t17 = r17
  2847. # asm 1: movdqa <r17=int6464#7,>t17=int6464#12
  2848. # asm 2: movdqa <r17=%xmm6,>t17=%xmm11
  2849. movdqa %xmm6,%xmm11
  2850. # qhasm: float6464 t17 *= THREE_THREE
  2851. # asm 1: mulpd THREE_THREE,<t17=int6464#12
  2852. # asm 2: mulpd THREE_THREE,<t17=%xmm11
  2853. mulpd THREE_THREE,%xmm11
  2854. # qhasm: float6464 r8 += t17
  2855. # asm 1: addpd <t17=int6464#12,<r8=int6464#9
  2856. # asm 2: addpd <t17=%xmm11,<r8=%xmm8
  2857. addpd %xmm11,%xmm8
  2858. # qhasm: t20 = r20
  2859. # asm 1: movdqa <r20=int6464#10,>t20=int6464#12
  2860. # asm 2: movdqa <r20=%xmm9,>t20=%xmm11
  2861. movdqa %xmm9,%xmm11
  2862. # qhasm: float6464 t20 *= THREE_THREE
  2863. # asm 1: mulpd THREE_THREE,<t20=int6464#12
  2864. # asm 2: mulpd THREE_THREE,<t20=%xmm11
  2865. mulpd THREE_THREE,%xmm11
  2866. # qhasm: float6464 r8 -= t20
  2867. # asm 1: subpd <t20=int6464#12,<r8=int6464#9
  2868. # asm 2: subpd <t20=%xmm11,<r8=%xmm8
  2869. subpd %xmm11,%xmm8
  2870. # qhasm: r11 = *(int128 *)(rp + 176)
  2871. # asm 1: movdqa 176(<rp=int64#3),>r11=int6464#12
  2872. # asm 2: movdqa 176(<rp=%rdx),>r11=%xmm11
  2873. movdqa 176(%rdx),%xmm11
  2874. # qhasm: t14 = r14
  2875. # asm 1: movdqa <r14=int6464#4,>t14=int6464#4
  2876. # asm 2: movdqa <r14=%xmm3,>t14=%xmm3
  2877. movdqa %xmm3,%xmm3
  2878. # qhasm: float6464 t14 *= SIX_SIX
  2879. # asm 1: mulpd SIX_SIX,<t14=int6464#4
  2880. # asm 2: mulpd SIX_SIX,<t14=%xmm3
  2881. mulpd SIX_SIX,%xmm3
  2882. # qhasm: float6464 r11 -= t14
  2883. # asm 1: subpd <t14=int6464#4,<r11=int6464#12
  2884. # asm 2: subpd <t14=%xmm3,<r11=%xmm11
  2885. subpd %xmm3,%xmm11
  2886. # qhasm: t17 = r17
  2887. # asm 1: movdqa <r17=int6464#7,>t17=int6464#4
  2888. # asm 2: movdqa <r17=%xmm6,>t17=%xmm3
  2889. movdqa %xmm6,%xmm3
  2890. # qhasm: float6464 t17 *= TWO_TWO
  2891. # asm 1: mulpd TWO_TWO,<t17=int6464#4
  2892. # asm 2: mulpd TWO_TWO,<t17=%xmm3
  2893. mulpd TWO_TWO,%xmm3
  2894. # qhasm: float6464 r11 += t17
  2895. # asm 1: addpd <t17=int6464#4,<r11=int6464#12
  2896. # asm 2: addpd <t17=%xmm3,<r11=%xmm11
  2897. addpd %xmm3,%xmm11
  2898. # qhasm: t20 = r20
  2899. # asm 1: movdqa <r20=int6464#10,>t20=int6464#4
  2900. # asm 2: movdqa <r20=%xmm9,>t20=%xmm3
  2901. movdqa %xmm9,%xmm3
  2902. # qhasm: float6464 t20 *= SIX_SIX
  2903. # asm 1: mulpd SIX_SIX,<t20=int6464#4
  2904. # asm 2: mulpd SIX_SIX,<t20=%xmm3
  2905. mulpd SIX_SIX,%xmm3
  2906. # qhasm: float6464 r11 += t20
  2907. # asm 1: addpd <t20=int6464#4,<r11=int6464#12
  2908. # asm 2: addpd <t20=%xmm3,<r11=%xmm11
  2909. addpd %xmm3,%xmm11
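# Carry/normalization phase. Each step below computes carry = limb * VINV_VINV, rounds it to
# an integer by adding and then subtracting ROUND_ROUND, adds the carry into the next limb,
# and subtracts carry * V_V from the current limb; limbs r0 and r6 use the V6INV_V6INV /
# V6_V6 pair instead.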
  2910. # qhasm: round = ROUND_ROUND
  2911. # asm 1: movdqa ROUND_ROUND,<round=int6464#4
  2912. # asm 2: movdqa ROUND_ROUND,<round=%xmm3
  2913. movdqa ROUND_ROUND,%xmm3
  2914. # qhasm: carry = r1
  2915. # asm 1: movdqa <r1=int6464#2,>carry=int6464#7
  2916. # asm 2: movdqa <r1=%xmm1,>carry=%xmm6
  2917. movdqa %xmm1,%xmm6
  2918. # qhasm: float6464 carry *= VINV_VINV
  2919. # asm 1: mulpd VINV_VINV,<carry=int6464#7
  2920. # asm 2: mulpd VINV_VINV,<carry=%xmm6
  2921. mulpd VINV_VINV,%xmm6
  2922. # qhasm: float6464 carry += round
  2923. # asm 1: addpd <round=int6464#4,<carry=int6464#7
  2924. # asm 2: addpd <round=%xmm3,<carry=%xmm6
  2925. addpd %xmm3,%xmm6
  2926. # qhasm: float6464 carry -= round
  2927. # asm 1: subpd <round=int6464#4,<carry=int6464#7
  2928. # asm 2: subpd <round=%xmm3,<carry=%xmm6
  2929. subpd %xmm3,%xmm6
  2930. # qhasm: float6464 r2 += carry
  2931. # asm 1: addpd <carry=int6464#7,<r2=int6464#3
  2932. # asm 2: addpd <carry=%xmm6,<r2=%xmm2
  2933. addpd %xmm6,%xmm2
  2934. # qhasm: float6464 carry *= V_V
  2935. # asm 1: mulpd V_V,<carry=int6464#7
  2936. # asm 2: mulpd V_V,<carry=%xmm6
  2937. mulpd V_V,%xmm6
  2938. # qhasm: float6464 r1 -= carry
  2939. # asm 1: subpd <carry=int6464#7,<r1=int6464#2
  2940. # asm 2: subpd <carry=%xmm6,<r1=%xmm1
  2941. subpd %xmm6,%xmm1
  2942. # qhasm: carry = r4
  2943. # asm 1: movdqa <r4=int6464#5,>carry=int6464#7
  2944. # asm 2: movdqa <r4=%xmm4,>carry=%xmm6
  2945. movdqa %xmm4,%xmm6
  2946. # qhasm: float6464 carry *= VINV_VINV
  2947. # asm 1: mulpd VINV_VINV,<carry=int6464#7
  2948. # asm 2: mulpd VINV_VINV,<carry=%xmm6
  2949. mulpd VINV_VINV,%xmm6
  2950. # qhasm: float6464 carry += round
  2951. # asm 1: addpd <round=int6464#4,<carry=int6464#7
  2952. # asm 2: addpd <round=%xmm3,<carry=%xmm6
  2953. addpd %xmm3,%xmm6
  2954. # qhasm: float6464 carry -= round
  2955. # asm 1: subpd <round=int6464#4,<carry=int6464#7
  2956. # asm 2: subpd <round=%xmm3,<carry=%xmm6
  2957. subpd %xmm3,%xmm6
  2958. # qhasm: float6464 r5 += carry
  2959. # asm 1: addpd <carry=int6464#7,<r5=int6464#6
  2960. # asm 2: addpd <carry=%xmm6,<r5=%xmm5
  2961. addpd %xmm6,%xmm5
  2962. # qhasm: float6464 carry *= V_V
  2963. # asm 1: mulpd V_V,<carry=int6464#7
  2964. # asm 2: mulpd V_V,<carry=%xmm6
  2965. mulpd V_V,%xmm6
  2966. # qhasm: float6464 r4 -= carry
  2967. # asm 1: subpd <carry=int6464#7,<r4=int6464#5
  2968. # asm 2: subpd <carry=%xmm6,<r4=%xmm4
  2969. subpd %xmm6,%xmm4
  2970. # qhasm: carry = r7
  2971. # asm 1: movdqa <r7=int6464#8,>carry=int6464#7
  2972. # asm 2: movdqa <r7=%xmm7,>carry=%xmm6
  2973. movdqa %xmm7,%xmm6
  2974. # qhasm: float6464 carry *= VINV_VINV
  2975. # asm 1: mulpd VINV_VINV,<carry=int6464#7
  2976. # asm 2: mulpd VINV_VINV,<carry=%xmm6
  2977. mulpd VINV_VINV,%xmm6
  2978. # qhasm: float6464 carry += round
  2979. # asm 1: addpd <round=int6464#4,<carry=int6464#7
  2980. # asm 2: addpd <round=%xmm3,<carry=%xmm6
  2981. addpd %xmm3,%xmm6
  2982. # qhasm: float6464 carry -= round
  2983. # asm 1: subpd <round=int6464#4,<carry=int6464#7
  2984. # asm 2: subpd <round=%xmm3,<carry=%xmm6
  2985. subpd %xmm3,%xmm6
  2986. # qhasm: float6464 r8 += carry
  2987. # asm 1: addpd <carry=int6464#7,<r8=int6464#9
  2988. # asm 2: addpd <carry=%xmm6,<r8=%xmm8
  2989. addpd %xmm6,%xmm8
  2990. # qhasm: float6464 carry *= V_V
  2991. # asm 1: mulpd V_V,<carry=int6464#7
  2992. # asm 2: mulpd V_V,<carry=%xmm6
  2993. mulpd V_V,%xmm6
  2994. # qhasm: float6464 r7 -= carry
  2995. # asm 1: subpd <carry=int6464#7,<r7=int6464#8
  2996. # asm 2: subpd <carry=%xmm6,<r7=%xmm7
  2997. subpd %xmm6,%xmm7
  2998. # qhasm: carry = r10
  2999. # asm 1: movdqa <r10=int6464#11,>carry=int6464#7
  3000. # asm 2: movdqa <r10=%xmm10,>carry=%xmm6
  3001. movdqa %xmm10,%xmm6
  3002. # qhasm: float6464 carry *= VINV_VINV
  3003. # asm 1: mulpd VINV_VINV,<carry=int6464#7
  3004. # asm 2: mulpd VINV_VINV,<carry=%xmm6
  3005. mulpd VINV_VINV,%xmm6
  3006. # qhasm: float6464 carry += round
  3007. # asm 1: addpd <round=int6464#4,<carry=int6464#7
  3008. # asm 2: addpd <round=%xmm3,<carry=%xmm6
  3009. addpd %xmm3,%xmm6
  3010. # qhasm: float6464 carry -= round
  3011. # asm 1: subpd <round=int6464#4,<carry=int6464#7
  3012. # asm 2: subpd <round=%xmm3,<carry=%xmm6
  3013. subpd %xmm3,%xmm6
  3014. # qhasm: float6464 r11 += carry
  3015. # asm 1: addpd <carry=int6464#7,<r11=int6464#12
  3016. # asm 2: addpd <carry=%xmm6,<r11=%xmm11
  3017. addpd %xmm6,%xmm11
  3018. # qhasm: float6464 carry *= V_V
  3019. # asm 1: mulpd V_V,<carry=int6464#7
  3020. # asm 2: mulpd V_V,<carry=%xmm6
  3021. mulpd V_V,%xmm6
  3022. # qhasm: float6464 r10 -= carry
  3023. # asm 1: subpd <carry=int6464#7,<r10=int6464#11
  3024. # asm 2: subpd <carry=%xmm6,<r10=%xmm10
  3025. subpd %xmm6,%xmm10
  3026. # qhasm: carry = r2
  3027. # asm 1: movdqa <r2=int6464#3,>carry=int6464#7
  3028. # asm 2: movdqa <r2=%xmm2,>carry=%xmm6
  3029. movdqa %xmm2,%xmm6
  3030. # qhasm: float6464 carry *= VINV_VINV
  3031. # asm 1: mulpd VINV_VINV,<carry=int6464#7
  3032. # asm 2: mulpd VINV_VINV,<carry=%xmm6
  3033. mulpd VINV_VINV,%xmm6
  3034. # qhasm: float6464 carry += round
  3035. # asm 1: addpd <round=int6464#4,<carry=int6464#7
  3036. # asm 2: addpd <round=%xmm3,<carry=%xmm6
  3037. addpd %xmm3,%xmm6
  3038. # qhasm: float6464 carry -= round
  3039. # asm 1: subpd <round=int6464#4,<carry=int6464#7
  3040. # asm 2: subpd <round=%xmm3,<carry=%xmm6
  3041. subpd %xmm3,%xmm6
  3042. # qhasm: float6464 r3 += carry
  3043. # asm 1: addpd <carry=int6464#7,<r3=int6464#13
  3044. # asm 2: addpd <carry=%xmm6,<r3=%xmm12
  3045. addpd %xmm6,%xmm12
  3046. # qhasm: float6464 carry *= V_V
  3047. # asm 1: mulpd V_V,<carry=int6464#7
  3048. # asm 2: mulpd V_V,<carry=%xmm6
  3049. mulpd V_V,%xmm6
  3050. # qhasm: float6464 r2 -= carry
  3051. # asm 1: subpd <carry=int6464#7,<r2=int6464#3
  3052. # asm 2: subpd <carry=%xmm6,<r2=%xmm2
  3053. subpd %xmm6,%xmm2
  3054. # qhasm: carry = r5
  3055. # asm 1: movdqa <r5=int6464#6,>carry=int6464#7
  3056. # asm 2: movdqa <r5=%xmm5,>carry=%xmm6
  3057. movdqa %xmm5,%xmm6
  3058. # qhasm: float6464 carry *= VINV_VINV
  3059. # asm 1: mulpd VINV_VINV,<carry=int6464#7
  3060. # asm 2: mulpd VINV_VINV,<carry=%xmm6
  3061. mulpd VINV_VINV,%xmm6
  3062. # qhasm: float6464 carry += round
  3063. # asm 1: addpd <round=int6464#4,<carry=int6464#7
  3064. # asm 2: addpd <round=%xmm3,<carry=%xmm6
  3065. addpd %xmm3,%xmm6
  3066. # qhasm: float6464 carry -= round
  3067. # asm 1: subpd <round=int6464#4,<carry=int6464#7
  3068. # asm 2: subpd <round=%xmm3,<carry=%xmm6
  3069. subpd %xmm3,%xmm6
  3070. # qhasm: float6464 r6 += carry
  3071. # asm 1: addpd <carry=int6464#7,<r6=int6464#14
  3072. # asm 2: addpd <carry=%xmm6,<r6=%xmm13
  3073. addpd %xmm6,%xmm13
  3074. # qhasm: float6464 carry *= V_V
  3075. # asm 1: mulpd V_V,<carry=int6464#7
  3076. # asm 2: mulpd V_V,<carry=%xmm6
  3077. mulpd V_V,%xmm6
  3078. # qhasm: float6464 r5 -= carry
  3079. # asm 1: subpd <carry=int6464#7,<r5=int6464#6
  3080. # asm 2: subpd <carry=%xmm6,<r5=%xmm5
  3081. subpd %xmm6,%xmm5
  3082. # qhasm: carry = r8
  3083. # asm 1: movdqa <r8=int6464#9,>carry=int6464#7
  3084. # asm 2: movdqa <r8=%xmm8,>carry=%xmm6
  3085. movdqa %xmm8,%xmm6
  3086. # qhasm: float6464 carry *= VINV_VINV
  3087. # asm 1: mulpd VINV_VINV,<carry=int6464#7
  3088. # asm 2: mulpd VINV_VINV,<carry=%xmm6
  3089. mulpd VINV_VINV,%xmm6
  3090. # qhasm: float6464 carry += round
  3091. # asm 1: addpd <round=int6464#4,<carry=int6464#7
  3092. # asm 2: addpd <round=%xmm3,<carry=%xmm6
  3093. addpd %xmm3,%xmm6
  3094. # qhasm: float6464 carry -= round
  3095. # asm 1: subpd <round=int6464#4,<carry=int6464#7
  3096. # asm 2: subpd <round=%xmm3,<carry=%xmm6
  3097. subpd %xmm3,%xmm6
  3098. # qhasm: float6464 r9 += carry
  3099. # asm 1: addpd <carry=int6464#7,<r9=int6464#15
  3100. # asm 2: addpd <carry=%xmm6,<r9=%xmm14
  3101. addpd %xmm6,%xmm14
  3102. # qhasm: float6464 carry *= V_V
  3103. # asm 1: mulpd V_V,<carry=int6464#7
  3104. # asm 2: mulpd V_V,<carry=%xmm6
  3105. mulpd V_V,%xmm6
  3106. # qhasm: float6464 r8 -= carry
  3107. # asm 1: subpd <carry=int6464#7,<r8=int6464#9
  3108. # asm 2: subpd <carry=%xmm6,<r8=%xmm8
  3109. subpd %xmm6,%xmm8
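# Carry out of the top limb r11 wraps around: it is subtracted once from r0, r3 and r9,
# four times (via FOUR_FOUR) from r6, and carry * V_V is removed from r11 itself.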
  3110. # qhasm: carry = r11
  3111. # asm 1: movdqa <r11=int6464#12,>carry=int6464#7
  3112. # asm 2: movdqa <r11=%xmm11,>carry=%xmm6
  3113. movdqa %xmm11,%xmm6
  3114. # qhasm: float6464 carry *= VINV_VINV
  3115. # asm 1: mulpd VINV_VINV,<carry=int6464#7
  3116. # asm 2: mulpd VINV_VINV,<carry=%xmm6
  3117. mulpd VINV_VINV,%xmm6
  3118. # qhasm: float6464 carry += round
  3119. # asm 1: addpd <round=int6464#4,<carry=int6464#7
  3120. # asm 2: addpd <round=%xmm3,<carry=%xmm6
  3121. addpd %xmm3,%xmm6
  3122. # qhasm: float6464 carry -= round
  3123. # asm 1: subpd <round=int6464#4,<carry=int6464#7
  3124. # asm 2: subpd <round=%xmm3,<carry=%xmm6
  3125. subpd %xmm3,%xmm6
  3126. # qhasm: float6464 r0 -= carry
  3127. # asm 1: subpd <carry=int6464#7,<r0=int6464#1
  3128. # asm 2: subpd <carry=%xmm6,<r0=%xmm0
  3129. subpd %xmm6,%xmm0
  3130. # qhasm: float6464 r3 -= carry
  3131. # asm 1: subpd <carry=int6464#7,<r3=int6464#13
  3132. # asm 2: subpd <carry=%xmm6,<r3=%xmm12
  3133. subpd %xmm6,%xmm12
  3134. # qhasm: 2t6 = carry
  3135. # asm 1: movdqa <carry=int6464#7,>2t6=int6464#10
  3136. # asm 2: movdqa <carry=%xmm6,>2t6=%xmm9
  3137. movdqa %xmm6,%xmm9
  3138. # qhasm: float6464 2t6 *= FOUR_FOUR
  3139. # asm 1: mulpd FOUR_FOUR,<2t6=int6464#10
  3140. # asm 2: mulpd FOUR_FOUR,<2t6=%xmm9
  3141. mulpd FOUR_FOUR,%xmm9
  3142. # qhasm: float6464 r6 -= 2t6
  3143. # asm 1: subpd <2t6=int6464#10,<r6=int6464#14
  3144. # asm 2: subpd <2t6=%xmm9,<r6=%xmm13
  3145. subpd %xmm9,%xmm13
  3146. # qhasm: float6464 r9 -= carry
  3147. # asm 1: subpd <carry=int6464#7,<r9=int6464#15
  3148. # asm 2: subpd <carry=%xmm6,<r9=%xmm14
  3149. subpd %xmm6,%xmm14
  3150. # qhasm: float6464 carry *= V_V
  3151. # asm 1: mulpd V_V,<carry=int6464#7
  3152. # asm 2: mulpd V_V,<carry=%xmm6
  3153. mulpd V_V,%xmm6
  3154. # qhasm: float6464 r11 -= carry
  3155. # asm 1: subpd <carry=int6464#7,<r11=int6464#12
  3156. # asm 2: subpd <carry=%xmm6,<r11=%xmm11
  3157. subpd %xmm6,%xmm11
  3158. # qhasm: carry = r0
  3159. # asm 1: movdqa <r0=int6464#1,>carry=int6464#7
  3160. # asm 2: movdqa <r0=%xmm0,>carry=%xmm6
  3161. movdqa %xmm0,%xmm6
  3162. # qhasm: float6464 carry *= V6INV_V6INV
  3163. # asm 1: mulpd V6INV_V6INV,<carry=int6464#7
  3164. # asm 2: mulpd V6INV_V6INV,<carry=%xmm6
  3165. mulpd V6INV_V6INV,%xmm6
  3166. # qhasm: float6464 carry += round
  3167. # asm 1: addpd <round=int6464#4,<carry=int6464#7
  3168. # asm 2: addpd <round=%xmm3,<carry=%xmm6
  3169. addpd %xmm3,%xmm6
  3170. # qhasm: float6464 carry -= round
  3171. # asm 1: subpd <round=int6464#4,<carry=int6464#7
  3172. # asm 2: subpd <round=%xmm3,<carry=%xmm6
  3173. subpd %xmm3,%xmm6
  3174. # qhasm: float6464 r1 += carry
  3175. # asm 1: addpd <carry=int6464#7,<r1=int6464#2
  3176. # asm 2: addpd <carry=%xmm6,<r1=%xmm1
  3177. addpd %xmm6,%xmm1
  3178. # qhasm: float6464 carry *= V6_V6
  3179. # asm 1: mulpd V6_V6,<carry=int6464#7
  3180. # asm 2: mulpd V6_V6,<carry=%xmm6
  3181. mulpd V6_V6,%xmm6
  3182. # qhasm: float6464 r0 -= carry
  3183. # asm 1: subpd <carry=int6464#7,<r0=int6464#1
  3184. # asm 2: subpd <carry=%xmm6,<r0=%xmm0
  3185. subpd %xmm6,%xmm0
  3186. # qhasm: *(int128 *)(rop + 0) = r0
  3187. # asm 1: movdqa <r0=int6464#1,0(<rop=int64#1)
  3188. # asm 2: movdqa <r0=%xmm0,0(<rop=%rdi)
  3189. movdqa %xmm0,0(%rdi)
  3190. # qhasm: carry = r3
  3191. # asm 1: movdqa <r3=int6464#13,>carry=int6464#1
  3192. # asm 2: movdqa <r3=%xmm12,>carry=%xmm0
  3193. movdqa %xmm12,%xmm0
  3194. # qhasm: float6464 carry *= VINV_VINV
  3195. # asm 1: mulpd VINV_VINV,<carry=int6464#1
  3196. # asm 2: mulpd VINV_VINV,<carry=%xmm0
  3197. mulpd VINV_VINV,%xmm0
  3198. # qhasm: float6464 carry += round
  3199. # asm 1: addpd <round=int6464#4,<carry=int6464#1
  3200. # asm 2: addpd <round=%xmm3,<carry=%xmm0
  3201. addpd %xmm3,%xmm0
  3202. # qhasm: float6464 carry -= round
  3203. # asm 1: subpd <round=int6464#4,<carry=int6464#1
  3204. # asm 2: subpd <round=%xmm3,<carry=%xmm0
  3205. subpd %xmm3,%xmm0
  3206. # qhasm: float6464 r4 += carry
  3207. # asm 1: addpd <carry=int6464#1,<r4=int6464#5
  3208. # asm 2: addpd <carry=%xmm0,<r4=%xmm4
  3209. addpd %xmm0,%xmm4
  3210. # qhasm: float6464 carry *= V_V
  3211. # asm 1: mulpd V_V,<carry=int6464#1
  3212. # asm 2: mulpd V_V,<carry=%xmm0
  3213. mulpd V_V,%xmm0
  3214. # qhasm: float6464 r3 -= carry
  3215. # asm 1: subpd <carry=int6464#1,<r3=int6464#13
  3216. # asm 2: subpd <carry=%xmm0,<r3=%xmm12
  3217. subpd %xmm0,%xmm12
  3218. # qhasm: *(int128 *)(rop + 48) = r3
  3219. # asm 1: movdqa <r3=int6464#13,48(<rop=int64#1)
  3220. # asm 2: movdqa <r3=%xmm12,48(<rop=%rdi)
  3221. movdqa %xmm12,48(%rdi)
  3222. # qhasm: carry = r6
  3223. # asm 1: movdqa <r6=int6464#14,>carry=int6464#1
  3224. # asm 2: movdqa <r6=%xmm13,>carry=%xmm0
  3225. movdqa %xmm13,%xmm0
  3226. # qhasm: float6464 carry *= V6INV_V6INV
  3227. # asm 1: mulpd V6INV_V6INV,<carry=int6464#1
  3228. # asm 2: mulpd V6INV_V6INV,<carry=%xmm0
  3229. mulpd V6INV_V6INV,%xmm0
  3230. # qhasm: float6464 carry += round
  3231. # asm 1: addpd <round=int6464#4,<carry=int6464#1
  3232. # asm 2: addpd <round=%xmm3,<carry=%xmm0
  3233. addpd %xmm3,%xmm0
  3234. # qhasm: float6464 carry -= round
  3235. # asm 1: subpd <round=int6464#4,<carry=int6464#1
  3236. # asm 2: subpd <round=%xmm3,<carry=%xmm0
  3237. subpd %xmm3,%xmm0
  3238. # qhasm: float6464 r7 += carry
  3239. # asm 1: addpd <carry=int6464#1,<r7=int6464#8
  3240. # asm 2: addpd <carry=%xmm0,<r7=%xmm7
  3241. addpd %xmm0,%xmm7
  3242. # qhasm: float6464 carry *= V6_V6
  3243. # asm 1: mulpd V6_V6,<carry=int6464#1
  3244. # asm 2: mulpd V6_V6,<carry=%xmm0
  3245. mulpd V6_V6,%xmm0
  3246. # qhasm: float6464 r6 -= carry
  3247. # asm 1: subpd <carry=int6464#1,<r6=int6464#14
  3248. # asm 2: subpd <carry=%xmm0,<r6=%xmm13
  3249. subpd %xmm0,%xmm13
  3250. # qhasm: *(int128 *)(rop + 96) = r6
  3251. # asm 1: movdqa <r6=int6464#14,96(<rop=int64#1)
  3252. # asm 2: movdqa <r6=%xmm13,96(<rop=%rdi)
  3253. movdqa %xmm13,96(%rdi)
  3254. # qhasm: carry = r9
  3255. # asm 1: movdqa <r9=int6464#15,>carry=int6464#1
  3256. # asm 2: movdqa <r9=%xmm14,>carry=%xmm0
  3257. movdqa %xmm14,%xmm0
  3258. # qhasm: float6464 carry *= VINV_VINV
  3259. # asm 1: mulpd VINV_VINV,<carry=int6464#1
  3260. # asm 2: mulpd VINV_VINV,<carry=%xmm0
  3261. mulpd VINV_VINV,%xmm0
  3262. # qhasm: float6464 carry += round
  3263. # asm 1: addpd <round=int6464#4,<carry=int6464#1
  3264. # asm 2: addpd <round=%xmm3,<carry=%xmm0
  3265. addpd %xmm3,%xmm0
  3266. # qhasm: float6464 carry -= round
  3267. # asm 1: subpd <round=int6464#4,<carry=int6464#1
  3268. # asm 2: subpd <round=%xmm3,<carry=%xmm0
  3269. subpd %xmm3,%xmm0
  3270. # qhasm: float6464 r10 += carry
  3271. # asm 1: addpd <carry=int6464#1,<r10=int6464#11
  3272. # asm 2: addpd <carry=%xmm0,<r10=%xmm10
  3273. addpd %xmm0,%xmm10
  3274. # qhasm: float6464 carry *= V_V
  3275. # asm 1: mulpd V_V,<carry=int6464#1
  3276. # asm 2: mulpd V_V,<carry=%xmm0
  3277. mulpd V_V,%xmm0
  3278. # qhasm: float6464 r9 -= carry
  3279. # asm 1: subpd <carry=int6464#1,<r9=int6464#15
  3280. # asm 2: subpd <carry=%xmm0,<r9=%xmm14
  3281. subpd %xmm0,%xmm14
  3282. # qhasm: *(int128 *)(rop + 144) = r9
  3283. # asm 1: movdqa <r9=int6464#15,144(<rop=int64#1)
  3284. # asm 2: movdqa <r9=%xmm14,144(<rop=%rdi)
  3285. movdqa %xmm14,144(%rdi)
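# Final carry pass: r1 -> r2, r4 -> r5, r7 -> r8 and r10 -> r11, interleaved with the stores
# of the remaining limbs to rop.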
  3286. # qhasm: carry = r1
  3287. # asm 1: movdqa <r1=int6464#2,>carry=int6464#1
  3288. # asm 2: movdqa <r1=%xmm1,>carry=%xmm0
  3289. movdqa %xmm1,%xmm0
  3290. # qhasm: float6464 carry *= VINV_VINV
  3291. # asm 1: mulpd VINV_VINV,<carry=int6464#1
  3292. # asm 2: mulpd VINV_VINV,<carry=%xmm0
  3293. mulpd VINV_VINV,%xmm0
  3294. # qhasm: float6464 carry += round
  3295. # asm 1: addpd <round=int6464#4,<carry=int6464#1
  3296. # asm 2: addpd <round=%xmm3,<carry=%xmm0
  3297. addpd %xmm3,%xmm0
  3298. # qhasm: float6464 carry -= round
  3299. # asm 1: subpd <round=int6464#4,<carry=int6464#1
  3300. # asm 2: subpd <round=%xmm3,<carry=%xmm0
  3301. subpd %xmm3,%xmm0
  3302. # qhasm: float6464 r2 += carry
  3303. # asm 1: addpd <carry=int6464#1,<r2=int6464#3
  3304. # asm 2: addpd <carry=%xmm0,<r2=%xmm2
  3305. addpd %xmm0,%xmm2
  3306. # qhasm: float6464 carry *= V_V
  3307. # asm 1: mulpd V_V,<carry=int6464#1
  3308. # asm 2: mulpd V_V,<carry=%xmm0
  3309. mulpd V_V,%xmm0
  3310. # qhasm: float6464 r1 -= carry
  3311. # asm 1: subpd <carry=int6464#1,<r1=int6464#2
  3312. # asm 2: subpd <carry=%xmm0,<r1=%xmm1
  3313. subpd %xmm0,%xmm1
  3314. # qhasm: *(int128 *)(rop + 16) = r1
  3315. # asm 1: movdqa <r1=int6464#2,16(<rop=int64#1)
  3316. # asm 2: movdqa <r1=%xmm1,16(<rop=%rdi)
  3317. movdqa %xmm1,16(%rdi)
  3318. # qhasm: *(int128 *)(rop + 32) = r2
  3319. # asm 1: movdqa <r2=int6464#3,32(<rop=int64#1)
  3320. # asm 2: movdqa <r2=%xmm2,32(<rop=%rdi)
  3321. movdqa %xmm2,32(%rdi)
  3322. # qhasm: carry = r4
  3323. # asm 1: movdqa <r4=int6464#5,>carry=int6464#1
  3324. # asm 2: movdqa <r4=%xmm4,>carry=%xmm0
  3325. movdqa %xmm4,%xmm0
  3326. # qhasm: float6464 carry *= VINV_VINV
  3327. # asm 1: mulpd VINV_VINV,<carry=int6464#1
  3328. # asm 2: mulpd VINV_VINV,<carry=%xmm0
  3329. mulpd VINV_VINV,%xmm0
  3330. # qhasm: float6464 carry += round
  3331. # asm 1: addpd <round=int6464#4,<carry=int6464#1
  3332. # asm 2: addpd <round=%xmm3,<carry=%xmm0
  3333. addpd %xmm3,%xmm0
  3334. # qhasm: float6464 carry -= round
  3335. # asm 1: subpd <round=int6464#4,<carry=int6464#1
  3336. # asm 2: subpd <round=%xmm3,<carry=%xmm0
  3337. subpd %xmm3,%xmm0
  3338. # qhasm: float6464 r5 += carry
  3339. # asm 1: addpd <carry=int6464#1,<r5=int6464#6
  3340. # asm 2: addpd <carry=%xmm0,<r5=%xmm5
  3341. addpd %xmm0,%xmm5
  3342. # qhasm: float6464 carry *= V_V
  3343. # asm 1: mulpd V_V,<carry=int6464#1
  3344. # asm 2: mulpd V_V,<carry=%xmm0
  3345. mulpd V_V,%xmm0
  3346. # qhasm: float6464 r4 -= carry
  3347. # asm 1: subpd <carry=int6464#1,<r4=int6464#5
  3348. # asm 2: subpd <carry=%xmm0,<r4=%xmm4
  3349. subpd %xmm0,%xmm4
  3350. # qhasm: *(int128 *)(rop + 64) = r4
  3351. # asm 1: movdqa <r4=int6464#5,64(<rop=int64#1)
  3352. # asm 2: movdqa <r4=%xmm4,64(<rop=%rdi)
  3353. movdqa %xmm4,64(%rdi)
  3354. # qhasm: *(int128 *)(rop + 80) = r5
  3355. # asm 1: movdqa <r5=int6464#6,80(<rop=int64#1)
  3356. # asm 2: movdqa <r5=%xmm5,80(<rop=%rdi)
  3357. movdqa %xmm5,80(%rdi)
  3358. # qhasm: carry = r7
  3359. # asm 1: movdqa <r7=int6464#8,>carry=int6464#1
  3360. # asm 2: movdqa <r7=%xmm7,>carry=%xmm0
  3361. movdqa %xmm7,%xmm0
  3362. # qhasm: float6464 carry *= VINV_VINV
  3363. # asm 1: mulpd VINV_VINV,<carry=int6464#1
  3364. # asm 2: mulpd VINV_VINV,<carry=%xmm0
  3365. mulpd VINV_VINV,%xmm0
  3366. # qhasm: float6464 carry += round
  3367. # asm 1: addpd <round=int6464#4,<carry=int6464#1
  3368. # asm 2: addpd <round=%xmm3,<carry=%xmm0
  3369. addpd %xmm3,%xmm0
  3370. # qhasm: float6464 carry -= round
  3371. # asm 1: subpd <round=int6464#4,<carry=int6464#1
  3372. # asm 2: subpd <round=%xmm3,<carry=%xmm0
  3373. subpd %xmm3,%xmm0
  3374. # qhasm: float6464 r8 += carry
  3375. # asm 1: addpd <carry=int6464#1,<r8=int6464#9
  3376. # asm 2: addpd <carry=%xmm0,<r8=%xmm8
  3377. addpd %xmm0,%xmm8
  3378. # qhasm: float6464 carry *= V_V
  3379. # asm 1: mulpd V_V,<carry=int6464#1
  3380. # asm 2: mulpd V_V,<carry=%xmm0
  3381. mulpd V_V,%xmm0
  3382. # qhasm: float6464 r7 -= carry
  3383. # asm 1: subpd <carry=int6464#1,<r7=int6464#8
  3384. # asm 2: subpd <carry=%xmm0,<r7=%xmm7
  3385. subpd %xmm0,%xmm7
  3386. # qhasm: *(int128 *)(rop + 112) = r7
  3387. # asm 1: movdqa <r7=int6464#8,112(<rop=int64#1)
  3388. # asm 2: movdqa <r7=%xmm7,112(<rop=%rdi)
  3389. movdqa %xmm7,112(%rdi)
  3390. # qhasm: *(int128 *)(rop + 128) = r8
  3391. # asm 1: movdqa <r8=int6464#9,128(<rop=int64#1)
  3392. # asm 2: movdqa <r8=%xmm8,128(<rop=%rdi)
  3393. movdqa %xmm8,128(%rdi)
  3394. # qhasm: carry = r10
  3395. # asm 1: movdqa <r10=int6464#11,>carry=int6464#1
  3396. # asm 2: movdqa <r10=%xmm10,>carry=%xmm0
  3397. movdqa %xmm10,%xmm0
  3398. # qhasm: float6464 carry *= VINV_VINV
  3399. # asm 1: mulpd VINV_VINV,<carry=int6464#1
  3400. # asm 2: mulpd VINV_VINV,<carry=%xmm0
  3401. mulpd VINV_VINV,%xmm0
  3402. # qhasm: float6464 carry += round
  3403. # asm 1: addpd <round=int6464#4,<carry=int6464#1
  3404. # asm 2: addpd <round=%xmm3,<carry=%xmm0
  3405. addpd %xmm3,%xmm0
  3406. # qhasm: float6464 carry -= round
  3407. # asm 1: subpd <round=int6464#4,<carry=int6464#1
  3408. # asm 2: subpd <round=%xmm3,<carry=%xmm0
  3409. subpd %xmm3,%xmm0
  3410. # qhasm: float6464 r11 += carry
  3411. # asm 1: addpd <carry=int6464#1,<r11=int6464#12
  3412. # asm 2: addpd <carry=%xmm0,<r11=%xmm11
  3413. addpd %xmm0,%xmm11
  3414. # qhasm: float6464 carry *= V_V
  3415. # asm 1: mulpd V_V,<carry=int6464#1
  3416. # asm 2: mulpd V_V,<carry=%xmm0
  3417. mulpd V_V,%xmm0
  3418. # qhasm: float6464 r10 -= carry
  3419. # asm 1: subpd <carry=int6464#1,<r10=int6464#11
  3420. # asm 2: subpd <carry=%xmm0,<r10=%xmm10
  3421. subpd %xmm0,%xmm10
  3422. # qhasm: *(int128 *)(rop + 160) = r10
  3423. # asm 1: movdqa <r10=int6464#11,160(<rop=int64#1)
  3424. # asm 2: movdqa <r10=%xmm10,160(<rop=%rdi)
  3425. movdqa %xmm10,160(%rdi)
  3426. # qhasm: *(int128 *)(rop + 176) = r11
  3427. # asm 1: movdqa <r11=int6464#12,176(<rop=int64#1)
  3428. # asm 2: movdqa <r11=%xmm11,176(<rop=%rdi)
  3429. movdqa %xmm11,176(%rdi)
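# qhasm 'leave' epilogue: presumably undoes the stack adjustment held in %r11 since the
# prologue, copies the first two argument registers into %rax/%rdx, and returns.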
  3430. # qhasm: leave
  3431. add %r11,%rsp
  3432. mov %rdi,%rax
  3433. mov %rsi,%rdx
  3434. ret