- # File: dclxvi-20130329/fp2e_mul_fpe.s
- # Author: Ruben Niederhagen, Peter Schwabe
- # Public Domain
- # qhasm: enter fp2e_mul_fpe_qhasm
- .text
- .p2align 5
- .globl _fp2e_mul_fpe_qhasm
- .globl fp2e_mul_fpe_qhasm
- _fp2e_mul_fpe_qhasm:
- fp2e_mul_fpe_qhasm:
- mov %rsp,%r11
- and $31,%r11
- add $768,%r11
- sub %r11,%rsp
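The four instructions above set up scratch space: %r11 becomes (%rsp mod 32) + 768, and subtracting it leaves %rsp 32-byte aligned with at least 768 bytes reserved below the old stack pointer. This works because 768 = 24 * 32, so removing the misalignment plus a multiple of 32 lands on a 32-byte boundary. The alignment matters because the movdqa loads and stores used throughout the routine fault on addresses that are not 16-byte aligned.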
- # qhasm: int64 rop
- # qhasm: int64 op1
- # qhasm: int64 op2
- # qhasm: input rop
- # qhasm: input op1
- # qhasm: input op2
- # qhasm: stack6144 0mys
- # qhasm: int64 0mysp
- # qhasm: 0mysp = &0mys
- # asm 1: leaq <0mys=stack6144#1,>0mysp=int64#4
- # asm 2: leaq <0mys=0(%rsp),>0mysp=%rcx
- leaq 0(%rsp),%rcx
- # qhasm: int64 c1
- # qhasm: int64 c2
- # qhasm: int64 c3
- # qhasm: int64 c4
- # qhasm: int64 c5
- # qhasm: int64 c6
- # qhasm: int64 c7
- # qhasm: caller c1
- # qhasm: caller c2
- # qhasm: caller c3
- # qhasm: caller c4
- # qhasm: caller c5
- # qhasm: caller c6
- # qhasm: caller c7
- # qhasm: stack64 c1_stack
- # qhasm: stack64 c2_stack
- # qhasm: stack64 c3_stack
- # qhasm: stack64 c4_stack
- # qhasm: stack64 c5_stack
- # qhasm: stack64 c6_stack
- # qhasm: stack64 c7_stack
- # qhasm: int6464 r0
- # qhasm: int6464 r1
- # qhasm: int6464 r2
- # qhasm: int6464 r3
- # qhasm: int6464 r4
- # qhasm: int6464 r5
- # qhasm: int6464 r6
- # qhasm: int6464 r7
- # qhasm: int6464 r8
- # qhasm: int6464 r9
- # qhasm: int6464 r10
- # qhasm: int6464 r11
- # qhasm: int6464 0t12
- # qhasm: int6464 0t13
- # qhasm: int6464 0t14
- # qhasm: int6464 0t15
- # qhasm: int6464 0t16
- # qhasm: int6464 0t17
- # qhasm: int6464 0t18
- # qhasm: int6464 0t19
- # qhasm: int6464 0t20
- # qhasm: int6464 0t21
- # qhasm: int6464 0t22
- # qhasm: int6464 0t
- # qhasm: int64 1mysp
- # qhasm: int64 0arg1p
- # qhasm: 1mysp = 0mysp
- # asm 1: mov <0mysp=int64#4,>1mysp=int64#4
- # asm 2: mov <0mysp=%rcx,>1mysp=%rcx
- mov %rcx,%rcx
- # qhasm: 0arg1p = 1mysp+576
- # asm 1: lea 576(<1mysp=int64#4),>0arg1p=int64#5
- # asm 2: lea 576(<1mysp=%rcx),>0arg1p=%r8
- lea 576(%rcx),%r8
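The mov %rcx,%rcx above is a no-op left over from qhasm's register allocation (0mysp and 1mysp were assigned the same register). After the lea, %r8 points 576 bytes into the aligned scratch area: the low 576 bytes under %rcx will hold the spilled product coefficients, and the region at %r8 is about to be filled with a lane-duplicated copy of op2.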
- # qhasm: float6464 0t[0] = 0t[1] = *(float64 *)(op2 + 0)
- # asm 1: movddup 0(<op2=int64#3),>0t=int6464#1
- # asm 2: movddup 0(<op2=%rdx),>0t=%xmm0
- movddup 0(%rdx),%xmm0
- # qhasm: *(int128 *)(0arg1p + 0) = 0t
- # asm 1: movdqa <0t=int6464#1,0(<0arg1p=int64#5)
- # asm 2: movdqa <0t=%xmm0,0(<0arg1p=%r8)
- movdqa %xmm0,0(%r8)
- # qhasm: float6464 0t[0] = 0t[1] = *(float64 *)(op2 + 8)
- # asm 1: movddup 8(<op2=int64#3),>0t=int6464#1
- # asm 2: movddup 8(<op2=%rdx),>0t=%xmm0
- movddup 8(%rdx),%xmm0
- # qhasm: *(int128 *)(0arg1p + 16) = 0t
- # asm 1: movdqa <0t=int6464#1,16(<0arg1p=int64#5)
- # asm 2: movdqa <0t=%xmm0,16(<0arg1p=%r8)
- movdqa %xmm0,16(%r8)
- # qhasm: float6464 0t[0] = 0t[1] = *(float64 *)(op2 + 16)
- # asm 1: movddup 16(<op2=int64#3),>0t=int6464#1
- # asm 2: movddup 16(<op2=%rdx),>0t=%xmm0
- movddup 16(%rdx),%xmm0
- # qhasm: *(int128 *)(0arg1p + 32) = 0t
- # asm 1: movdqa <0t=int6464#1,32(<0arg1p=int64#5)
- # asm 2: movdqa <0t=%xmm0,32(<0arg1p=%r8)
- movdqa %xmm0,32(%r8)
- # qhasm: float6464 0t[0] = 0t[1] = *(float64 *)(op2 + 24)
- # asm 1: movddup 24(<op2=int64#3),>0t=int6464#1
- # asm 2: movddup 24(<op2=%rdx),>0t=%xmm0
- movddup 24(%rdx),%xmm0
- # qhasm: *(int128 *)(0arg1p + 48) = 0t
- # asm 1: movdqa <0t=int6464#1,48(<0arg1p=int64#5)
- # asm 2: movdqa <0t=%xmm0,48(<0arg1p=%r8)
- movdqa %xmm0,48(%r8)
- # qhasm: float6464 0t[0] = 0t[1] = *(float64 *)(op2 + 32)
- # asm 1: movddup 32(<op2=int64#3),>0t=int6464#1
- # asm 2: movddup 32(<op2=%rdx),>0t=%xmm0
- movddup 32(%rdx),%xmm0
- # qhasm: *(int128 *)(0arg1p + 64) = 0t
- # asm 1: movdqa <0t=int6464#1,64(<0arg1p=int64#5)
- # asm 2: movdqa <0t=%xmm0,64(<0arg1p=%r8)
- movdqa %xmm0,64(%r8)
- # qhasm: float6464 0t[0] = 0t[1] = *(float64 *)(op2 + 40)
- # asm 1: movddup 40(<op2=int64#3),>0t=int6464#1
- # asm 2: movddup 40(<op2=%rdx),>0t=%xmm0
- movddup 40(%rdx),%xmm0
- # qhasm: *(int128 *)(0arg1p + 80) = 0t
- # asm 1: movdqa <0t=int6464#1,80(<0arg1p=int64#5)
- # asm 2: movdqa <0t=%xmm0,80(<0arg1p=%r8)
- movdqa %xmm0,80(%r8)
- # qhasm: float6464 0t[0] = 0t[1] = *(float64 *)(op2 + 48)
- # asm 1: movddup 48(<op2=int64#3),>0t=int6464#1
- # asm 2: movddup 48(<op2=%rdx),>0t=%xmm0
- movddup 48(%rdx),%xmm0
- # qhasm: *(int128 *)(0arg1p + 96) = 0t
- # asm 1: movdqa <0t=int6464#1,96(<0arg1p=int64#5)
- # asm 2: movdqa <0t=%xmm0,96(<0arg1p=%r8)
- movdqa %xmm0,96(%r8)
- # qhasm: float6464 0t[0] = 0t[1] = *(float64 *)(op2 + 56)
- # asm 1: movddup 56(<op2=int64#3),>0t=int6464#1
- # asm 2: movddup 56(<op2=%rdx),>0t=%xmm0
- movddup 56(%rdx),%xmm0
- # qhasm: *(int128 *)(0arg1p + 112) = 0t
- # asm 1: movdqa <0t=int6464#1,112(<0arg1p=int64#5)
- # asm 2: movdqa <0t=%xmm0,112(<0arg1p=%r8)
- movdqa %xmm0,112(%r8)
- # qhasm: float6464 0t[0] = 0t[1] = *(float64 *)(op2 + 64)
- # asm 1: movddup 64(<op2=int64#3),>0t=int6464#1
- # asm 2: movddup 64(<op2=%rdx),>0t=%xmm0
- movddup 64(%rdx),%xmm0
- # qhasm: *(int128 *)(0arg1p + 128) = 0t
- # asm 1: movdqa <0t=int6464#1,128(<0arg1p=int64#5)
- # asm 2: movdqa <0t=%xmm0,128(<0arg1p=%r8)
- movdqa %xmm0,128(%r8)
- # qhasm: float6464 0t[0] = 0t[1] = *(float64 *)(op2 + 72)
- # asm 1: movddup 72(<op2=int64#3),>0t=int6464#1
- # asm 2: movddup 72(<op2=%rdx),>0t=%xmm0
- movddup 72(%rdx),%xmm0
- # qhasm: *(int128 *)(0arg1p + 144) = 0t
- # asm 1: movdqa <0t=int6464#1,144(<0arg1p=int64#5)
- # asm 2: movdqa <0t=%xmm0,144(<0arg1p=%r8)
- movdqa %xmm0,144(%r8)
- # qhasm: float6464 0t[0] = 0t[1] = *(float64 *)(op2 + 80)
- # asm 1: movddup 80(<op2=int64#3),>0t=int6464#1
- # asm 2: movddup 80(<op2=%rdx),>0t=%xmm0
- movddup 80(%rdx),%xmm0
- # qhasm: *(int128 *)(0arg1p + 160) = 0t
- # asm 1: movdqa <0t=int6464#1,160(<0arg1p=int64#5)
- # asm 2: movdqa <0t=%xmm0,160(<0arg1p=%r8)
- movdqa %xmm0,160(%r8)
- # qhasm: float6464 0t[0] = 0t[1] = *(float64 *)(op2 + 88)
- # asm 1: movddup 88(<op2=int64#3),>0t=int6464#1
- # asm 2: movddup 88(<op2=%rdx),>0t=%xmm0
- movddup 88(%rdx),%xmm0
- # qhasm: *(int128 *)(0arg1p + 176) = 0t
- # asm 1: movdqa <0t=int6464#1,176(<0arg1p=int64#5)
- # asm 2: movdqa <0t=%xmm0,176(<0arg1p=%r8)
- movdqa %xmm0,176(%r8)
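The twelve movddup/movdqa pairs above broadcast each 8-byte coefficient of op2 (an fpe element in the library's 12-coefficient double representation) into both 64-bit lanes of an SSE register and store it at a 16-byte stride. A single mulpd can then multiply both components of the fp2e operand op1 by the same fpe coefficient. A minimal C intrinsics sketch of this step, assuming op2 points at 12 doubles and arg1p at the aligned scratch region (broadcast_op2 is an illustrative name, not part of the source):

    #include <pmmintrin.h>  /* SSE3: _mm_loaddup_pd is movddup */

    /* Duplicate each of the 12 doubles of op2 into both lanes and
       store at 16-byte stride, mirroring the movddup/movdqa pairs. */
    static void broadcast_op2(double *arg1p, const double *op2)
    {
        for (int i = 0; i < 12; i++) {
            __m128d t = _mm_loaddup_pd(&op2[i]); /* { op2[i], op2[i] } */
            _mm_store_pd(&arg1p[2 * i], t);      /* aligned 16-byte store */
        }
    }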
- # qhasm: int6464 0yoff
- # qhasm: int6464 0r0
- # qhasm: int6464 0r1
- # qhasm: int6464 0r2
- # qhasm: int6464 0r3
- # qhasm: int6464 0r4
- # qhasm: int6464 0r5
- # qhasm: int6464 0r6
- # qhasm: int6464 0r7
- # qhasm: int6464 0r8
- # qhasm: int6464 0r9
- # qhasm: int6464 0r10
- # qhasm: int6464 0r11
- # qhasm: int6464 0t0
- # qhasm: int6464 0t1
- # qhasm: int6464 0t2
- # qhasm: int6464 0t3
- # qhasm: int6464 0t4
- # qhasm: int6464 0t5
- # qhasm: int6464 0t6
- # qhasm: int6464 0t7
- # qhasm: int6464 0t8
- # qhasm: int6464 0t9
- # qhasm: int6464 0t10
- # qhasm: int6464 0t11
- # qhasm: int6464 1t12
- # qhasm: int6464 1t13
- # qhasm: int6464 1t14
- # qhasm: int6464 1t15
- # qhasm: int6464 1t16
- # qhasm: int6464 1t17
- # qhasm: int6464 1t18
- # qhasm: int6464 1t19
- # qhasm: int6464 1t20
- # qhasm: int6464 1t21
- # qhasm: int6464 1t22
- # qhasm: int6464 0ab0
- # qhasm: int6464 0ab1
- # qhasm: int6464 0ab2
- # qhasm: int6464 0ab3
- # qhasm: int6464 0ab4
- # qhasm: int6464 0ab5
- # qhasm: int6464 0ab6
- # qhasm: int6464 0ab7
- # qhasm: int6464 0ab8
- # qhasm: int6464 0ab9
- # qhasm: int6464 0ab10
- # qhasm: int6464 0ab11
- # qhasm: int6464 0ab0six
- # qhasm: int6464 0ab1six
- # qhasm: int6464 0ab2six
- # qhasm: int6464 0ab3six
- # qhasm: int6464 0ab4six
- # qhasm: int6464 0ab5six
- # qhasm: int6464 0ab6six
- # qhasm: int6464 0ab7six
- # qhasm: int6464 0ab8six
- # qhasm: int6464 0ab9six
- # qhasm: int6464 0ab10six
- # qhasm: int6464 0ab11six
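Naming in the multiplication below: 0abN is the broadcast copy of coefficient N of op2 reloaded from the scratch area, and 0abNsix is the same vector pre-multiplied by SIX_SIX, presumably the packed-double constant {6.0, 6.0} defined with the library's other constants. 0r0..0r11 accumulate the low coefficients of the product in registers and are spilled to the stack one by one; 0t12..0t22 collect the high coefficients that a later reduction pass folds back. The factor of 6 appears to compensate for the mixed coefficient weights of the 12-coefficient representation, which is why only cross products between certain positions are scaled.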
- # qhasm: 0ab0 = *(int128 *)(0arg1p + 0)
- # asm 1: movdqa 0(<0arg1p=int64#5),>0ab0=int6464#1
- # asm 2: movdqa 0(<0arg1p=%r8),>0ab0=%xmm0
- movdqa 0(%r8),%xmm0
- # qhasm: 0t0 = 0ab0
- # asm 1: movdqa <0ab0=int6464#1,>0t0=int6464#2
- # asm 2: movdqa <0ab0=%xmm0,>0t0=%xmm1
- movdqa %xmm0,%xmm1
- # qhasm: float6464 0t0 *= *(int128 *)(op1 + 0)
- # asm 1: mulpd 0(<op1=int64#2),<0t0=int6464#2
- # asm 2: mulpd 0(<op1=%rsi),<0t0=%xmm1
- mulpd 0(%rsi),%xmm1
- # qhasm: 0r0 =0t0
- # asm 1: movdqa <0t0=int6464#2,>0r0=int6464#2
- # asm 2: movdqa <0t0=%xmm1,>0r0=%xmm1
- movdqa %xmm1,%xmm1
- # qhasm: 0t1 = 0ab0
- # asm 1: movdqa <0ab0=int6464#1,>0t1=int6464#3
- # asm 2: movdqa <0ab0=%xmm0,>0t1=%xmm2
- movdqa %xmm0,%xmm2
- # qhasm: float6464 0t1 *= *(int128 *)(op1 + 16)
- # asm 1: mulpd 16(<op1=int64#2),<0t1=int6464#3
- # asm 2: mulpd 16(<op1=%rsi),<0t1=%xmm2
- mulpd 16(%rsi),%xmm2
- # qhasm: 0r1 =0t1
- # asm 1: movdqa <0t1=int6464#3,>0r1=int6464#3
- # asm 2: movdqa <0t1=%xmm2,>0r1=%xmm2
- movdqa %xmm2,%xmm2
- # qhasm: 0t2 = 0ab0
- # asm 1: movdqa <0ab0=int6464#1,>0t2=int6464#4
- # asm 2: movdqa <0ab0=%xmm0,>0t2=%xmm3
- movdqa %xmm0,%xmm3
- # qhasm: float6464 0t2 *= *(int128 *)(op1 + 32)
- # asm 1: mulpd 32(<op1=int64#2),<0t2=int6464#4
- # asm 2: mulpd 32(<op1=%rsi),<0t2=%xmm3
- mulpd 32(%rsi),%xmm3
- # qhasm: 0r2 =0t2
- # asm 1: movdqa <0t2=int6464#4,>0r2=int6464#4
- # asm 2: movdqa <0t2=%xmm3,>0r2=%xmm3
- movdqa %xmm3,%xmm3
- # qhasm: 0t3 = 0ab0
- # asm 1: movdqa <0ab0=int6464#1,>0t3=int6464#5
- # asm 2: movdqa <0ab0=%xmm0,>0t3=%xmm4
- movdqa %xmm0,%xmm4
- # qhasm: float6464 0t3 *= *(int128 *)(op1 + 48)
- # asm 1: mulpd 48(<op1=int64#2),<0t3=int6464#5
- # asm 2: mulpd 48(<op1=%rsi),<0t3=%xmm4
- mulpd 48(%rsi),%xmm4
- # qhasm: 0r3 =0t3
- # asm 1: movdqa <0t3=int6464#5,>0r3=int6464#5
- # asm 2: movdqa <0t3=%xmm4,>0r3=%xmm4
- movdqa %xmm4,%xmm4
- # qhasm: 0t4 = 0ab0
- # asm 1: movdqa <0ab0=int6464#1,>0t4=int6464#6
- # asm 2: movdqa <0ab0=%xmm0,>0t4=%xmm5
- movdqa %xmm0,%xmm5
- # qhasm: float6464 0t4 *= *(int128 *)(op1 + 64)
- # asm 1: mulpd 64(<op1=int64#2),<0t4=int6464#6
- # asm 2: mulpd 64(<op1=%rsi),<0t4=%xmm5
- mulpd 64(%rsi),%xmm5
- # qhasm: 0r4 =0t4
- # asm 1: movdqa <0t4=int6464#6,>0r4=int6464#6
- # asm 2: movdqa <0t4=%xmm5,>0r4=%xmm5
- movdqa %xmm5,%xmm5
- # qhasm: 0t5 = 0ab0
- # asm 1: movdqa <0ab0=int6464#1,>0t5=int6464#7
- # asm 2: movdqa <0ab0=%xmm0,>0t5=%xmm6
- movdqa %xmm0,%xmm6
- # qhasm: float6464 0t5 *= *(int128 *)(op1 + 80)
- # asm 1: mulpd 80(<op1=int64#2),<0t5=int6464#7
- # asm 2: mulpd 80(<op1=%rsi),<0t5=%xmm6
- mulpd 80(%rsi),%xmm6
- # qhasm: 0r5 =0t5
- # asm 1: movdqa <0t5=int6464#7,>0r5=int6464#7
- # asm 2: movdqa <0t5=%xmm6,>0r5=%xmm6
- movdqa %xmm6,%xmm6
- # qhasm: 0t6 = 0ab0
- # asm 1: movdqa <0ab0=int6464#1,>0t6=int6464#8
- # asm 2: movdqa <0ab0=%xmm0,>0t6=%xmm7
- movdqa %xmm0,%xmm7
- # qhasm: float6464 0t6 *= *(int128 *)(op1 + 96)
- # asm 1: mulpd 96(<op1=int64#2),<0t6=int6464#8
- # asm 2: mulpd 96(<op1=%rsi),<0t6=%xmm7
- mulpd 96(%rsi),%xmm7
- # qhasm: 0r6 =0t6
- # asm 1: movdqa <0t6=int6464#8,>0r6=int6464#8
- # asm 2: movdqa <0t6=%xmm7,>0r6=%xmm7
- movdqa %xmm7,%xmm7
- # qhasm: 0t7 = 0ab0
- # asm 1: movdqa <0ab0=int6464#1,>0t7=int6464#9
- # asm 2: movdqa <0ab0=%xmm0,>0t7=%xmm8
- movdqa %xmm0,%xmm8
- # qhasm: float6464 0t7 *= *(int128 *)(op1 + 112)
- # asm 1: mulpd 112(<op1=int64#2),<0t7=int6464#9
- # asm 2: mulpd 112(<op1=%rsi),<0t7=%xmm8
- mulpd 112(%rsi),%xmm8
- # qhasm: 0r7 =0t7
- # asm 1: movdqa <0t7=int6464#9,>0r7=int6464#9
- # asm 2: movdqa <0t7=%xmm8,>0r7=%xmm8
- movdqa %xmm8,%xmm8
- # qhasm: 0t8 = 0ab0
- # asm 1: movdqa <0ab0=int6464#1,>0t8=int6464#10
- # asm 2: movdqa <0ab0=%xmm0,>0t8=%xmm9
- movdqa %xmm0,%xmm9
- # qhasm: float6464 0t8 *= *(int128 *)(op1 + 128)
- # asm 1: mulpd 128(<op1=int64#2),<0t8=int6464#10
- # asm 2: mulpd 128(<op1=%rsi),<0t8=%xmm9
- mulpd 128(%rsi),%xmm9
- # qhasm: 0r8 =0t8
- # asm 1: movdqa <0t8=int6464#10,>0r8=int6464#10
- # asm 2: movdqa <0t8=%xmm9,>0r8=%xmm9
- movdqa %xmm9,%xmm9
- # qhasm: 0t9 = 0ab0
- # asm 1: movdqa <0ab0=int6464#1,>0t9=int6464#11
- # asm 2: movdqa <0ab0=%xmm0,>0t9=%xmm10
- movdqa %xmm0,%xmm10
- # qhasm: float6464 0t9 *= *(int128 *)(op1 + 144)
- # asm 1: mulpd 144(<op1=int64#2),<0t9=int6464#11
- # asm 2: mulpd 144(<op1=%rsi),<0t9=%xmm10
- mulpd 144(%rsi),%xmm10
- # qhasm: 0r9 =0t9
- # asm 1: movdqa <0t9=int6464#11,>0r9=int6464#11
- # asm 2: movdqa <0t9=%xmm10,>0r9=%xmm10
- movdqa %xmm10,%xmm10
- # qhasm: 0t10 = 0ab0
- # asm 1: movdqa <0ab0=int6464#1,>0t10=int6464#12
- # asm 2: movdqa <0ab0=%xmm0,>0t10=%xmm11
- movdqa %xmm0,%xmm11
- # qhasm: float6464 0t10 *= *(int128 *)(op1 + 160)
- # asm 1: mulpd 160(<op1=int64#2),<0t10=int6464#12
- # asm 2: mulpd 160(<op1=%rsi),<0t10=%xmm11
- mulpd 160(%rsi),%xmm11
- # qhasm: 0r10 =0t10
- # asm 1: movdqa <0t10=int6464#12,>0r10=int6464#12
- # asm 2: movdqa <0t10=%xmm11,>0r10=%xmm11
- movdqa %xmm11,%xmm11
- # qhasm: 0t11 = 0ab0
- # asm 1: movdqa <0ab0=int6464#1,>0t11=int6464#1
- # asm 2: movdqa <0ab0=%xmm0,>0t11=%xmm0
- movdqa %xmm0,%xmm0
- # qhasm: float6464 0t11 *= *(int128 *)(op1 + 176)
- # asm 1: mulpd 176(<op1=int64#2),<0t11=int6464#1
- # asm 2: mulpd 176(<op1=%rsi),<0t11=%xmm0
- mulpd 176(%rsi),%xmm0
- # qhasm: 0r11 =0t11
- # asm 1: movdqa <0t11=int6464#1,>0r11=int6464#1
- # asm 2: movdqa <0t11=%xmm0,>0r11=%xmm0
- movdqa %xmm0,%xmm0
- # qhasm: *(int128 *)(1mysp + 0) = 0r0
- # asm 1: movdqa <0r0=int6464#2,0(<1mysp=int64#4)
- # asm 2: movdqa <0r0=%xmm1,0(<1mysp=%rcx)
- movdqa %xmm1,0(%rcx)
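Column 0 of the schoolbook product is the simplest: 0ab0 is multiplied by all twelve coefficients of op1 with no SIX_SIX scaling, seeding 0r0..0r11. 0r0 receives no further contributions, so it is stored to 1mysp + 0 right away, freeing its register. Each following column N adds products at coefficient positions N..N+11, completes 0rN (which is then spilled in the same way), and extends the 0t12.. tail.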
- # qhasm: 0ab1 = *(int128 *)(0arg1p + 16)
- # asm 1: movdqa 16(<0arg1p=int64#5),>0ab1=int6464#2
- # asm 2: movdqa 16(<0arg1p=%r8),>0ab1=%xmm1
- movdqa 16(%r8),%xmm1
- # qhasm: 0ab1six = 0ab1
- # asm 1: movdqa <0ab1=int6464#2,>0ab1six=int6464#13
- # asm 2: movdqa <0ab1=%xmm1,>0ab1six=%xmm12
- movdqa %xmm1,%xmm12
- # qhasm: float6464 0ab1six *= SIX_SIX
- # asm 1: mulpd SIX_SIX,<0ab1six=int6464#13
- # asm 2: mulpd SIX_SIX,<0ab1six=%xmm12
- mulpd SIX_SIX,%xmm12
- # qhasm: 0t1 = 0ab1
- # asm 1: movdqa <0ab1=int6464#2,>0t1=int6464#14
- # asm 2: movdqa <0ab1=%xmm1,>0t1=%xmm13
- movdqa %xmm1,%xmm13
- # qhasm: float6464 0t1 *= *(int128 *)(op1 + 0)
- # asm 1: mulpd 0(<op1=int64#2),<0t1=int6464#14
- # asm 2: mulpd 0(<op1=%rsi),<0t1=%xmm13
- mulpd 0(%rsi),%xmm13
- # qhasm: float6464 0r1 +=0t1
- # asm 1: addpd <0t1=int6464#14,<0r1=int6464#3
- # asm 2: addpd <0t1=%xmm13,<0r1=%xmm2
- addpd %xmm13,%xmm2
- # qhasm: 0t7 = 0ab1
- # asm 1: movdqa <0ab1=int6464#2,>0t7=int6464#2
- # asm 2: movdqa <0ab1=%xmm1,>0t7=%xmm1
- movdqa %xmm1,%xmm1
- # qhasm: float6464 0t7 *= *(int128 *)(op1 + 96)
- # asm 1: mulpd 96(<op1=int64#2),<0t7=int6464#2
- # asm 2: mulpd 96(<op1=%rsi),<0t7=%xmm1
- mulpd 96(%rsi),%xmm1
- # qhasm: float6464 0r7 +=0t7
- # asm 1: addpd <0t7=int6464#2,<0r7=int6464#9
- # asm 2: addpd <0t7=%xmm1,<0r7=%xmm8
- addpd %xmm1,%xmm8
- # qhasm: 0t2 = 0ab1six
- # asm 1: movdqa <0ab1six=int6464#13,>0t2=int6464#2
- # asm 2: movdqa <0ab1six=%xmm12,>0t2=%xmm1
- movdqa %xmm12,%xmm1
- # qhasm: float6464 0t2 *= *(int128 *)(op1 + 16)
- # asm 1: mulpd 16(<op1=int64#2),<0t2=int6464#2
- # asm 2: mulpd 16(<op1=%rsi),<0t2=%xmm1
- mulpd 16(%rsi),%xmm1
- # qhasm: float6464 0r2 +=0t2
- # asm 1: addpd <0t2=int6464#2,<0r2=int6464#4
- # asm 2: addpd <0t2=%xmm1,<0r2=%xmm3
- addpd %xmm1,%xmm3
- # qhasm: 0t3 = 0ab1six
- # asm 1: movdqa <0ab1six=int6464#13,>0t3=int6464#2
- # asm 2: movdqa <0ab1six=%xmm12,>0t3=%xmm1
- movdqa %xmm12,%xmm1
- # qhasm: float6464 0t3 *= *(int128 *)(op1 + 32)
- # asm 1: mulpd 32(<op1=int64#2),<0t3=int6464#2
- # asm 2: mulpd 32(<op1=%rsi),<0t3=%xmm1
- mulpd 32(%rsi),%xmm1
- # qhasm: float6464 0r3 +=0t3
- # asm 1: addpd <0t3=int6464#2,<0r3=int6464#5
- # asm 2: addpd <0t3=%xmm1,<0r3=%xmm4
- addpd %xmm1,%xmm4
- # qhasm: 0t4 = 0ab1six
- # asm 1: movdqa <0ab1six=int6464#13,>0t4=int6464#2
- # asm 2: movdqa <0ab1six=%xmm12,>0t4=%xmm1
- movdqa %xmm12,%xmm1
- # qhasm: float6464 0t4 *= *(int128 *)(op1 + 48)
- # asm 1: mulpd 48(<op1=int64#2),<0t4=int6464#2
- # asm 2: mulpd 48(<op1=%rsi),<0t4=%xmm1
- mulpd 48(%rsi),%xmm1
- # qhasm: float6464 0r4 +=0t4
- # asm 1: addpd <0t4=int6464#2,<0r4=int6464#6
- # asm 2: addpd <0t4=%xmm1,<0r4=%xmm5
- addpd %xmm1,%xmm5
- # qhasm: 0t5 = 0ab1six
- # asm 1: movdqa <0ab1six=int6464#13,>0t5=int6464#2
- # asm 2: movdqa <0ab1six=%xmm12,>0t5=%xmm1
- movdqa %xmm12,%xmm1
- # qhasm: float6464 0t5 *= *(int128 *)(op1 + 64)
- # asm 1: mulpd 64(<op1=int64#2),<0t5=int6464#2
- # asm 2: mulpd 64(<op1=%rsi),<0t5=%xmm1
- mulpd 64(%rsi),%xmm1
- # qhasm: float6464 0r5 +=0t5
- # asm 1: addpd <0t5=int6464#2,<0r5=int6464#7
- # asm 2: addpd <0t5=%xmm1,<0r5=%xmm6
- addpd %xmm1,%xmm6
- # qhasm: 0t6 = 0ab1six
- # asm 1: movdqa <0ab1six=int6464#13,>0t6=int6464#2
- # asm 2: movdqa <0ab1six=%xmm12,>0t6=%xmm1
- movdqa %xmm12,%xmm1
- # qhasm: float6464 0t6 *= *(int128 *)(op1 + 80)
- # asm 1: mulpd 80(<op1=int64#2),<0t6=int6464#2
- # asm 2: mulpd 80(<op1=%rsi),<0t6=%xmm1
- mulpd 80(%rsi),%xmm1
- # qhasm: float6464 0r6 +=0t6
- # asm 1: addpd <0t6=int6464#2,<0r6=int6464#8
- # asm 2: addpd <0t6=%xmm1,<0r6=%xmm7
- addpd %xmm1,%xmm7
- # qhasm: 0t8 = 0ab1six
- # asm 1: movdqa <0ab1six=int6464#13,>0t8=int6464#2
- # asm 2: movdqa <0ab1six=%xmm12,>0t8=%xmm1
- movdqa %xmm12,%xmm1
- # qhasm: float6464 0t8 *= *(int128 *)(op1 + 112)
- # asm 1: mulpd 112(<op1=int64#2),<0t8=int6464#2
- # asm 2: mulpd 112(<op1=%rsi),<0t8=%xmm1
- mulpd 112(%rsi),%xmm1
- # qhasm: float6464 0r8 +=0t8
- # asm 1: addpd <0t8=int6464#2,<0r8=int6464#10
- # asm 2: addpd <0t8=%xmm1,<0r8=%xmm9
- addpd %xmm1,%xmm9
- # qhasm: 0t9 = 0ab1six
- # asm 1: movdqa <0ab1six=int6464#13,>0t9=int6464#2
- # asm 2: movdqa <0ab1six=%xmm12,>0t9=%xmm1
- movdqa %xmm12,%xmm1
- # qhasm: float6464 0t9 *= *(int128 *)(op1 + 128)
- # asm 1: mulpd 128(<op1=int64#2),<0t9=int6464#2
- # asm 2: mulpd 128(<op1=%rsi),<0t9=%xmm1
- mulpd 128(%rsi),%xmm1
- # qhasm: float6464 0r9 +=0t9
- # asm 1: addpd <0t9=int6464#2,<0r9=int6464#11
- # asm 2: addpd <0t9=%xmm1,<0r9=%xmm10
- addpd %xmm1,%xmm10
- # qhasm: 0t10 = 0ab1six
- # asm 1: movdqa <0ab1six=int6464#13,>0t10=int6464#2
- # asm 2: movdqa <0ab1six=%xmm12,>0t10=%xmm1
- movdqa %xmm12,%xmm1
- # qhasm: float6464 0t10 *= *(int128 *)(op1 + 144)
- # asm 1: mulpd 144(<op1=int64#2),<0t10=int6464#2
- # asm 2: mulpd 144(<op1=%rsi),<0t10=%xmm1
- mulpd 144(%rsi),%xmm1
- # qhasm: float6464 0r10 +=0t10
- # asm 1: addpd <0t10=int6464#2,<0r10=int6464#12
- # asm 2: addpd <0t10=%xmm1,<0r10=%xmm11
- addpd %xmm1,%xmm11
- # qhasm: 0t11 = 0ab1six
- # asm 1: movdqa <0ab1six=int6464#13,>0t11=int6464#2
- # asm 2: movdqa <0ab1six=%xmm12,>0t11=%xmm1
- movdqa %xmm12,%xmm1
- # qhasm: float6464 0t11 *= *(int128 *)(op1 + 160)
- # asm 1: mulpd 160(<op1=int64#2),<0t11=int6464#2
- # asm 2: mulpd 160(<op1=%rsi),<0t11=%xmm1
- mulpd 160(%rsi),%xmm1
- # qhasm: float6464 0r11 +=0t11
- # asm 1: addpd <0t11=int6464#2,<0r11=int6464#1
- # asm 2: addpd <0t11=%xmm1,<0r11=%xmm0
- addpd %xmm1,%xmm0
- # qhasm: 1t12 = 0ab1six
- # asm 1: movdqa <0ab1six=int6464#13,>1t12=int6464#2
- # asm 2: movdqa <0ab1six=%xmm12,>1t12=%xmm1
- movdqa %xmm12,%xmm1
- # qhasm: float6464 1t12 *= *(int128 *)(op1 + 176)
- # asm 1: mulpd 176(<op1=int64#2),<1t12=int6464#2
- # asm 2: mulpd 176(<op1=%rsi),<1t12=%xmm1
- mulpd 176(%rsi),%xmm1
- # qhasm: 0t12 =1t12
- # asm 1: movdqa <1t12=int6464#2,>0t12=int6464#2
- # asm 2: movdqa <1t12=%xmm1,>0t12=%xmm1
- movdqa %xmm1,%xmm1
- # qhasm: *(int128 *)(1mysp + 16) = 0r1
- # asm 1: movdqa <0r1=int6464#3,16(<1mysp=int64#4)
- # asm 2: movdqa <0r1=%xmm2,16(<1mysp=%rcx)
- movdqa %xmm2,16(%rcx)
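The 0ab1 column just completed illustrates the scaling pattern: the products against op1 positions 0 and 6 use the unscaled 0ab1, while the other ten positions use the six-fold copy; which positions are unscaled varies from column to column with the coefficient weights. A C sketch of this one column, continuing the assumptions above (column_ab1 and the accumulator array r are illustrative names):

    #include <pmmintrin.h>

    /* One schoolbook column: op1 holds 12 packed-double coefficient
       vectors; r[1]..r[12] accumulate product coefficients 1..12
       (r[12] corresponds to 0t12 in the assembly). */
    static void column_ab1(__m128d r[23], const double *op1, __m128d ab1)
    {
        const __m128d six = _mm_set1_pd(6.0);   /* stands in for SIX_SIX */
        __m128d ab1six = _mm_mul_pd(ab1, six);
        for (int j = 0; j < 12; j++) {
            __m128d c = (j == 0 || j == 6) ? ab1 : ab1six;
            __m128d p = _mm_mul_pd(c, _mm_load_pd(&op1[2 * j]));
            r[1 + j] = _mm_add_pd(r[1 + j], p);
        }
    }

In the assembly the first write to a t slot is a plain move rather than an add; the sketch assumes r[] was zero-initialized instead.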
- # qhasm: 0ab2 = *(int128 *)(0arg1p + 32)
- # asm 1: movdqa 32(<0arg1p=int64#5),>0ab2=int6464#3
- # asm 2: movdqa 32(<0arg1p=%r8),>0ab2=%xmm2
- movdqa 32(%r8),%xmm2
- # qhasm: 0ab2six = 0ab2
- # asm 1: movdqa <0ab2=int6464#3,>0ab2six=int6464#13
- # asm 2: movdqa <0ab2=%xmm2,>0ab2six=%xmm12
- movdqa %xmm2,%xmm12
- # qhasm: float6464 0ab2six *= SIX_SIX
- # asm 1: mulpd SIX_SIX,<0ab2six=int6464#13
- # asm 2: mulpd SIX_SIX,<0ab2six=%xmm12
- mulpd SIX_SIX,%xmm12
- # qhasm: 0t2 = 0ab2
- # asm 1: movdqa <0ab2=int6464#3,>0t2=int6464#14
- # asm 2: movdqa <0ab2=%xmm2,>0t2=%xmm13
- movdqa %xmm2,%xmm13
- # qhasm: float6464 0t2 *= *(int128 *)(op1 + 0)
- # asm 1: mulpd 0(<op1=int64#2),<0t2=int6464#14
- # asm 2: mulpd 0(<op1=%rsi),<0t2=%xmm13
- mulpd 0(%rsi),%xmm13
- # qhasm: float6464 0r2 +=0t2
- # asm 1: addpd <0t2=int6464#14,<0r2=int6464#4
- # asm 2: addpd <0t2=%xmm13,<0r2=%xmm3
- addpd %xmm13,%xmm3
- # qhasm: 0t7 = 0ab2
- # asm 1: movdqa <0ab2=int6464#3,>0t7=int6464#14
- # asm 2: movdqa <0ab2=%xmm2,>0t7=%xmm13
- movdqa %xmm2,%xmm13
- # qhasm: float6464 0t7 *= *(int128 *)(op1 + 80)
- # asm 1: mulpd 80(<op1=int64#2),<0t7=int6464#14
- # asm 2: mulpd 80(<op1=%rsi),<0t7=%xmm13
- mulpd 80(%rsi),%xmm13
- # qhasm: float6464 0r7 +=0t7
- # asm 1: addpd <0t7=int6464#14,<0r7=int6464#9
- # asm 2: addpd <0t7=%xmm13,<0r7=%xmm8
- addpd %xmm13,%xmm8
- # qhasm: 0t8 = 0ab2
- # asm 1: movdqa <0ab2=int6464#3,>0t8=int6464#14
- # asm 2: movdqa <0ab2=%xmm2,>0t8=%xmm13
- movdqa %xmm2,%xmm13
- # qhasm: float6464 0t8 *= *(int128 *)(op1 + 96)
- # asm 1: mulpd 96(<op1=int64#2),<0t8=int6464#14
- # asm 2: mulpd 96(<op1=%rsi),<0t8=%xmm13
- mulpd 96(%rsi),%xmm13
- # qhasm: float6464 0r8 +=0t8
- # asm 1: addpd <0t8=int6464#14,<0r8=int6464#10
- # asm 2: addpd <0t8=%xmm13,<0r8=%xmm9
- addpd %xmm13,%xmm9
- # qhasm: 1t13 = 0ab2
- # asm 1: movdqa <0ab2=int6464#3,>1t13=int6464#3
- # asm 2: movdqa <0ab2=%xmm2,>1t13=%xmm2
- movdqa %xmm2,%xmm2
- # qhasm: float6464 1t13 *= *(int128 *)(op1 + 176)
- # asm 1: mulpd 176(<op1=int64#2),<1t13=int6464#3
- # asm 2: mulpd 176(<op1=%rsi),<1t13=%xmm2
- mulpd 176(%rsi),%xmm2
- # qhasm: 0t13 =1t13
- # asm 1: movdqa <1t13=int6464#3,>0t13=int6464#3
- # asm 2: movdqa <1t13=%xmm2,>0t13=%xmm2
- movdqa %xmm2,%xmm2
- # qhasm: 0t3 = 0ab2six
- # asm 1: movdqa <0ab2six=int6464#13,>0t3=int6464#14
- # asm 2: movdqa <0ab2six=%xmm12,>0t3=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t3 *= *(int128 *)(op1 + 16)
- # asm 1: mulpd 16(<op1=int64#2),<0t3=int6464#14
- # asm 2: mulpd 16(<op1=%rsi),<0t3=%xmm13
- mulpd 16(%rsi),%xmm13
- # qhasm: float6464 0r3 +=0t3
- # asm 1: addpd <0t3=int6464#14,<0r3=int6464#5
- # asm 2: addpd <0t3=%xmm13,<0r3=%xmm4
- addpd %xmm13,%xmm4
- # qhasm: 0t4 = 0ab2six
- # asm 1: movdqa <0ab2six=int6464#13,>0t4=int6464#14
- # asm 2: movdqa <0ab2six=%xmm12,>0t4=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t4 *= *(int128 *)(op1 + 32)
- # asm 1: mulpd 32(<op1=int64#2),<0t4=int6464#14
- # asm 2: mulpd 32(<op1=%rsi),<0t4=%xmm13
- mulpd 32(%rsi),%xmm13
- # qhasm: float6464 0r4 +=0t4
- # asm 1: addpd <0t4=int6464#14,<0r4=int6464#6
- # asm 2: addpd <0t4=%xmm13,<0r4=%xmm5
- addpd %xmm13,%xmm5
- # qhasm: 0t5 = 0ab2six
- # asm 1: movdqa <0ab2six=int6464#13,>0t5=int6464#14
- # asm 2: movdqa <0ab2six=%xmm12,>0t5=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t5 *= *(int128 *)(op1 + 48)
- # asm 1: mulpd 48(<op1=int64#2),<0t5=int6464#14
- # asm 2: mulpd 48(<op1=%rsi),<0t5=%xmm13
- mulpd 48(%rsi),%xmm13
- # qhasm: float6464 0r5 +=0t5
- # asm 1: addpd <0t5=int6464#14,<0r5=int6464#7
- # asm 2: addpd <0t5=%xmm13,<0r5=%xmm6
- addpd %xmm13,%xmm6
- # qhasm: 0t6 = 0ab2six
- # asm 1: movdqa <0ab2six=int6464#13,>0t6=int6464#14
- # asm 2: movdqa <0ab2six=%xmm12,>0t6=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t6 *= *(int128 *)(op1 + 64)
- # asm 1: mulpd 64(<op1=int64#2),<0t6=int6464#14
- # asm 2: mulpd 64(<op1=%rsi),<0t6=%xmm13
- mulpd 64(%rsi),%xmm13
- # qhasm: float6464 0r6 +=0t6
- # asm 1: addpd <0t6=int6464#14,<0r6=int6464#8
- # asm 2: addpd <0t6=%xmm13,<0r6=%xmm7
- addpd %xmm13,%xmm7
- # qhasm: 0t9 = 0ab2six
- # asm 1: movdqa <0ab2six=int6464#13,>0t9=int6464#14
- # asm 2: movdqa <0ab2six=%xmm12,>0t9=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t9 *= *(int128 *)(op1 + 112)
- # asm 1: mulpd 112(<op1=int64#2),<0t9=int6464#14
- # asm 2: mulpd 112(<op1=%rsi),<0t9=%xmm13
- mulpd 112(%rsi),%xmm13
- # qhasm: float6464 0r9 +=0t9
- # asm 1: addpd <0t9=int6464#14,<0r9=int6464#11
- # asm 2: addpd <0t9=%xmm13,<0r9=%xmm10
- addpd %xmm13,%xmm10
- # qhasm: 0t10 = 0ab2six
- # asm 1: movdqa <0ab2six=int6464#13,>0t10=int6464#14
- # asm 2: movdqa <0ab2six=%xmm12,>0t10=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t10 *= *(int128 *)(op1 + 128)
- # asm 1: mulpd 128(<op1=int64#2),<0t10=int6464#14
- # asm 2: mulpd 128(<op1=%rsi),<0t10=%xmm13
- mulpd 128(%rsi),%xmm13
- # qhasm: float6464 0r10 +=0t10
- # asm 1: addpd <0t10=int6464#14,<0r10=int6464#12
- # asm 2: addpd <0t10=%xmm13,<0r10=%xmm11
- addpd %xmm13,%xmm11
- # qhasm: 0t11 = 0ab2six
- # asm 1: movdqa <0ab2six=int6464#13,>0t11=int6464#14
- # asm 2: movdqa <0ab2six=%xmm12,>0t11=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t11 *= *(int128 *)(op1 + 144)
- # asm 1: mulpd 144(<op1=int64#2),<0t11=int6464#14
- # asm 2: mulpd 144(<op1=%rsi),<0t11=%xmm13
- mulpd 144(%rsi),%xmm13
- # qhasm: float6464 0r11 +=0t11
- # asm 1: addpd <0t11=int6464#14,<0r11=int6464#1
- # asm 2: addpd <0t11=%xmm13,<0r11=%xmm0
- addpd %xmm13,%xmm0
- # qhasm: 1t12 = 0ab2six
- # asm 1: movdqa <0ab2six=int6464#13,>1t12=int6464#13
- # asm 2: movdqa <0ab2six=%xmm12,>1t12=%xmm12
- movdqa %xmm12,%xmm12
- # qhasm: float6464 1t12 *= *(int128 *)(op1 + 160)
- # asm 1: mulpd 160(<op1=int64#2),<1t12=int6464#13
- # asm 2: mulpd 160(<op1=%rsi),<1t12=%xmm12
- mulpd 160(%rsi),%xmm12
- # qhasm: float6464 0t12 +=1t12
- # asm 1: addpd <1t12=int6464#13,<0t12=int6464#2
- # asm 2: addpd <1t12=%xmm12,<0t12=%xmm1
- addpd %xmm12,%xmm1
- # qhasm: *(int128 *)(1mysp + 32) = 0r2
- # asm 1: movdqa <0r2=int6464#4,32(<1mysp=int64#4)
- # asm 2: movdqa <0r2=%xmm3,32(<1mysp=%rcx)
- movdqa %xmm3,32(%rcx)
- # qhasm: 0ab3 = *(int128 *)(0arg1p + 48)
- # asm 1: movdqa 48(<0arg1p=int64#5),>0ab3=int6464#4
- # asm 2: movdqa 48(<0arg1p=%r8),>0ab3=%xmm3
- movdqa 48(%r8),%xmm3
- # qhasm: 0ab3six = 0ab3
- # asm 1: movdqa <0ab3=int6464#4,>0ab3six=int6464#13
- # asm 2: movdqa <0ab3=%xmm3,>0ab3six=%xmm12
- movdqa %xmm3,%xmm12
- # qhasm: float6464 0ab3six *= SIX_SIX
- # asm 1: mulpd SIX_SIX,<0ab3six=int6464#13
- # asm 2: mulpd SIX_SIX,<0ab3six=%xmm12
- mulpd SIX_SIX,%xmm12
- # qhasm: 0t3 = 0ab3
- # asm 1: movdqa <0ab3=int6464#4,>0t3=int6464#14
- # asm 2: movdqa <0ab3=%xmm3,>0t3=%xmm13
- movdqa %xmm3,%xmm13
- # qhasm: float6464 0t3 *= *(int128 *)(op1 + 0)
- # asm 1: mulpd 0(<op1=int64#2),<0t3=int6464#14
- # asm 2: mulpd 0(<op1=%rsi),<0t3=%xmm13
- mulpd 0(%rsi),%xmm13
- # qhasm: float6464 0r3 +=0t3
- # asm 1: addpd <0t3=int6464#14,<0r3=int6464#5
- # asm 2: addpd <0t3=%xmm13,<0r3=%xmm4
- addpd %xmm13,%xmm4
- # qhasm: 0t7 = 0ab3
- # asm 1: movdqa <0ab3=int6464#4,>0t7=int6464#14
- # asm 2: movdqa <0ab3=%xmm3,>0t7=%xmm13
- movdqa %xmm3,%xmm13
- # qhasm: float6464 0t7 *= *(int128 *)(op1 + 64)
- # asm 1: mulpd 64(<op1=int64#2),<0t7=int6464#14
- # asm 2: mulpd 64(<op1=%rsi),<0t7=%xmm13
- mulpd 64(%rsi),%xmm13
- # qhasm: float6464 0r7 +=0t7
- # asm 1: addpd <0t7=int6464#14,<0r7=int6464#9
- # asm 2: addpd <0t7=%xmm13,<0r7=%xmm8
- addpd %xmm13,%xmm8
- # qhasm: 0t8 = 0ab3
- # asm 1: movdqa <0ab3=int6464#4,>0t8=int6464#14
- # asm 2: movdqa <0ab3=%xmm3,>0t8=%xmm13
- movdqa %xmm3,%xmm13
- # qhasm: float6464 0t8 *= *(int128 *)(op1 + 80)
- # asm 1: mulpd 80(<op1=int64#2),<0t8=int6464#14
- # asm 2: mulpd 80(<op1=%rsi),<0t8=%xmm13
- mulpd 80(%rsi),%xmm13
- # qhasm: float6464 0r8 +=0t8
- # asm 1: addpd <0t8=int6464#14,<0r8=int6464#10
- # asm 2: addpd <0t8=%xmm13,<0r8=%xmm9
- addpd %xmm13,%xmm9
- # qhasm: 0t9 = 0ab3
- # asm 1: movdqa <0ab3=int6464#4,>0t9=int6464#14
- # asm 2: movdqa <0ab3=%xmm3,>0t9=%xmm13
- movdqa %xmm3,%xmm13
- # qhasm: float6464 0t9 *= *(int128 *)(op1 + 96)
- # asm 1: mulpd 96(<op1=int64#2),<0t9=int6464#14
- # asm 2: mulpd 96(<op1=%rsi),<0t9=%xmm13
- mulpd 96(%rsi),%xmm13
- # qhasm: float6464 0r9 +=0t9
- # asm 1: addpd <0t9=int6464#14,<0r9=int6464#11
- # asm 2: addpd <0t9=%xmm13,<0r9=%xmm10
- addpd %xmm13,%xmm10
- # qhasm: 1t13 = 0ab3
- # asm 1: movdqa <0ab3=int6464#4,>1t13=int6464#14
- # asm 2: movdqa <0ab3=%xmm3,>1t13=%xmm13
- movdqa %xmm3,%xmm13
- # qhasm: float6464 1t13 *= *(int128 *)(op1 + 160)
- # asm 1: mulpd 160(<op1=int64#2),<1t13=int6464#14
- # asm 2: mulpd 160(<op1=%rsi),<1t13=%xmm13
- mulpd 160(%rsi),%xmm13
- # qhasm: float6464 0t13 +=1t13
- # asm 1: addpd <1t13=int6464#14,<0t13=int6464#3
- # asm 2: addpd <1t13=%xmm13,<0t13=%xmm2
- addpd %xmm13,%xmm2
- # qhasm: 1t14 = 0ab3
- # asm 1: movdqa <0ab3=int6464#4,>1t14=int6464#4
- # asm 2: movdqa <0ab3=%xmm3,>1t14=%xmm3
- movdqa %xmm3,%xmm3
- # qhasm: float6464 1t14 *= *(int128 *)(op1 + 176)
- # asm 1: mulpd 176(<op1=int64#2),<1t14=int6464#4
- # asm 2: mulpd 176(<op1=%rsi),<1t14=%xmm3
- mulpd 176(%rsi),%xmm3
- # qhasm: 0t14 =1t14
- # asm 1: movdqa <1t14=int6464#4,>0t14=int6464#4
- # asm 2: movdqa <1t14=%xmm3,>0t14=%xmm3
- movdqa %xmm3,%xmm3
- # qhasm: 0t4 = 0ab3six
- # asm 1: movdqa <0ab3six=int6464#13,>0t4=int6464#14
- # asm 2: movdqa <0ab3six=%xmm12,>0t4=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t4 *= *(int128 *)(op1 + 16)
- # asm 1: mulpd 16(<op1=int64#2),<0t4=int6464#14
- # asm 2: mulpd 16(<op1=%rsi),<0t4=%xmm13
- mulpd 16(%rsi),%xmm13
- # qhasm: float6464 0r4 +=0t4
- # asm 1: addpd <0t4=int6464#14,<0r4=int6464#6
- # asm 2: addpd <0t4=%xmm13,<0r4=%xmm5
- addpd %xmm13,%xmm5
- # qhasm: 0t5 = 0ab3six
- # asm 1: movdqa <0ab3six=int6464#13,>0t5=int6464#14
- # asm 2: movdqa <0ab3six=%xmm12,>0t5=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t5 *= *(int128 *)(op1 + 32)
- # asm 1: mulpd 32(<op1=int64#2),<0t5=int6464#14
- # asm 2: mulpd 32(<op1=%rsi),<0t5=%xmm13
- mulpd 32(%rsi),%xmm13
- # qhasm: float6464 0r5 +=0t5
- # asm 1: addpd <0t5=int6464#14,<0r5=int6464#7
- # asm 2: addpd <0t5=%xmm13,<0r5=%xmm6
- addpd %xmm13,%xmm6
- # qhasm: 0t6 = 0ab3six
- # asm 1: movdqa <0ab3six=int6464#13,>0t6=int6464#14
- # asm 2: movdqa <0ab3six=%xmm12,>0t6=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t6 *= *(int128 *)(op1 + 48)
- # asm 1: mulpd 48(<op1=int64#2),<0t6=int6464#14
- # asm 2: mulpd 48(<op1=%rsi),<0t6=%xmm13
- mulpd 48(%rsi),%xmm13
- # qhasm: float6464 0r6 +=0t6
- # asm 1: addpd <0t6=int6464#14,<0r6=int6464#8
- # asm 2: addpd <0t6=%xmm13,<0r6=%xmm7
- addpd %xmm13,%xmm7
- # qhasm: 0t10 = 0ab3six
- # asm 1: movdqa <0ab3six=int6464#13,>0t10=int6464#14
- # asm 2: movdqa <0ab3six=%xmm12,>0t10=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t10 *= *(int128 *)(op1 + 112)
- # asm 1: mulpd 112(<op1=int64#2),<0t10=int6464#14
- # asm 2: mulpd 112(<op1=%rsi),<0t10=%xmm13
- mulpd 112(%rsi),%xmm13
- # qhasm: float6464 0r10 +=0t10
- # asm 1: addpd <0t10=int6464#14,<0r10=int6464#12
- # asm 2: addpd <0t10=%xmm13,<0r10=%xmm11
- addpd %xmm13,%xmm11
- # qhasm: 0t11 = 0ab3six
- # asm 1: movdqa <0ab3six=int6464#13,>0t11=int6464#14
- # asm 2: movdqa <0ab3six=%xmm12,>0t11=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t11 *= *(int128 *)(op1 + 128)
- # asm 1: mulpd 128(<op1=int64#2),<0t11=int6464#14
- # asm 2: mulpd 128(<op1=%rsi),<0t11=%xmm13
- mulpd 128(%rsi),%xmm13
- # qhasm: float6464 0r11 +=0t11
- # asm 1: addpd <0t11=int6464#14,<0r11=int6464#1
- # asm 2: addpd <0t11=%xmm13,<0r11=%xmm0
- addpd %xmm13,%xmm0
- # qhasm: 1t12 = 0ab3six
- # asm 1: movdqa <0ab3six=int6464#13,>1t12=int6464#13
- # asm 2: movdqa <0ab3six=%xmm12,>1t12=%xmm12
- movdqa %xmm12,%xmm12
- # qhasm: float6464 1t12 *= *(int128 *)(op1 + 144)
- # asm 1: mulpd 144(<op1=int64#2),<1t12=int6464#13
- # asm 2: mulpd 144(<op1=%rsi),<1t12=%xmm12
- mulpd 144(%rsi),%xmm12
- # qhasm: float6464 0t12 +=1t12
- # asm 1: addpd <1t12=int6464#13,<0t12=int6464#2
- # asm 2: addpd <1t12=%xmm12,<0t12=%xmm1
- addpd %xmm12,%xmm1
- # qhasm: *(int128 *)(1mysp + 48) = 0r3
- # asm 1: movdqa <0r3=int6464#5,48(<1mysp=int64#4)
- # asm 2: movdqa <0r3=%xmm4,48(<1mysp=%rcx)
- movdqa %xmm4,48(%rcx)
- # qhasm: 0ab4 = *(int128 *)(0arg1p + 64)
- # asm 1: movdqa 64(<0arg1p=int64#5),>0ab4=int6464#5
- # asm 2: movdqa 64(<0arg1p=%r8),>0ab4=%xmm4
- movdqa 64(%r8),%xmm4
- # qhasm: 0ab4six = 0ab4
- # asm 1: movdqa <0ab4=int6464#5,>0ab4six=int6464#13
- # asm 2: movdqa <0ab4=%xmm4,>0ab4six=%xmm12
- movdqa %xmm4,%xmm12
- # qhasm: float6464 0ab4six *= SIX_SIX
- # asm 1: mulpd SIX_SIX,<0ab4six=int6464#13
- # asm 2: mulpd SIX_SIX,<0ab4six=%xmm12
- mulpd SIX_SIX,%xmm12
- # qhasm: 0t4 = 0ab4
- # asm 1: movdqa <0ab4=int6464#5,>0t4=int6464#14
- # asm 2: movdqa <0ab4=%xmm4,>0t4=%xmm13
- movdqa %xmm4,%xmm13
- # qhasm: float6464 0t4 *= *(int128 *)(op1 + 0)
- # asm 1: mulpd 0(<op1=int64#2),<0t4=int6464#14
- # asm 2: mulpd 0(<op1=%rsi),<0t4=%xmm13
- mulpd 0(%rsi),%xmm13
- # qhasm: float6464 0r4 +=0t4
- # asm 1: addpd <0t4=int6464#14,<0r4=int6464#6
- # asm 2: addpd <0t4=%xmm13,<0r4=%xmm5
- addpd %xmm13,%xmm5
- # qhasm: 0t7 = 0ab4
- # asm 1: movdqa <0ab4=int6464#5,>0t7=int6464#14
- # asm 2: movdqa <0ab4=%xmm4,>0t7=%xmm13
- movdqa %xmm4,%xmm13
- # qhasm: float6464 0t7 *= *(int128 *)(op1 + 48)
- # asm 1: mulpd 48(<op1=int64#2),<0t7=int6464#14
- # asm 2: mulpd 48(<op1=%rsi),<0t7=%xmm13
- mulpd 48(%rsi),%xmm13
- # qhasm: float6464 0r7 +=0t7
- # asm 1: addpd <0t7=int6464#14,<0r7=int6464#9
- # asm 2: addpd <0t7=%xmm13,<0r7=%xmm8
- addpd %xmm13,%xmm8
- # qhasm: 0t8 = 0ab4
- # asm 1: movdqa <0ab4=int6464#5,>0t8=int6464#14
- # asm 2: movdqa <0ab4=%xmm4,>0t8=%xmm13
- movdqa %xmm4,%xmm13
- # qhasm: float6464 0t8 *= *(int128 *)(op1 + 64)
- # asm 1: mulpd 64(<op1=int64#2),<0t8=int6464#14
- # asm 2: mulpd 64(<op1=%rsi),<0t8=%xmm13
- mulpd 64(%rsi),%xmm13
- # qhasm: float6464 0r8 +=0t8
- # asm 1: addpd <0t8=int6464#14,<0r8=int6464#10
- # asm 2: addpd <0t8=%xmm13,<0r8=%xmm9
- addpd %xmm13,%xmm9
- # qhasm: 0t9 = 0ab4
- # asm 1: movdqa <0ab4=int6464#5,>0t9=int6464#14
- # asm 2: movdqa <0ab4=%xmm4,>0t9=%xmm13
- movdqa %xmm4,%xmm13
- # qhasm: float6464 0t9 *= *(int128 *)(op1 + 80)
- # asm 1: mulpd 80(<op1=int64#2),<0t9=int6464#14
- # asm 2: mulpd 80(<op1=%rsi),<0t9=%xmm13
- mulpd 80(%rsi),%xmm13
- # qhasm: float6464 0r9 +=0t9
- # asm 1: addpd <0t9=int6464#14,<0r9=int6464#11
- # asm 2: addpd <0t9=%xmm13,<0r9=%xmm10
- addpd %xmm13,%xmm10
- # qhasm: 0t10 = 0ab4
- # asm 1: movdqa <0ab4=int6464#5,>0t10=int6464#14
- # asm 2: movdqa <0ab4=%xmm4,>0t10=%xmm13
- movdqa %xmm4,%xmm13
- # qhasm: float6464 0t10 *= *(int128 *)(op1 + 96)
- # asm 1: mulpd 96(<op1=int64#2),<0t10=int6464#14
- # asm 2: mulpd 96(<op1=%rsi),<0t10=%xmm13
- mulpd 96(%rsi),%xmm13
- # qhasm: float6464 0r10 +=0t10
- # asm 1: addpd <0t10=int6464#14,<0r10=int6464#12
- # asm 2: addpd <0t10=%xmm13,<0r10=%xmm11
- addpd %xmm13,%xmm11
- # qhasm: 1t13 = 0ab4
- # asm 1: movdqa <0ab4=int6464#5,>1t13=int6464#14
- # asm 2: movdqa <0ab4=%xmm4,>1t13=%xmm13
- movdqa %xmm4,%xmm13
- # qhasm: float6464 1t13 *= *(int128 *)(op1 + 144)
- # asm 1: mulpd 144(<op1=int64#2),<1t13=int6464#14
- # asm 2: mulpd 144(<op1=%rsi),<1t13=%xmm13
- mulpd 144(%rsi),%xmm13
- # qhasm: float6464 0t13 +=1t13
- # asm 1: addpd <1t13=int6464#14,<0t13=int6464#3
- # asm 2: addpd <1t13=%xmm13,<0t13=%xmm2
- addpd %xmm13,%xmm2
- # qhasm: 1t14 = 0ab4
- # asm 1: movdqa <0ab4=int6464#5,>1t14=int6464#14
- # asm 2: movdqa <0ab4=%xmm4,>1t14=%xmm13
- movdqa %xmm4,%xmm13
- # qhasm: float6464 1t14 *= *(int128 *)(op1 + 160)
- # asm 1: mulpd 160(<op1=int64#2),<1t14=int6464#14
- # asm 2: mulpd 160(<op1=%rsi),<1t14=%xmm13
- mulpd 160(%rsi),%xmm13
- # qhasm: float6464 0t14 +=1t14
- # asm 1: addpd <1t14=int6464#14,<0t14=int6464#4
- # asm 2: addpd <1t14=%xmm13,<0t14=%xmm3
- addpd %xmm13,%xmm3
- # qhasm: 1t15 = 0ab4
- # asm 1: movdqa <0ab4=int6464#5,>1t15=int6464#5
- # asm 2: movdqa <0ab4=%xmm4,>1t15=%xmm4
- movdqa %xmm4,%xmm4
- # qhasm: float6464 1t15 *= *(int128 *)(op1 + 176)
- # asm 1: mulpd 176(<op1=int64#2),<1t15=int6464#5
- # asm 2: mulpd 176(<op1=%rsi),<1t15=%xmm4
- mulpd 176(%rsi),%xmm4
- # qhasm: 0t15 =1t15
- # asm 1: movdqa <1t15=int6464#5,>0t15=int6464#5
- # asm 2: movdqa <1t15=%xmm4,>0t15=%xmm4
- movdqa %xmm4,%xmm4
- # qhasm: 0t5 = 0ab4six
- # asm 1: movdqa <0ab4six=int6464#13,>0t5=int6464#14
- # asm 2: movdqa <0ab4six=%xmm12,>0t5=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t5 *= *(int128 *)(op1 + 16)
- # asm 1: mulpd 16(<op1=int64#2),<0t5=int6464#14
- # asm 2: mulpd 16(<op1=%rsi),<0t5=%xmm13
- mulpd 16(%rsi),%xmm13
- # qhasm: float6464 0r5 +=0t5
- # asm 1: addpd <0t5=int6464#14,<0r5=int6464#7
- # asm 2: addpd <0t5=%xmm13,<0r5=%xmm6
- addpd %xmm13,%xmm6
- # qhasm: 0t6 = 0ab4six
- # asm 1: movdqa <0ab4six=int6464#13,>0t6=int6464#14
- # asm 2: movdqa <0ab4six=%xmm12,>0t6=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t6 *= *(int128 *)(op1 + 32)
- # asm 1: mulpd 32(<op1=int64#2),<0t6=int6464#14
- # asm 2: mulpd 32(<op1=%rsi),<0t6=%xmm13
- mulpd 32(%rsi),%xmm13
- # qhasm: float6464 0r6 +=0t6
- # asm 1: addpd <0t6=int6464#14,<0r6=int6464#8
- # asm 2: addpd <0t6=%xmm13,<0r6=%xmm7
- addpd %xmm13,%xmm7
- # qhasm: 0t11 = 0ab4six
- # asm 1: movdqa <0ab4six=int6464#13,>0t11=int6464#14
- # asm 2: movdqa <0ab4six=%xmm12,>0t11=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t11 *= *(int128 *)(op1 + 112)
- # asm 1: mulpd 112(<op1=int64#2),<0t11=int6464#14
- # asm 2: mulpd 112(<op1=%rsi),<0t11=%xmm13
- mulpd 112(%rsi),%xmm13
- # qhasm: float6464 0r11 +=0t11
- # asm 1: addpd <0t11=int6464#14,<0r11=int6464#1
- # asm 2: addpd <0t11=%xmm13,<0r11=%xmm0
- addpd %xmm13,%xmm0
- # qhasm: 1t12 = 0ab4six
- # asm 1: movdqa <0ab4six=int6464#13,>1t12=int6464#13
- # asm 2: movdqa <0ab4six=%xmm12,>1t12=%xmm12
- movdqa %xmm12,%xmm12
- # qhasm: float6464 1t12 *= *(int128 *)(op1 + 128)
- # asm 1: mulpd 128(<op1=int64#2),<1t12=int6464#13
- # asm 2: mulpd 128(<op1=%rsi),<1t12=%xmm12
- mulpd 128(%rsi),%xmm12
- # qhasm: float6464 0t12 +=1t12
- # asm 1: addpd <1t12=int6464#13,<0t12=int6464#2
- # asm 2: addpd <1t12=%xmm12,<0t12=%xmm1
- addpd %xmm12,%xmm1
- # qhasm: *(int128 *)(1mysp + 64) = 0r4
- # asm 1: movdqa <0r4=int6464#6,64(<1mysp=int64#4)
- # asm 2: movdqa <0r4=%xmm5,64(<1mysp=%rcx)
- movdqa %xmm5,64(%rcx)
- # qhasm: 0ab5 = *(int128 *)(0arg1p + 80)
- # asm 1: movdqa 80(<0arg1p=int64#5),>0ab5=int6464#6
- # asm 2: movdqa 80(<0arg1p=%r8),>0ab5=%xmm5
- movdqa 80(%r8),%xmm5
- # qhasm: 0ab5six = 0ab5
- # asm 1: movdqa <0ab5=int6464#6,>0ab5six=int6464#13
- # asm 2: movdqa <0ab5=%xmm5,>0ab5six=%xmm12
- movdqa %xmm5,%xmm12
- # qhasm: float6464 0ab5six *= SIX_SIX
- # asm 1: mulpd SIX_SIX,<0ab5six=int6464#13
- # asm 2: mulpd SIX_SIX,<0ab5six=%xmm12
- mulpd SIX_SIX,%xmm12
- # qhasm: 0t5 = 0ab5
- # asm 1: movdqa <0ab5=int6464#6,>0t5=int6464#14
- # asm 2: movdqa <0ab5=%xmm5,>0t5=%xmm13
- movdqa %xmm5,%xmm13
- # qhasm: float6464 0t5 *= *(int128 *)(op1 + 0)
- # asm 1: mulpd 0(<op1=int64#2),<0t5=int6464#14
- # asm 2: mulpd 0(<op1=%rsi),<0t5=%xmm13
- mulpd 0(%rsi),%xmm13
- # qhasm: float6464 0r5 +=0t5
- # asm 1: addpd <0t5=int6464#14,<0r5=int6464#7
- # asm 2: addpd <0t5=%xmm13,<0r5=%xmm6
- addpd %xmm13,%xmm6
- # qhasm: 0t7 = 0ab5
- # asm 1: movdqa <0ab5=int6464#6,>0t7=int6464#14
- # asm 2: movdqa <0ab5=%xmm5,>0t7=%xmm13
- movdqa %xmm5,%xmm13
- # qhasm: float6464 0t7 *= *(int128 *)(op1 + 32)
- # asm 1: mulpd 32(<op1=int64#2),<0t7=int6464#14
- # asm 2: mulpd 32(<op1=%rsi),<0t7=%xmm13
- mulpd 32(%rsi),%xmm13
- # qhasm: float6464 0r7 +=0t7
- # asm 1: addpd <0t7=int6464#14,<0r7=int6464#9
- # asm 2: addpd <0t7=%xmm13,<0r7=%xmm8
- addpd %xmm13,%xmm8
- # qhasm: 0t8 = 0ab5
- # asm 1: movdqa <0ab5=int6464#6,>0t8=int6464#14
- # asm 2: movdqa <0ab5=%xmm5,>0t8=%xmm13
- movdqa %xmm5,%xmm13
- # qhasm: float6464 0t8 *= *(int128 *)(op1 + 48)
- # asm 1: mulpd 48(<op1=int64#2),<0t8=int6464#14
- # asm 2: mulpd 48(<op1=%rsi),<0t8=%xmm13
- mulpd 48(%rsi),%xmm13
- # qhasm: float6464 0r8 +=0t8
- # asm 1: addpd <0t8=int6464#14,<0r8=int6464#10
- # asm 2: addpd <0t8=%xmm13,<0r8=%xmm9
- addpd %xmm13,%xmm9
- # qhasm: 0t9 = 0ab5
- # asm 1: movdqa <0ab5=int6464#6,>0t9=int6464#14
- # asm 2: movdqa <0ab5=%xmm5,>0t9=%xmm13
- movdqa %xmm5,%xmm13
- # qhasm: float6464 0t9 *= *(int128 *)(op1 + 64)
- # asm 1: mulpd 64(<op1=int64#2),<0t9=int6464#14
- # asm 2: mulpd 64(<op1=%rsi),<0t9=%xmm13
- mulpd 64(%rsi),%xmm13
- # qhasm: float6464 0r9 +=0t9
- # asm 1: addpd <0t9=int6464#14,<0r9=int6464#11
- # asm 2: addpd <0t9=%xmm13,<0r9=%xmm10
- addpd %xmm13,%xmm10
- # qhasm: 0t10 = 0ab5
- # asm 1: movdqa <0ab5=int6464#6,>0t10=int6464#14
- # asm 2: movdqa <0ab5=%xmm5,>0t10=%xmm13
- movdqa %xmm5,%xmm13
- # qhasm: float6464 0t10 *= *(int128 *)(op1 + 80)
- # asm 1: mulpd 80(<op1=int64#2),<0t10=int6464#14
- # asm 2: mulpd 80(<op1=%rsi),<0t10=%xmm13
- mulpd 80(%rsi),%xmm13
- # qhasm: float6464 0r10 +=0t10
- # asm 1: addpd <0t10=int6464#14,<0r10=int6464#12
- # asm 2: addpd <0t10=%xmm13,<0r10=%xmm11
- addpd %xmm13,%xmm11
- # qhasm: 0t11 = 0ab5
- # asm 1: movdqa <0ab5=int6464#6,>0t11=int6464#14
- # asm 2: movdqa <0ab5=%xmm5,>0t11=%xmm13
- movdqa %xmm5,%xmm13
- # qhasm: float6464 0t11 *= *(int128 *)(op1 + 96)
- # asm 1: mulpd 96(<op1=int64#2),<0t11=int6464#14
- # asm 2: mulpd 96(<op1=%rsi),<0t11=%xmm13
- mulpd 96(%rsi),%xmm13
- # qhasm: float6464 0r11 +=0t11
- # asm 1: addpd <0t11=int6464#14,<0r11=int6464#1
- # asm 2: addpd <0t11=%xmm13,<0r11=%xmm0
- addpd %xmm13,%xmm0
- # qhasm: 1t13 = 0ab5
- # asm 1: movdqa <0ab5=int6464#6,>1t13=int6464#14
- # asm 2: movdqa <0ab5=%xmm5,>1t13=%xmm13
- movdqa %xmm5,%xmm13
- # qhasm: float6464 1t13 *= *(int128 *)(op1 + 128)
- # asm 1: mulpd 128(<op1=int64#2),<1t13=int6464#14
- # asm 2: mulpd 128(<op1=%rsi),<1t13=%xmm13
- mulpd 128(%rsi),%xmm13
- # qhasm: float6464 0t13 +=1t13
- # asm 1: addpd <1t13=int6464#14,<0t13=int6464#3
- # asm 2: addpd <1t13=%xmm13,<0t13=%xmm2
- addpd %xmm13,%xmm2
- # qhasm: 1t14 = 0ab5
- # asm 1: movdqa <0ab5=int6464#6,>1t14=int6464#14
- # asm 2: movdqa <0ab5=%xmm5,>1t14=%xmm13
- movdqa %xmm5,%xmm13
- # qhasm: float6464 1t14 *= *(int128 *)(op1 + 144)
- # asm 1: mulpd 144(<op1=int64#2),<1t14=int6464#14
- # asm 2: mulpd 144(<op1=%rsi),<1t14=%xmm13
- mulpd 144(%rsi),%xmm13
- # qhasm: float6464 0t14 +=1t14
- # asm 1: addpd <1t14=int6464#14,<0t14=int6464#4
- # asm 2: addpd <1t14=%xmm13,<0t14=%xmm3
- addpd %xmm13,%xmm3
- # qhasm: 1t15 = 0ab5
- # asm 1: movdqa <0ab5=int6464#6,>1t15=int6464#14
- # asm 2: movdqa <0ab5=%xmm5,>1t15=%xmm13
- movdqa %xmm5,%xmm13
- # qhasm: float6464 1t15 *= *(int128 *)(op1 + 160)
- # asm 1: mulpd 160(<op1=int64#2),<1t15=int6464#14
- # asm 2: mulpd 160(<op1=%rsi),<1t15=%xmm13
- mulpd 160(%rsi),%xmm13
- # qhasm: float6464 0t15 +=1t15
- # asm 1: addpd <1t15=int6464#14,<0t15=int6464#5
- # asm 2: addpd <1t15=%xmm13,<0t15=%xmm4
- addpd %xmm13,%xmm4
- # qhasm: 1t16 = 0ab5
- # asm 1: movdqa <0ab5=int6464#6,>1t16=int6464#6
- # asm 2: movdqa <0ab5=%xmm5,>1t16=%xmm5
- movdqa %xmm5,%xmm5
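
(Note: the move above, movdqa %xmm5,%xmm5, copies a register onto itself. The allocator assigned 1t16 to the register that held 0ab5, which has no further uses, so the instruction is a no-op; the same no-op tail recurs at the end of each column below.)
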
- # qhasm: float6464 1t16 *= *(int128 *)(op1 + 176)
- # asm 1: mulpd 176(<op1=int64#2),<1t16=int6464#6
- # asm 2: mulpd 176(<op1=%rsi),<1t16=%xmm5
- mulpd 176(%rsi),%xmm5
- # qhasm: 0t16 =1t16
- # asm 1: movdqa <1t16=int6464#6,>0t16=int6464#6
- # asm 2: movdqa <1t16=%xmm5,>0t16=%xmm5
- movdqa %xmm5,%xmm5
- # qhasm: 0t6 = 0ab5six
- # asm 1: movdqa <0ab5six=int6464#13,>0t6=int6464#14
- # asm 2: movdqa <0ab5six=%xmm12,>0t6=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t6 *= *(int128 *)(op1 + 16)
- # asm 1: mulpd 16(<op1=int64#2),<0t6=int6464#14
- # asm 2: mulpd 16(<op1=%rsi),<0t6=%xmm13
- mulpd 16(%rsi),%xmm13
- # qhasm: float6464 0r6 +=0t6
- # asm 1: addpd <0t6=int6464#14,<0r6=int6464#8
- # asm 2: addpd <0t6=%xmm13,<0r6=%xmm7
- addpd %xmm13,%xmm7
- # qhasm: 1t12 = 0ab5six
- # asm 1: movdqa <0ab5six=int6464#13,>1t12=int6464#13
- # asm 2: movdqa <0ab5six=%xmm12,>1t12=%xmm12
- movdqa %xmm12,%xmm12
- # qhasm: float6464 1t12 *= *(int128 *)(op1 + 112)
- # asm 1: mulpd 112(<op1=int64#2),<1t12=int6464#13
- # asm 2: mulpd 112(<op1=%rsi),<1t12=%xmm12
- mulpd 112(%rsi),%xmm12
- # qhasm: float6464 0t12 +=1t12
- # asm 1: addpd <1t12=int6464#13,<0t12=int6464#2
- # asm 2: addpd <1t12=%xmm12,<0t12=%xmm1
- addpd %xmm12,%xmm1
- # qhasm: *(int128 *)(1mysp + 80) = 0r5
- # asm 1: movdqa <0r5=int6464#7,80(<1mysp=int64#4)
- # asm 2: movdqa <0r5=%xmm6,80(<1mysp=%rcx)
- movdqa %xmm6,80(%rcx)
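
The store above closes out the 0ab5 column of the schoolbook product: 0ab5 is multiplied with all twelve packed coefficients of op1 (byte offsets 0..176, 16 bytes per int128 coefficient), and each product is accumulated into result coefficient 5 + j, into 0r5..0r11 for the low half and 0t12..0t16 for the high half. Two of the products, those with op1 + 16 and op1 + 112, use the pre-scaled copy 0ab5six instead of 0ab5 itself; the factor of six compensates for the non-uniform weights of the coefficient representation, and exactly which cross terms need it is fixed by that representation and fully unrolled here. The sketch below is a scalar, single-lane model of this one column, not the library's actual interface: a[], b[] and r[] are hypothetical arrays holding one lane of the packed coefficients, and SIX stands for the scalar presumably behind the packed constant SIX_SIX ({6.0, 6.0}).

    #include <stdio.h>

    #define SIX 6.0  /* assumed scalar counterpart of the packed SIX_SIX constant */

    /* One lane of the 0ab5 column: a[5] times every b[j], j = 0..11.
       The j = 1 and j = 7 products use the six-scaled copy (0ab5six above);
       each product lands in coefficient 5 + j of the 23-term product, where
       r[0..11] mirror 0r0..0r11 and r[12..22] mirror the high terms 0t12..0t22. */
    static void column5(double r[23], const double a[12], const double b[12])
    {
        double a5 = a[5], a5six = SIX * a5;
        for (int j = 0; j < 12; j++) {
            double s = (j % 6 == 1) ? a5six : a5;  /* scaled at j = 1 and j = 7 only */
            r[5 + j] += s * b[j];
        }
    }

    int main(void)
    {
        double a[12] = {0}, b[12] = {0}, r[23] = {0};
        a[5] = 2.0; b[1] = 3.0; b[8] = 1.0;
        column5(r, a, b);
        printf("r6 = %.1f (six-scaled), t13 = %.1f\n", r[6], r[13]);  /* 36.0, 2.0 */
        return 0;
    }
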
- # qhasm: 0ab6 = *(int128 *)(0arg1p + 96)
- # asm 1: movdqa 96(<0arg1p=int64#5),>0ab6=int6464#7
- # asm 2: movdqa 96(<0arg1p=%r8),>0ab6=%xmm6
- movdqa 96(%r8),%xmm6
- # qhasm: 0t6 = 0ab6
- # asm 1: movdqa <0ab6=int6464#7,>0t6=int6464#13
- # asm 2: movdqa <0ab6=%xmm6,>0t6=%xmm12
- movdqa %xmm6,%xmm12
- # qhasm: float6464 0t6 *= *(int128 *)(op1 + 0)
- # asm 1: mulpd 0(<op1=int64#2),<0t6=int6464#13
- # asm 2: mulpd 0(<op1=%rsi),<0t6=%xmm12
- mulpd 0(%rsi),%xmm12
- # qhasm: float6464 0r6 +=0t6
- # asm 1: addpd <0t6=int6464#13,<0r6=int6464#8
- # asm 2: addpd <0t6=%xmm12,<0r6=%xmm7
- addpd %xmm12,%xmm7
- # qhasm: 0t7 = 0ab6
- # asm 1: movdqa <0ab6=int6464#7,>0t7=int6464#13
- # asm 2: movdqa <0ab6=%xmm6,>0t7=%xmm12
- movdqa %xmm6,%xmm12
- # qhasm: float6464 0t7 *= *(int128 *)(op1 + 16)
- # asm 1: mulpd 16(<op1=int64#2),<0t7=int6464#13
- # asm 2: mulpd 16(<op1=%rsi),<0t7=%xmm12
- mulpd 16(%rsi),%xmm12
- # qhasm: float6464 0r7 +=0t7
- # asm 1: addpd <0t7=int6464#13,<0r7=int6464#9
- # asm 2: addpd <0t7=%xmm12,<0r7=%xmm8
- addpd %xmm12,%xmm8
- # qhasm: 0t8 = 0ab6
- # asm 1: movdqa <0ab6=int6464#7,>0t8=int6464#13
- # asm 2: movdqa <0ab6=%xmm6,>0t8=%xmm12
- movdqa %xmm6,%xmm12
- # qhasm: float6464 0t8 *= *(int128 *)(op1 + 32)
- # asm 1: mulpd 32(<op1=int64#2),<0t8=int6464#13
- # asm 2: mulpd 32(<op1=%rsi),<0t8=%xmm12
- mulpd 32(%rsi),%xmm12
- # qhasm: float6464 0r8 +=0t8
- # asm 1: addpd <0t8=int6464#13,<0r8=int6464#10
- # asm 2: addpd <0t8=%xmm12,<0r8=%xmm9
- addpd %xmm12,%xmm9
- # qhasm: 0t9 = 0ab6
- # asm 1: movdqa <0ab6=int6464#7,>0t9=int6464#13
- # asm 2: movdqa <0ab6=%xmm6,>0t9=%xmm12
- movdqa %xmm6,%xmm12
- # qhasm: float6464 0t9 *= *(int128 *)(op1 + 48)
- # asm 1: mulpd 48(<op1=int64#2),<0t9=int6464#13
- # asm 2: mulpd 48(<op1=%rsi),<0t9=%xmm12
- mulpd 48(%rsi),%xmm12
- # qhasm: float6464 0r9 +=0t9
- # asm 1: addpd <0t9=int6464#13,<0r9=int6464#11
- # asm 2: addpd <0t9=%xmm12,<0r9=%xmm10
- addpd %xmm12,%xmm10
- # qhasm: 0t10 = 0ab6
- # asm 1: movdqa <0ab6=int6464#7,>0t10=int6464#13
- # asm 2: movdqa <0ab6=%xmm6,>0t10=%xmm12
- movdqa %xmm6,%xmm12
- # qhasm: float6464 0t10 *= *(int128 *)(op1 + 64)
- # asm 1: mulpd 64(<op1=int64#2),<0t10=int6464#13
- # asm 2: mulpd 64(<op1=%rsi),<0t10=%xmm12
- mulpd 64(%rsi),%xmm12
- # qhasm: float6464 0r10 +=0t10
- # asm 1: addpd <0t10=int6464#13,<0r10=int6464#12
- # asm 2: addpd <0t10=%xmm12,<0r10=%xmm11
- addpd %xmm12,%xmm11
- # qhasm: 0t11 = 0ab6
- # asm 1: movdqa <0ab6=int6464#7,>0t11=int6464#13
- # asm 2: movdqa <0ab6=%xmm6,>0t11=%xmm12
- movdqa %xmm6,%xmm12
- # qhasm: float6464 0t11 *= *(int128 *)(op1 + 80)
- # asm 1: mulpd 80(<op1=int64#2),<0t11=int6464#13
- # asm 2: mulpd 80(<op1=%rsi),<0t11=%xmm12
- mulpd 80(%rsi),%xmm12
- # qhasm: float6464 0r11 +=0t11
- # asm 1: addpd <0t11=int6464#13,<0r11=int6464#1
- # asm 2: addpd <0t11=%xmm12,<0r11=%xmm0
- addpd %xmm12,%xmm0
- # qhasm: 1t12 = 0ab6
- # asm 1: movdqa <0ab6=int6464#7,>1t12=int6464#13
- # asm 2: movdqa <0ab6=%xmm6,>1t12=%xmm12
- movdqa %xmm6,%xmm12
- # qhasm: float6464 1t12 *= *(int128 *)(op1 + 96)
- # asm 1: mulpd 96(<op1=int64#2),<1t12=int6464#13
- # asm 2: mulpd 96(<op1=%rsi),<1t12=%xmm12
- mulpd 96(%rsi),%xmm12
- # qhasm: float6464 0t12 +=1t12
- # asm 1: addpd <1t12=int6464#13,<0t12=int6464#2
- # asm 2: addpd <1t12=%xmm12,<0t12=%xmm1
- addpd %xmm12,%xmm1
- # qhasm: 1t13 = 0ab6
- # asm 1: movdqa <0ab6=int6464#7,>1t13=int6464#13
- # asm 2: movdqa <0ab6=%xmm6,>1t13=%xmm12
- movdqa %xmm6,%xmm12
- # qhasm: float6464 1t13 *= *(int128 *)(op1 + 112)
- # asm 1: mulpd 112(<op1=int64#2),<1t13=int6464#13
- # asm 2: mulpd 112(<op1=%rsi),<1t13=%xmm12
- mulpd 112(%rsi),%xmm12
- # qhasm: float6464 0t13 +=1t13
- # asm 1: addpd <1t13=int6464#13,<0t13=int6464#3
- # asm 2: addpd <1t13=%xmm12,<0t13=%xmm2
- addpd %xmm12,%xmm2
- # qhasm: 1t14 = 0ab6
- # asm 1: movdqa <0ab6=int6464#7,>1t14=int6464#13
- # asm 2: movdqa <0ab6=%xmm6,>1t14=%xmm12
- movdqa %xmm6,%xmm12
- # qhasm: float6464 1t14 *= *(int128 *)(op1 + 128)
- # asm 1: mulpd 128(<op1=int64#2),<1t14=int6464#13
- # asm 2: mulpd 128(<op1=%rsi),<1t14=%xmm12
- mulpd 128(%rsi),%xmm12
- # qhasm: float6464 0t14 +=1t14
- # asm 1: addpd <1t14=int6464#13,<0t14=int6464#4
- # asm 2: addpd <1t14=%xmm12,<0t14=%xmm3
- addpd %xmm12,%xmm3
- # qhasm: 1t15 = 0ab6
- # asm 1: movdqa <0ab6=int6464#7,>1t15=int6464#13
- # asm 2: movdqa <0ab6=%xmm6,>1t15=%xmm12
- movdqa %xmm6,%xmm12
- # qhasm: float6464 1t15 *= *(int128 *)(op1 + 144)
- # asm 1: mulpd 144(<op1=int64#2),<1t15=int6464#13
- # asm 2: mulpd 144(<op1=%rsi),<1t15=%xmm12
- mulpd 144(%rsi),%xmm12
- # qhasm: float6464 0t15 +=1t15
- # asm 1: addpd <1t15=int6464#13,<0t15=int6464#5
- # asm 2: addpd <1t15=%xmm12,<0t15=%xmm4
- addpd %xmm12,%xmm4
- # qhasm: 1t16 = 0ab6
- # asm 1: movdqa <0ab6=int6464#7,>1t16=int6464#13
- # asm 2: movdqa <0ab6=%xmm6,>1t16=%xmm12
- movdqa %xmm6,%xmm12
- # qhasm: float6464 1t16 *= *(int128 *)(op1 + 160)
- # asm 1: mulpd 160(<op1=int64#2),<1t16=int6464#13
- # asm 2: mulpd 160(<op1=%rsi),<1t16=%xmm12
- mulpd 160(%rsi),%xmm12
- # qhasm: float6464 0t16 +=1t16
- # asm 1: addpd <1t16=int6464#13,<0t16=int6464#6
- # asm 2: addpd <1t16=%xmm12,<0t16=%xmm5
- addpd %xmm12,%xmm5
- # qhasm: 1t17 = 0ab6
- # asm 1: movdqa <0ab6=int6464#7,>1t17=int6464#7
- # asm 2: movdqa <0ab6=%xmm6,>1t17=%xmm6
- movdqa %xmm6,%xmm6
- # qhasm: float6464 1t17 *= *(int128 *)(op1 + 176)
- # asm 1: mulpd 176(<op1=int64#2),<1t17=int6464#7
- # asm 2: mulpd 176(<op1=%rsi),<1t17=%xmm6
- mulpd 176(%rsi),%xmm6
- # qhasm: 0t17 =1t17
- # asm 1: movdqa <1t17=int6464#7,>0t17=int6464#7
- # asm 2: movdqa <1t17=%xmm6,>0t17=%xmm6
- movdqa %xmm6,%xmm6
- # qhasm: *(int128 *)(1mysp + 96) = 0r6
- # asm 1: movdqa <0r6=int6464#8,96(<1mysp=int64#4)
- # asm 2: movdqa <0r6=%xmm7,96(<1mysp=%rcx)
- movdqa %xmm7,96(%rcx)
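
Unlike its neighbours, the 0ab6 column above involves no SIX_SIX pre-scaling at all: every one of its twelve products with op1 is taken directly from 0ab6.
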
- # qhasm: 0ab7 = *(int128 *)(0arg1p + 112)
- # asm 1: movdqa 112(<0arg1p=int64#5),>0ab7=int6464#8
- # asm 2: movdqa 112(<0arg1p=%r8),>0ab7=%xmm7
- movdqa 112(%r8),%xmm7
- # qhasm: 0ab7six = 0ab7
- # asm 1: movdqa <0ab7=int6464#8,>0ab7six=int6464#13
- # asm 2: movdqa <0ab7=%xmm7,>0ab7six=%xmm12
- movdqa %xmm7,%xmm12
- # qhasm: float6464 0ab7six *= SIX_SIX
- # asm 1: mulpd SIX_SIX,<0ab7six=int6464#13
- # asm 2: mulpd SIX_SIX,<0ab7six=%xmm12
- mulpd SIX_SIX,%xmm12
- # qhasm: 0t7 = 0ab7
- # asm 1: movdqa <0ab7=int6464#8,>0t7=int6464#14
- # asm 2: movdqa <0ab7=%xmm7,>0t7=%xmm13
- movdqa %xmm7,%xmm13
- # qhasm: float6464 0t7 *= *(int128 *)(op1 + 0)
- # asm 1: mulpd 0(<op1=int64#2),<0t7=int6464#14
- # asm 2: mulpd 0(<op1=%rsi),<0t7=%xmm13
- mulpd 0(%rsi),%xmm13
- # qhasm: float6464 0r7 +=0t7
- # asm 1: addpd <0t7=int6464#14,<0r7=int6464#9
- # asm 2: addpd <0t7=%xmm13,<0r7=%xmm8
- addpd %xmm13,%xmm8
- # qhasm: 1t13 = 0ab7
- # asm 1: movdqa <0ab7=int6464#8,>1t13=int6464#8
- # asm 2: movdqa <0ab7=%xmm7,>1t13=%xmm7
- movdqa %xmm7,%xmm7
- # qhasm: float6464 1t13 *= *(int128 *)(op1 + 96)
- # asm 1: mulpd 96(<op1=int64#2),<1t13=int6464#8
- # asm 2: mulpd 96(<op1=%rsi),<1t13=%xmm7
- mulpd 96(%rsi),%xmm7
- # qhasm: float6464 0t13 +=1t13
- # asm 1: addpd <1t13=int6464#8,<0t13=int6464#3
- # asm 2: addpd <1t13=%xmm7,<0t13=%xmm2
- addpd %xmm7,%xmm2
- # qhasm: 0t8 = 0ab7six
- # asm 1: movdqa <0ab7six=int6464#13,>0t8=int6464#8
- # asm 2: movdqa <0ab7six=%xmm12,>0t8=%xmm7
- movdqa %xmm12,%xmm7
- # qhasm: float6464 0t8 *= *(int128 *)(op1 + 16)
- # asm 1: mulpd 16(<op1=int64#2),<0t8=int6464#8
- # asm 2: mulpd 16(<op1=%rsi),<0t8=%xmm7
- mulpd 16(%rsi),%xmm7
- # qhasm: float6464 0r8 +=0t8
- # asm 1: addpd <0t8=int6464#8,<0r8=int6464#10
- # asm 2: addpd <0t8=%xmm7,<0r8=%xmm9
- addpd %xmm7,%xmm9
- # qhasm: 0t9 = 0ab7six
- # asm 1: movdqa <0ab7six=int6464#13,>0t9=int6464#8
- # asm 2: movdqa <0ab7six=%xmm12,>0t9=%xmm7
- movdqa %xmm12,%xmm7
- # qhasm: float6464 0t9 *= *(int128 *)(op1 + 32)
- # asm 1: mulpd 32(<op1=int64#2),<0t9=int6464#8
- # asm 2: mulpd 32(<op1=%rsi),<0t9=%xmm7
- mulpd 32(%rsi),%xmm7
- # qhasm: float6464 0r9 +=0t9
- # asm 1: addpd <0t9=int6464#8,<0r9=int6464#11
- # asm 2: addpd <0t9=%xmm7,<0r9=%xmm10
- addpd %xmm7,%xmm10
- # qhasm: 0t10 = 0ab7six
- # asm 1: movdqa <0ab7six=int6464#13,>0t10=int6464#8
- # asm 2: movdqa <0ab7six=%xmm12,>0t10=%xmm7
- movdqa %xmm12,%xmm7
- # qhasm: float6464 0t10 *= *(int128 *)(op1 + 48)
- # asm 1: mulpd 48(<op1=int64#2),<0t10=int6464#8
- # asm 2: mulpd 48(<op1=%rsi),<0t10=%xmm7
- mulpd 48(%rsi),%xmm7
- # qhasm: float6464 0r10 +=0t10
- # asm 1: addpd <0t10=int6464#8,<0r10=int6464#12
- # asm 2: addpd <0t10=%xmm7,<0r10=%xmm11
- addpd %xmm7,%xmm11
- # qhasm: 0t11 = 0ab7six
- # asm 1: movdqa <0ab7six=int6464#13,>0t11=int6464#8
- # asm 2: movdqa <0ab7six=%xmm12,>0t11=%xmm7
- movdqa %xmm12,%xmm7
- # qhasm: float6464 0t11 *= *(int128 *)(op1 + 64)
- # asm 1: mulpd 64(<op1=int64#2),<0t11=int6464#8
- # asm 2: mulpd 64(<op1=%rsi),<0t11=%xmm7
- mulpd 64(%rsi),%xmm7
- # qhasm: float6464 0r11 +=0t11
- # asm 1: addpd <0t11=int6464#8,<0r11=int6464#1
- # asm 2: addpd <0t11=%xmm7,<0r11=%xmm0
- addpd %xmm7,%xmm0
- # qhasm: 1t12 = 0ab7six
- # asm 1: movdqa <0ab7six=int6464#13,>1t12=int6464#8
- # asm 2: movdqa <0ab7six=%xmm12,>1t12=%xmm7
- movdqa %xmm12,%xmm7
- # qhasm: float6464 1t12 *= *(int128 *)(op1 + 80)
- # asm 1: mulpd 80(<op1=int64#2),<1t12=int6464#8
- # asm 2: mulpd 80(<op1=%rsi),<1t12=%xmm7
- mulpd 80(%rsi),%xmm7
- # qhasm: float6464 0t12 +=1t12
- # asm 1: addpd <1t12=int6464#8,<0t12=int6464#2
- # asm 2: addpd <1t12=%xmm7,<0t12=%xmm1
- addpd %xmm7,%xmm1
- # qhasm: 1t14 = 0ab7six
- # asm 1: movdqa <0ab7six=int6464#13,>1t14=int6464#8
- # asm 2: movdqa <0ab7six=%xmm12,>1t14=%xmm7
- movdqa %xmm12,%xmm7
- # qhasm: float6464 1t14 *= *(int128 *)(op1 + 112)
- # asm 1: mulpd 112(<op1=int64#2),<1t14=int6464#8
- # asm 2: mulpd 112(<op1=%rsi),<1t14=%xmm7
- mulpd 112(%rsi),%xmm7
- # qhasm: float6464 0t14 +=1t14
- # asm 1: addpd <1t14=int6464#8,<0t14=int6464#4
- # asm 2: addpd <1t14=%xmm7,<0t14=%xmm3
- addpd %xmm7,%xmm3
- # qhasm: 1t15 = 0ab7six
- # asm 1: movdqa <0ab7six=int6464#13,>1t15=int6464#8
- # asm 2: movdqa <0ab7six=%xmm12,>1t15=%xmm7
- movdqa %xmm12,%xmm7
- # qhasm: float6464 1t15 *= *(int128 *)(op1 + 128)
- # asm 1: mulpd 128(<op1=int64#2),<1t15=int6464#8
- # asm 2: mulpd 128(<op1=%rsi),<1t15=%xmm7
- mulpd 128(%rsi),%xmm7
- # qhasm: float6464 0t15 +=1t15
- # asm 1: addpd <1t15=int6464#8,<0t15=int6464#5
- # asm 2: addpd <1t15=%xmm7,<0t15=%xmm4
- addpd %xmm7,%xmm4
- # qhasm: 1t16 = 0ab7six
- # asm 1: movdqa <0ab7six=int6464#13,>1t16=int6464#8
- # asm 2: movdqa <0ab7six=%xmm12,>1t16=%xmm7
- movdqa %xmm12,%xmm7
- # qhasm: float6464 1t16 *= *(int128 *)(op1 + 144)
- # asm 1: mulpd 144(<op1=int64#2),<1t16=int6464#8
- # asm 2: mulpd 144(<op1=%rsi),<1t16=%xmm7
- mulpd 144(%rsi),%xmm7
- # qhasm: float6464 0t16 +=1t16
- # asm 1: addpd <1t16=int6464#8,<0t16=int6464#6
- # asm 2: addpd <1t16=%xmm7,<0t16=%xmm5
- addpd %xmm7,%xmm5
- # qhasm: 1t17 = 0ab7six
- # asm 1: movdqa <0ab7six=int6464#13,>1t17=int6464#8
- # asm 2: movdqa <0ab7six=%xmm12,>1t17=%xmm7
- movdqa %xmm12,%xmm7
- # qhasm: float6464 1t17 *= *(int128 *)(op1 + 160)
- # asm 1: mulpd 160(<op1=int64#2),<1t17=int6464#8
- # asm 2: mulpd 160(<op1=%rsi),<1t17=%xmm7
- mulpd 160(%rsi),%xmm7
- # qhasm: float6464 0t17 +=1t17
- # asm 1: addpd <1t17=int6464#8,<0t17=int6464#7
- # asm 2: addpd <1t17=%xmm7,<0t17=%xmm6
- addpd %xmm7,%xmm6
- # qhasm: 1t18 = 0ab7six
- # asm 1: movdqa <0ab7six=int6464#13,>1t18=int6464#8
- # asm 2: movdqa <0ab7six=%xmm12,>1t18=%xmm7
- movdqa %xmm12,%xmm7
- # qhasm: float6464 1t18 *= *(int128 *)(op1 + 176)
- # asm 1: mulpd 176(<op1=int64#2),<1t18=int6464#8
- # asm 2: mulpd 176(<op1=%rsi),<1t18=%xmm7
- mulpd 176(%rsi),%xmm7
- # qhasm: 0t18 =1t18
- # asm 1: movdqa <1t18=int6464#8,>0t18=int6464#8
- # asm 2: movdqa <1t18=%xmm7,>0t18=%xmm7
- movdqa %xmm7,%xmm7
- # qhasm: *(int128 *)(1mysp + 112) = 0r7
- # asm 1: movdqa <0r7=int6464#9,112(<1mysp=int64#4)
- # asm 2: movdqa <0r7=%xmm8,112(<1mysp=%rcx)
- movdqa %xmm8,112(%rcx)
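
For the 0ab7 column the scaling pattern is nearly inverted relative to 0ab5: only the products with op1 + 0 and op1 + 96 use 0ab7 directly, and the other ten go through 0ab7six. The pattern is column-specific, which is why the generated code spells every product out instead of looping.
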
- # qhasm: 0ab8 = *(int128 *)(0arg1p + 128)
- # asm 1: movdqa 128(<0arg1p=int64#5),>0ab8=int6464#9
- # asm 2: movdqa 128(<0arg1p=%r8),>0ab8=%xmm8
- movdqa 128(%r8),%xmm8
- # qhasm: 0ab8six = 0ab8
- # asm 1: movdqa <0ab8=int6464#9,>0ab8six=int6464#13
- # asm 2: movdqa <0ab8=%xmm8,>0ab8six=%xmm12
- movdqa %xmm8,%xmm12
- # qhasm: float6464 0ab8six *= SIX_SIX
- # asm 1: mulpd SIX_SIX,<0ab8six=int6464#13
- # asm 2: mulpd SIX_SIX,<0ab8six=%xmm12
- mulpd SIX_SIX,%xmm12
- # qhasm: 0t8 = 0ab8
- # asm 1: movdqa <0ab8=int6464#9,>0t8=int6464#14
- # asm 2: movdqa <0ab8=%xmm8,>0t8=%xmm13
- movdqa %xmm8,%xmm13
- # qhasm: float6464 0t8 *= *(int128 *)(op1 + 0)
- # asm 1: mulpd 0(<op1=int64#2),<0t8=int6464#14
- # asm 2: mulpd 0(<op1=%rsi),<0t8=%xmm13
- mulpd 0(%rsi),%xmm13
- # qhasm: float6464 0r8 +=0t8
- # asm 1: addpd <0t8=int6464#14,<0r8=int6464#10
- # asm 2: addpd <0t8=%xmm13,<0r8=%xmm9
- addpd %xmm13,%xmm9
- # qhasm: 1t13 = 0ab8
- # asm 1: movdqa <0ab8=int6464#9,>1t13=int6464#14
- # asm 2: movdqa <0ab8=%xmm8,>1t13=%xmm13
- movdqa %xmm8,%xmm13
- # qhasm: float6464 1t13 *= *(int128 *)(op1 + 80)
- # asm 1: mulpd 80(<op1=int64#2),<1t13=int6464#14
- # asm 2: mulpd 80(<op1=%rsi),<1t13=%xmm13
- mulpd 80(%rsi),%xmm13
- # qhasm: float6464 0t13 +=1t13
- # asm 1: addpd <1t13=int6464#14,<0t13=int6464#3
- # asm 2: addpd <1t13=%xmm13,<0t13=%xmm2
- addpd %xmm13,%xmm2
- # qhasm: 1t14 = 0ab8
- # asm 1: movdqa <0ab8=int6464#9,>1t14=int6464#14
- # asm 2: movdqa <0ab8=%xmm8,>1t14=%xmm13
- movdqa %xmm8,%xmm13
- # qhasm: float6464 1t14 *= *(int128 *)(op1 + 96)
- # asm 1: mulpd 96(<op1=int64#2),<1t14=int6464#14
- # asm 2: mulpd 96(<op1=%rsi),<1t14=%xmm13
- mulpd 96(%rsi),%xmm13
- # qhasm: float6464 0t14 +=1t14
- # asm 1: addpd <1t14=int6464#14,<0t14=int6464#4
- # asm 2: addpd <1t14=%xmm13,<0t14=%xmm3
- addpd %xmm13,%xmm3
- # qhasm: 1t19 = 0ab8
- # asm 1: movdqa <0ab8=int6464#9,>1t19=int6464#9
- # asm 2: movdqa <0ab8=%xmm8,>1t19=%xmm8
- movdqa %xmm8,%xmm8
- # qhasm: float6464 1t19 *= *(int128 *)(op1 + 176)
- # asm 1: mulpd 176(<op1=int64#2),<1t19=int6464#9
- # asm 2: mulpd 176(<op1=%rsi),<1t19=%xmm8
- mulpd 176(%rsi),%xmm8
- # qhasm: 0t19 =1t19
- # asm 1: movdqa <1t19=int6464#9,>0t19=int6464#9
- # asm 2: movdqa <1t19=%xmm8,>0t19=%xmm8
- movdqa %xmm8,%xmm8
- # qhasm: 0t9 = 0ab8six
- # asm 1: movdqa <0ab8six=int6464#13,>0t9=int6464#14
- # asm 2: movdqa <0ab8six=%xmm12,>0t9=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t9 *= *(int128 *)(op1 + 16)
- # asm 1: mulpd 16(<op1=int64#2),<0t9=int6464#14
- # asm 2: mulpd 16(<op1=%rsi),<0t9=%xmm13
- mulpd 16(%rsi),%xmm13
- # qhasm: float6464 0r9 +=0t9
- # asm 1: addpd <0t9=int6464#14,<0r9=int6464#11
- # asm 2: addpd <0t9=%xmm13,<0r9=%xmm10
- addpd %xmm13,%xmm10
- # qhasm: 0t10 = 0ab8six
- # asm 1: movdqa <0ab8six=int6464#13,>0t10=int6464#14
- # asm 2: movdqa <0ab8six=%xmm12,>0t10=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t10 *= *(int128 *)(op1 + 32)
- # asm 1: mulpd 32(<op1=int64#2),<0t10=int6464#14
- # asm 2: mulpd 32(<op1=%rsi),<0t10=%xmm13
- mulpd 32(%rsi),%xmm13
- # qhasm: float6464 0r10 +=0t10
- # asm 1: addpd <0t10=int6464#14,<0r10=int6464#12
- # asm 2: addpd <0t10=%xmm13,<0r10=%xmm11
- addpd %xmm13,%xmm11
- # qhasm: 0t11 = 0ab8six
- # asm 1: movdqa <0ab8six=int6464#13,>0t11=int6464#14
- # asm 2: movdqa <0ab8six=%xmm12,>0t11=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t11 *= *(int128 *)(op1 + 48)
- # asm 1: mulpd 48(<op1=int64#2),<0t11=int6464#14
- # asm 2: mulpd 48(<op1=%rsi),<0t11=%xmm13
- mulpd 48(%rsi),%xmm13
- # qhasm: float6464 0r11 +=0t11
- # asm 1: addpd <0t11=int6464#14,<0r11=int6464#1
- # asm 2: addpd <0t11=%xmm13,<0r11=%xmm0
- addpd %xmm13,%xmm0
- # qhasm: 1t12 = 0ab8six
- # asm 1: movdqa <0ab8six=int6464#13,>1t12=int6464#14
- # asm 2: movdqa <0ab8six=%xmm12,>1t12=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 1t12 *= *(int128 *)(op1 + 64)
- # asm 1: mulpd 64(<op1=int64#2),<1t12=int6464#14
- # asm 2: mulpd 64(<op1=%rsi),<1t12=%xmm13
- mulpd 64(%rsi),%xmm13
- # qhasm: float6464 0t12 +=1t12
- # asm 1: addpd <1t12=int6464#14,<0t12=int6464#2
- # asm 2: addpd <1t12=%xmm13,<0t12=%xmm1
- addpd %xmm13,%xmm1
- # qhasm: 1t15 = 0ab8six
- # asm 1: movdqa <0ab8six=int6464#13,>1t15=int6464#14
- # asm 2: movdqa <0ab8six=%xmm12,>1t15=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 1t15 *= *(int128 *)(op1 + 112)
- # asm 1: mulpd 112(<op1=int64#2),<1t15=int6464#14
- # asm 2: mulpd 112(<op1=%rsi),<1t15=%xmm13
- mulpd 112(%rsi),%xmm13
- # qhasm: float6464 0t15 +=1t15
- # asm 1: addpd <1t15=int6464#14,<0t15=int6464#5
- # asm 2: addpd <1t15=%xmm13,<0t15=%xmm4
- addpd %xmm13,%xmm4
- # qhasm: 1t16 = 0ab8six
- # asm 1: movdqa <0ab8six=int6464#13,>1t16=int6464#14
- # asm 2: movdqa <0ab8six=%xmm12,>1t16=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 1t16 *= *(int128 *)(op1 + 128)
- # asm 1: mulpd 128(<op1=int64#2),<1t16=int6464#14
- # asm 2: mulpd 128(<op1=%rsi),<1t16=%xmm13
- mulpd 128(%rsi),%xmm13
- # qhasm: float6464 0t16 +=1t16
- # asm 1: addpd <1t16=int6464#14,<0t16=int6464#6
- # asm 2: addpd <1t16=%xmm13,<0t16=%xmm5
- addpd %xmm13,%xmm5
- # qhasm: 1t17 = 0ab8six
- # asm 1: movdqa <0ab8six=int6464#13,>1t17=int6464#14
- # asm 2: movdqa <0ab8six=%xmm12,>1t17=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 1t17 *= *(int128 *)(op1 + 144)
- # asm 1: mulpd 144(<op1=int64#2),<1t17=int6464#14
- # asm 2: mulpd 144(<op1=%rsi),<1t17=%xmm13
- mulpd 144(%rsi),%xmm13
- # qhasm: float6464 0t17 +=1t17
- # asm 1: addpd <1t17=int6464#14,<0t17=int6464#7
- # asm 2: addpd <1t17=%xmm13,<0t17=%xmm6
- addpd %xmm13,%xmm6
- # qhasm: 1t18 = 0ab8six
- # asm 1: movdqa <0ab8six=int6464#13,>1t18=int6464#13
- # asm 2: movdqa <0ab8six=%xmm12,>1t18=%xmm12
- movdqa %xmm12,%xmm12
- # qhasm: float6464 1t18 *= *(int128 *)(op1 + 160)
- # asm 1: mulpd 160(<op1=int64#2),<1t18=int6464#13
- # asm 2: mulpd 160(<op1=%rsi),<1t18=%xmm12
- mulpd 160(%rsi),%xmm12
- # qhasm: float6464 0t18 +=1t18
- # asm 1: addpd <1t18=int6464#13,<0t18=int6464#8
- # asm 2: addpd <1t18=%xmm12,<0t18=%xmm7
- addpd %xmm12,%xmm7
- # qhasm: *(int128 *)(1mysp + 128) = 0r8
- # asm 1: movdqa <0r8=int6464#10,128(<1mysp=int64#4)
- # asm 2: movdqa <0r8=%xmm9,128(<1mysp=%rcx)
- movdqa %xmm9,128(%rcx)
- # qhasm: 0ab9 = *(int128 *)(0arg1p + 144)
- # asm 1: movdqa 144(<0arg1p=int64#5),>0ab9=int6464#10
- # asm 2: movdqa 144(<0arg1p=%r8),>0ab9=%xmm9
- movdqa 144(%r8),%xmm9
- # qhasm: 0ab9six = 0ab9
- # asm 1: movdqa <0ab9=int6464#10,>0ab9six=int6464#13
- # asm 2: movdqa <0ab9=%xmm9,>0ab9six=%xmm12
- movdqa %xmm9,%xmm12
- # qhasm: float6464 0ab9six *= SIX_SIX
- # asm 1: mulpd SIX_SIX,<0ab9six=int6464#13
- # asm 2: mulpd SIX_SIX,<0ab9six=%xmm12
- mulpd SIX_SIX,%xmm12
- # qhasm: 0t9 = 0ab9
- # asm 1: movdqa <0ab9=int6464#10,>0t9=int6464#14
- # asm 2: movdqa <0ab9=%xmm9,>0t9=%xmm13
- movdqa %xmm9,%xmm13
- # qhasm: float6464 0t9 *= *(int128 *)(op1 + 0)
- # asm 1: mulpd 0(<op1=int64#2),<0t9=int6464#14
- # asm 2: mulpd 0(<op1=%rsi),<0t9=%xmm13
- mulpd 0(%rsi),%xmm13
- # qhasm: float6464 0r9 +=0t9
- # asm 1: addpd <0t9=int6464#14,<0r9=int6464#11
- # asm 2: addpd <0t9=%xmm13,<0r9=%xmm10
- addpd %xmm13,%xmm10
- # qhasm: 1t13 = 0ab9
- # asm 1: movdqa <0ab9=int6464#10,>1t13=int6464#14
- # asm 2: movdqa <0ab9=%xmm9,>1t13=%xmm13
- movdqa %xmm9,%xmm13
- # qhasm: float6464 1t13 *= *(int128 *)(op1 + 64)
- # asm 1: mulpd 64(<op1=int64#2),<1t13=int6464#14
- # asm 2: mulpd 64(<op1=%rsi),<1t13=%xmm13
- mulpd 64(%rsi),%xmm13
- # qhasm: float6464 0t13 +=1t13
- # asm 1: addpd <1t13=int6464#14,<0t13=int6464#3
- # asm 2: addpd <1t13=%xmm13,<0t13=%xmm2
- addpd %xmm13,%xmm2
- # qhasm: 1t14 = 0ab9
- # asm 1: movdqa <0ab9=int6464#10,>1t14=int6464#14
- # asm 2: movdqa <0ab9=%xmm9,>1t14=%xmm13
- movdqa %xmm9,%xmm13
- # qhasm: float6464 1t14 *= *(int128 *)(op1 + 80)
- # asm 1: mulpd 80(<op1=int64#2),<1t14=int6464#14
- # asm 2: mulpd 80(<op1=%rsi),<1t14=%xmm13
- mulpd 80(%rsi),%xmm13
- # qhasm: float6464 0t14 +=1t14
- # asm 1: addpd <1t14=int6464#14,<0t14=int6464#4
- # asm 2: addpd <1t14=%xmm13,<0t14=%xmm3
- addpd %xmm13,%xmm3
- # qhasm: 1t15 = 0ab9
- # asm 1: movdqa <0ab9=int6464#10,>1t15=int6464#14
- # asm 2: movdqa <0ab9=%xmm9,>1t15=%xmm13
- movdqa %xmm9,%xmm13
- # qhasm: float6464 1t15 *= *(int128 *)(op1 + 96)
- # asm 1: mulpd 96(<op1=int64#2),<1t15=int6464#14
- # asm 2: mulpd 96(<op1=%rsi),<1t15=%xmm13
- mulpd 96(%rsi),%xmm13
- # qhasm: float6464 0t15 +=1t15
- # asm 1: addpd <1t15=int6464#14,<0t15=int6464#5
- # asm 2: addpd <1t15=%xmm13,<0t15=%xmm4
- addpd %xmm13,%xmm4
- # qhasm: 1t19 = 0ab9
- # asm 1: movdqa <0ab9=int6464#10,>1t19=int6464#14
- # asm 2: movdqa <0ab9=%xmm9,>1t19=%xmm13
- movdqa %xmm9,%xmm13
- # qhasm: float6464 1t19 *= *(int128 *)(op1 + 160)
- # asm 1: mulpd 160(<op1=int64#2),<1t19=int6464#14
- # asm 2: mulpd 160(<op1=%rsi),<1t19=%xmm13
- mulpd 160(%rsi),%xmm13
- # qhasm: float6464 0t19 +=1t19
- # asm 1: addpd <1t19=int6464#14,<0t19=int6464#9
- # asm 2: addpd <1t19=%xmm13,<0t19=%xmm8
- addpd %xmm13,%xmm8
- # qhasm: 1t20 = 0ab9
- # asm 1: movdqa <0ab9=int6464#10,>1t20=int6464#10
- # asm 2: movdqa <0ab9=%xmm9,>1t20=%xmm9
- movdqa %xmm9,%xmm9
- # qhasm: float6464 1t20 *= *(int128 *)(op1 + 176)
- # asm 1: mulpd 176(<op1=int64#2),<1t20=int6464#10
- # asm 2: mulpd 176(<op1=%rsi),<1t20=%xmm9
- mulpd 176(%rsi),%xmm9
- # qhasm: 0t20 =1t20
- # asm 1: movdqa <1t20=int6464#10,>0t20=int6464#10
- # asm 2: movdqa <1t20=%xmm9,>0t20=%xmm9
- movdqa %xmm9,%xmm9
- # qhasm: 0t10 = 0ab9six
- # asm 1: movdqa <0ab9six=int6464#13,>0t10=int6464#14
- # asm 2: movdqa <0ab9six=%xmm12,>0t10=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t10 *= *(int128 *)(op1 + 16)
- # asm 1: mulpd 16(<op1=int64#2),<0t10=int6464#14
- # asm 2: mulpd 16(<op1=%rsi),<0t10=%xmm13
- mulpd 16(%rsi),%xmm13
- # qhasm: float6464 0r10 +=0t10
- # asm 1: addpd <0t10=int6464#14,<0r10=int6464#12
- # asm 2: addpd <0t10=%xmm13,<0r10=%xmm11
- addpd %xmm13,%xmm11
- # qhasm: 0t11 = 0ab9six
- # asm 1: movdqa <0ab9six=int6464#13,>0t11=int6464#14
- # asm 2: movdqa <0ab9six=%xmm12,>0t11=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t11 *= *(int128 *)(op1 + 32)
- # asm 1: mulpd 32(<op1=int64#2),<0t11=int6464#14
- # asm 2: mulpd 32(<op1=%rsi),<0t11=%xmm13
- mulpd 32(%rsi),%xmm13
- # qhasm: float6464 0r11 +=0t11
- # asm 1: addpd <0t11=int6464#14,<0r11=int6464#1
- # asm 2: addpd <0t11=%xmm13,<0r11=%xmm0
- addpd %xmm13,%xmm0
- # qhasm: 1t12 = 0ab9six
- # asm 1: movdqa <0ab9six=int6464#13,>1t12=int6464#14
- # asm 2: movdqa <0ab9six=%xmm12,>1t12=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 1t12 *= *(int128 *)(op1 + 48)
- # asm 1: mulpd 48(<op1=int64#2),<1t12=int6464#14
- # asm 2: mulpd 48(<op1=%rsi),<1t12=%xmm13
- mulpd 48(%rsi),%xmm13
- # qhasm: float6464 0t12 +=1t12
- # asm 1: addpd <1t12=int6464#14,<0t12=int6464#2
- # asm 2: addpd <1t12=%xmm13,<0t12=%xmm1
- addpd %xmm13,%xmm1
- # qhasm: 1t16 = 0ab9six
- # asm 1: movdqa <0ab9six=int6464#13,>1t16=int6464#14
- # asm 2: movdqa <0ab9six=%xmm12,>1t16=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 1t16 *= *(int128 *)(op1 + 112)
- # asm 1: mulpd 112(<op1=int64#2),<1t16=int6464#14
- # asm 2: mulpd 112(<op1=%rsi),<1t16=%xmm13
- mulpd 112(%rsi),%xmm13
- # qhasm: float6464 0t16 +=1t16
- # asm 1: addpd <1t16=int6464#14,<0t16=int6464#6
- # asm 2: addpd <1t16=%xmm13,<0t16=%xmm5
- addpd %xmm13,%xmm5
- # qhasm: 1t17 = 0ab9six
- # asm 1: movdqa <0ab9six=int6464#13,>1t17=int6464#14
- # asm 2: movdqa <0ab9six=%xmm12,>1t17=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 1t17 *= *(int128 *)(op1 + 128)
- # asm 1: mulpd 128(<op1=int64#2),<1t17=int6464#14
- # asm 2: mulpd 128(<op1=%rsi),<1t17=%xmm13
- mulpd 128(%rsi),%xmm13
- # qhasm: float6464 0t17 +=1t17
- # asm 1: addpd <1t17=int6464#14,<0t17=int6464#7
- # asm 2: addpd <1t17=%xmm13,<0t17=%xmm6
- addpd %xmm13,%xmm6
- # qhasm: 1t18 = 0ab9six
- # asm 1: movdqa <0ab9six=int6464#13,>1t18=int6464#13
- # asm 2: movdqa <0ab9six=%xmm12,>1t18=%xmm12
- movdqa %xmm12,%xmm12
- # qhasm: float6464 1t18 *= *(int128 *)(op1 + 144)
- # asm 1: mulpd 144(<op1=int64#2),<1t18=int6464#13
- # asm 2: mulpd 144(<op1=%rsi),<1t18=%xmm12
- mulpd 144(%rsi),%xmm12
- # qhasm: float6464 0t18 +=1t18
- # asm 1: addpd <1t18=int6464#13,<0t18=int6464#8
- # asm 2: addpd <1t18=%xmm12,<0t18=%xmm7
- addpd %xmm12,%xmm7
- # qhasm: *(int128 *)(1mysp + 144) = 0r9
- # asm 1: movdqa <0r9=int6464#11,144(<1mysp=int64#4)
- # asm 2: movdqa <0r9=%xmm10,144(<1mysp=%rcx)
- movdqa %xmm10,144(%rcx)
- # qhasm: 0ab10 = *(int128 *)(0arg1p + 160)
- # asm 1: movdqa 160(<0arg1p=int64#5),>0ab10=int6464#11
- # asm 2: movdqa 160(<0arg1p=%r8),>0ab10=%xmm10
- movdqa 160(%r8),%xmm10
- # qhasm: 0ab10six = 0ab10
- # asm 1: movdqa <0ab10=int6464#11,>0ab10six=int6464#13
- # asm 2: movdqa <0ab10=%xmm10,>0ab10six=%xmm12
- movdqa %xmm10,%xmm12
- # qhasm: float6464 0ab10six *= SIX_SIX
- # asm 1: mulpd SIX_SIX,<0ab10six=int6464#13
- # asm 2: mulpd SIX_SIX,<0ab10six=%xmm12
- mulpd SIX_SIX,%xmm12
- # qhasm: 0t10 = 0ab10
- # asm 1: movdqa <0ab10=int6464#11,>0t10=int6464#14
- # asm 2: movdqa <0ab10=%xmm10,>0t10=%xmm13
- movdqa %xmm10,%xmm13
- # qhasm: float6464 0t10 *= *(int128 *)(op1 + 0)
- # asm 1: mulpd 0(<op1=int64#2),<0t10=int6464#14
- # asm 2: mulpd 0(<op1=%rsi),<0t10=%xmm13
- mulpd 0(%rsi),%xmm13
- # qhasm: float6464 0r10 +=0t10
- # asm 1: addpd <0t10=int6464#14,<0r10=int6464#12
- # asm 2: addpd <0t10=%xmm13,<0r10=%xmm11
- addpd %xmm13,%xmm11
- # qhasm: 1t13 = 0ab10
- # asm 1: movdqa <0ab10=int6464#11,>1t13=int6464#14
- # asm 2: movdqa <0ab10=%xmm10,>1t13=%xmm13
- movdqa %xmm10,%xmm13
- # qhasm: float6464 1t13 *= *(int128 *)(op1 + 48)
- # asm 1: mulpd 48(<op1=int64#2),<1t13=int6464#14
- # asm 2: mulpd 48(<op1=%rsi),<1t13=%xmm13
- mulpd 48(%rsi),%xmm13
- # qhasm: float6464 0t13 +=1t13
- # asm 1: addpd <1t13=int6464#14,<0t13=int6464#3
- # asm 2: addpd <1t13=%xmm13,<0t13=%xmm2
- addpd %xmm13,%xmm2
- # qhasm: 1t14 = 0ab10
- # asm 1: movdqa <0ab10=int6464#11,>1t14=int6464#14
- # asm 2: movdqa <0ab10=%xmm10,>1t14=%xmm13
- movdqa %xmm10,%xmm13
- # qhasm: float6464 1t14 *= *(int128 *)(op1 + 64)
- # asm 1: mulpd 64(<op1=int64#2),<1t14=int6464#14
- # asm 2: mulpd 64(<op1=%rsi),<1t14=%xmm13
- mulpd 64(%rsi),%xmm13
- # qhasm: float6464 0t14 +=1t14
- # asm 1: addpd <1t14=int6464#14,<0t14=int6464#4
- # asm 2: addpd <1t14=%xmm13,<0t14=%xmm3
- addpd %xmm13,%xmm3
- # qhasm: 1t16 = 0ab10
- # asm 1: movdqa <0ab10=int6464#11,>1t16=int6464#14
- # asm 2: movdqa <0ab10=%xmm10,>1t16=%xmm13
- movdqa %xmm10,%xmm13
- # qhasm: float6464 1t16 *= *(int128 *)(op1 + 96)
- # asm 1: mulpd 96(<op1=int64#2),<1t16=int6464#14
- # asm 2: mulpd 96(<op1=%rsi),<1t16=%xmm13
- mulpd 96(%rsi),%xmm13
- # qhasm: float6464 0t16 +=1t16
- # asm 1: addpd <1t16=int6464#14,<0t16=int6464#6
- # asm 2: addpd <1t16=%xmm13,<0t16=%xmm5
- addpd %xmm13,%xmm5
- # qhasm: 1t15 = 0ab10
- # asm 1: movdqa <0ab10=int6464#11,>1t15=int6464#14
- # asm 2: movdqa <0ab10=%xmm10,>1t15=%xmm13
- movdqa %xmm10,%xmm13
- # qhasm: float6464 1t15 *= *(int128 *)(op1 + 80)
- # asm 1: mulpd 80(<op1=int64#2),<1t15=int6464#14
- # asm 2: mulpd 80(<op1=%rsi),<1t15=%xmm13
- mulpd 80(%rsi),%xmm13
- # qhasm: float6464 0t15 +=1t15
- # asm 1: addpd <1t15=int6464#14,<0t15=int6464#5
- # asm 2: addpd <1t15=%xmm13,<0t15=%xmm4
- addpd %xmm13,%xmm4
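
(Note the instruction order in the 0ab10 column: the 1t16 product with op1 + 96 is issued before this 1t15 product with op1 + 80. The two are independent, so the swap is harmless; presumably it reflects scheduling in the original qhasm source.)
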
- # qhasm: 1t19 = 0ab10
- # asm 1: movdqa <0ab10=int6464#11,>1t19=int6464#14
- # asm 2: movdqa <0ab10=%xmm10,>1t19=%xmm13
- movdqa %xmm10,%xmm13
- # qhasm: float6464 1t19 *= *(int128 *)(op1 + 144)
- # asm 1: mulpd 144(<op1=int64#2),<1t19=int6464#14
- # asm 2: mulpd 144(<op1=%rsi),<1t19=%xmm13
- mulpd 144(%rsi),%xmm13
- # qhasm: float6464 0t19 +=1t19
- # asm 1: addpd <1t19=int6464#14,<0t19=int6464#9
- # asm 2: addpd <1t19=%xmm13,<0t19=%xmm8
- addpd %xmm13,%xmm8
- # qhasm: 1t20 = 0ab10
- # asm 1: movdqa <0ab10=int6464#11,>1t20=int6464#14
- # asm 2: movdqa <0ab10=%xmm10,>1t20=%xmm13
- movdqa %xmm10,%xmm13
- # qhasm: float6464 1t20 *= *(int128 *)(op1 + 160)
- # asm 1: mulpd 160(<op1=int64#2),<1t20=int6464#14
- # asm 2: mulpd 160(<op1=%rsi),<1t20=%xmm13
- mulpd 160(%rsi),%xmm13
- # qhasm: float6464 0t20 +=1t20
- # asm 1: addpd <1t20=int6464#14,<0t20=int6464#10
- # asm 2: addpd <1t20=%xmm13,<0t20=%xmm9
- addpd %xmm13,%xmm9
- # qhasm: 1t21 = 0ab10
- # asm 1: movdqa <0ab10=int6464#11,>1t21=int6464#11
- # asm 2: movdqa <0ab10=%xmm10,>1t21=%xmm10
- movdqa %xmm10,%xmm10
- # qhasm: float6464 1t21 *= *(int128 *)(op1 + 176)
- # asm 1: mulpd 176(<op1=int64#2),<1t21=int6464#11
- # asm 2: mulpd 176(<op1=%rsi),<1t21=%xmm10
- mulpd 176(%rsi),%xmm10
- # qhasm: 0t21 =1t21
- # asm 1: movdqa <1t21=int6464#11,>0t21=int6464#11
- # asm 2: movdqa <1t21=%xmm10,>0t21=%xmm10
- movdqa %xmm10,%xmm10
- # qhasm: 0t11 = 0ab10six
- # asm 1: movdqa <0ab10six=int6464#13,>0t11=int6464#14
- # asm 2: movdqa <0ab10six=%xmm12,>0t11=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 0t11 *= *(int128 *)(op1 + 16)
- # asm 1: mulpd 16(<op1=int64#2),<0t11=int6464#14
- # asm 2: mulpd 16(<op1=%rsi),<0t11=%xmm13
- mulpd 16(%rsi),%xmm13
- # qhasm: float6464 0r11 +=0t11
- # asm 1: addpd <0t11=int6464#14,<0r11=int6464#1
- # asm 2: addpd <0t11=%xmm13,<0r11=%xmm0
- addpd %xmm13,%xmm0
- # qhasm: 1t12 = 0ab10six
- # asm 1: movdqa <0ab10six=int6464#13,>1t12=int6464#14
- # asm 2: movdqa <0ab10six=%xmm12,>1t12=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 1t12 *= *(int128 *)(op1 + 32)
- # asm 1: mulpd 32(<op1=int64#2),<1t12=int6464#14
- # asm 2: mulpd 32(<op1=%rsi),<1t12=%xmm13
- mulpd 32(%rsi),%xmm13
- # qhasm: float6464 0t12 +=1t12
- # asm 1: addpd <1t12=int6464#14,<0t12=int6464#2
- # asm 2: addpd <1t12=%xmm13,<0t12=%xmm1
- addpd %xmm13,%xmm1
- # qhasm: 1t17 = 0ab10six
- # asm 1: movdqa <0ab10six=int6464#13,>1t17=int6464#14
- # asm 2: movdqa <0ab10six=%xmm12,>1t17=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 1t17 *= *(int128 *)(op1 + 112)
- # asm 1: mulpd 112(<op1=int64#2),<1t17=int6464#14
- # asm 2: mulpd 112(<op1=%rsi),<1t17=%xmm13
- mulpd 112(%rsi),%xmm13
- # qhasm: float6464 0t17 +=1t17
- # asm 1: addpd <1t17=int6464#14,<0t17=int6464#7
- # asm 2: addpd <1t17=%xmm13,<0t17=%xmm6
- addpd %xmm13,%xmm6
- # qhasm: 1t18 = 0ab10six
- # asm 1: movdqa <0ab10six=int6464#13,>1t18=int6464#13
- # asm 2: movdqa <0ab10six=%xmm12,>1t18=%xmm12
- movdqa %xmm12,%xmm12
- # qhasm: float6464 1t18 *= *(int128 *)(op1 + 128)
- # asm 1: mulpd 128(<op1=int64#2),<1t18=int6464#13
- # asm 2: mulpd 128(<op1=%rsi),<1t18=%xmm12
- mulpd 128(%rsi),%xmm12
- # qhasm: float6464 0t18 +=1t18
- # asm 1: addpd <1t18=int6464#13,<0t18=int6464#8
- # asm 2: addpd <1t18=%xmm12,<0t18=%xmm7
- addpd %xmm12,%xmm7
- # qhasm: *(int128 *)(1mysp + 160) = 0r10
- # asm 1: movdqa <0r10=int6464#12,160(<1mysp=int64#4)
- # asm 2: movdqa <0r10=%xmm11,160(<1mysp=%rcx)
- movdqa %xmm11,160(%rcx)
- # qhasm: 0ab11 = *(int128 *)(0arg1p + 176)
- # asm 1: movdqa 176(<0arg1p=int64#5),>0ab11=int6464#12
- # asm 2: movdqa 176(<0arg1p=%r8),>0ab11=%xmm11
- movdqa 176(%r8),%xmm11
- # qhasm: 0ab11six = 0ab11
- # asm 1: movdqa <0ab11=int6464#12,>0ab11six=int6464#13
- # asm 2: movdqa <0ab11=%xmm11,>0ab11six=%xmm12
- movdqa %xmm11,%xmm12
- # qhasm: float6464 0ab11six *= SIX_SIX
- # asm 1: mulpd SIX_SIX,<0ab11six=int6464#13
- # asm 2: mulpd SIX_SIX,<0ab11six=%xmm12
- mulpd SIX_SIX,%xmm12
- # qhasm: 0t11 = 0ab11
- # asm 1: movdqa <0ab11=int6464#12,>0t11=int6464#14
- # asm 2: movdqa <0ab11=%xmm11,>0t11=%xmm13
- movdqa %xmm11,%xmm13
- # qhasm: float6464 0t11 *= *(int128 *)(op1 + 0)
- # asm 1: mulpd 0(<op1=int64#2),<0t11=int6464#14
- # asm 2: mulpd 0(<op1=%rsi),<0t11=%xmm13
- mulpd 0(%rsi),%xmm13
- # qhasm: float6464 0r11 +=0t11
- # asm 1: addpd <0t11=int6464#14,<0r11=int6464#1
- # asm 2: addpd <0t11=%xmm13,<0r11=%xmm0
- addpd %xmm13,%xmm0
- # qhasm: 1t13 = 0ab11
- # asm 1: movdqa <0ab11=int6464#12,>1t13=int6464#14
- # asm 2: movdqa <0ab11=%xmm11,>1t13=%xmm13
- movdqa %xmm11,%xmm13
- # qhasm: float6464 1t13 *= *(int128 *)(op1 + 32)
- # asm 1: mulpd 32(<op1=int64#2),<1t13=int6464#14
- # asm 2: mulpd 32(<op1=%rsi),<1t13=%xmm13
- mulpd 32(%rsi),%xmm13
- # qhasm: float6464 0t13 +=1t13
- # asm 1: addpd <1t13=int6464#14,<0t13=int6464#3
- # asm 2: addpd <1t13=%xmm13,<0t13=%xmm2
- addpd %xmm13,%xmm2
- # qhasm: 1t14 = 0ab11
- # asm 1: movdqa <0ab11=int6464#12,>1t14=int6464#14
- # asm 2: movdqa <0ab11=%xmm11,>1t14=%xmm13
- movdqa %xmm11,%xmm13
- # qhasm: float6464 1t14 *= *(int128 *)(op1 + 48)
- # asm 1: mulpd 48(<op1=int64#2),<1t14=int6464#14
- # asm 2: mulpd 48(<op1=%rsi),<1t14=%xmm13
- mulpd 48(%rsi),%xmm13
- # qhasm: float6464 0t14 +=1t14
- # asm 1: addpd <1t14=int6464#14,<0t14=int6464#4
- # asm 2: addpd <1t14=%xmm13,<0t14=%xmm3
- addpd %xmm13,%xmm3
- # qhasm: 1t15 = 0ab11
- # asm 1: movdqa <0ab11=int6464#12,>1t15=int6464#14
- # asm 2: movdqa <0ab11=%xmm11,>1t15=%xmm13
- movdqa %xmm11,%xmm13
- # qhasm: float6464 1t15 *= *(int128 *)(op1 + 64)
- # asm 1: mulpd 64(<op1=int64#2),<1t15=int6464#14
- # asm 2: mulpd 64(<op1=%rsi),<1t15=%xmm13
- mulpd 64(%rsi),%xmm13
- # qhasm: float6464 0t15 +=1t15
- # asm 1: addpd <1t15=int6464#14,<0t15=int6464#5
- # asm 2: addpd <1t15=%xmm13,<0t15=%xmm4
- addpd %xmm13,%xmm4
- # qhasm: 1t16 = 0ab11
- # asm 1: movdqa <0ab11=int6464#12,>1t16=int6464#14
- # asm 2: movdqa <0ab11=%xmm11,>1t16=%xmm13
- movdqa %xmm11,%xmm13
- # qhasm: float6464 1t16 *= *(int128 *)(op1 + 80)
- # asm 1: mulpd 80(<op1=int64#2),<1t16=int6464#14
- # asm 2: mulpd 80(<op1=%rsi),<1t16=%xmm13
- mulpd 80(%rsi),%xmm13
- # qhasm: float6464 0t16 +=1t16
- # asm 1: addpd <1t16=int6464#14,<0t16=int6464#6
- # asm 2: addpd <1t16=%xmm13,<0t16=%xmm5
- addpd %xmm13,%xmm5
- # qhasm: 1t17 = 0ab11
- # asm 1: movdqa <0ab11=int6464#12,>1t17=int6464#14
- # asm 2: movdqa <0ab11=%xmm11,>1t17=%xmm13
- movdqa %xmm11,%xmm13
- # qhasm: float6464 1t17 *= *(int128 *)(op1 + 96)
- # asm 1: mulpd 96(<op1=int64#2),<1t17=int6464#14
- # asm 2: mulpd 96(<op1=%rsi),<1t17=%xmm13
- mulpd 96(%rsi),%xmm13
- # qhasm: float6464 0t17 +=1t17
- # asm 1: addpd <1t17=int6464#14,<0t17=int6464#7
- # asm 2: addpd <1t17=%xmm13,<0t17=%xmm6
- addpd %xmm13,%xmm6
- # qhasm: 1t19 = 0ab11
- # asm 1: movdqa <0ab11=int6464#12,>1t19=int6464#14
- # asm 2: movdqa <0ab11=%xmm11,>1t19=%xmm13
- movdqa %xmm11,%xmm13
- # qhasm: float6464 1t19 *= *(int128 *)(op1 + 128)
- # asm 1: mulpd 128(<op1=int64#2),<1t19=int6464#14
- # asm 2: mulpd 128(<op1=%rsi),<1t19=%xmm13
- mulpd 128(%rsi),%xmm13
- # qhasm: float6464 0t19 +=1t19
- # asm 1: addpd <1t19=int6464#14,<0t19=int6464#9
- # asm 2: addpd <1t19=%xmm13,<0t19=%xmm8
- addpd %xmm13,%xmm8
- # qhasm: 1t20 = 0ab11
- # asm 1: movdqa <0ab11=int6464#12,>1t20=int6464#14
- # asm 2: movdqa <0ab11=%xmm11,>1t20=%xmm13
- movdqa %xmm11,%xmm13
- # qhasm: float6464 1t20 *= *(int128 *)(op1 + 144)
- # asm 1: mulpd 144(<op1=int64#2),<1t20=int6464#14
- # asm 2: mulpd 144(<op1=%rsi),<1t20=%xmm13
- mulpd 144(%rsi),%xmm13
- # qhasm: float6464 0t20 +=1t20
- # asm 1: addpd <1t20=int6464#14,<0t20=int6464#10
- # asm 2: addpd <1t20=%xmm13,<0t20=%xmm9
- addpd %xmm13,%xmm9
- # qhasm: 1t21 = 0ab11
- # asm 1: movdqa <0ab11=int6464#12,>1t21=int6464#14
- # asm 2: movdqa <0ab11=%xmm11,>1t21=%xmm13
- movdqa %xmm11,%xmm13
- # qhasm: float6464 1t21 *= *(int128 *)(op1 + 160)
- # asm 1: mulpd 160(<op1=int64#2),<1t21=int6464#14
- # asm 2: mulpd 160(<op1=%rsi),<1t21=%xmm13
- mulpd 160(%rsi),%xmm13
- # qhasm: float6464 0t21 +=1t21
- # asm 1: addpd <1t21=int6464#14,<0t21=int6464#11
- # asm 2: addpd <1t21=%xmm13,<0t21=%xmm10
- addpd %xmm13,%xmm10
- # qhasm: 1t22 = 0ab11
- # asm 1: movdqa <0ab11=int6464#12,>1t22=int6464#12
- # asm 2: movdqa <0ab11=%xmm11,>1t22=%xmm11
- movdqa %xmm11,%xmm11
- # qhasm: float6464 1t22 *= *(int128 *)(op1 + 176)
- # asm 1: mulpd 176(<op1=int64#2),<1t22=int6464#12
- # asm 2: mulpd 176(<op1=%rsi),<1t22=%xmm11
- mulpd 176(%rsi),%xmm11
- # qhasm: 0t22 =1t22
- # asm 1: movdqa <1t22=int6464#12,>0t22=int6464#12
- # asm 2: movdqa <1t22=%xmm11,>0t22=%xmm11
- movdqa %xmm11,%xmm11
- # qhasm: 1t12 = 0ab11six
- # asm 1: movdqa <0ab11six=int6464#13,>1t12=int6464#14
- # asm 2: movdqa <0ab11six=%xmm12,>1t12=%xmm13
- movdqa %xmm12,%xmm13
- # qhasm: float6464 1t12 *= *(int128 *)(op1 + 16)
- # asm 1: mulpd 16(<op1=int64#2),<1t12=int6464#14
- # asm 2: mulpd 16(<op1=%rsi),<1t12=%xmm13
- mulpd 16(%rsi),%xmm13
- # qhasm: float6464 0t12 +=1t12
- # asm 1: addpd <1t12=int6464#14,<0t12=int6464#2
- # asm 2: addpd <1t12=%xmm13,<0t12=%xmm1
- addpd %xmm13,%xmm1
- # qhasm: 1t18 = 0ab11six
- # asm 1: movdqa <0ab11six=int6464#13,>1t18=int6464#13
- # asm 2: movdqa <0ab11six=%xmm12,>1t18=%xmm12
- movdqa %xmm12,%xmm12
- # qhasm: float6464 1t18 *= *(int128 *)(op1 + 112)
- # asm 1: mulpd 112(<op1=int64#2),<1t18=int6464#13
- # asm 2: mulpd 112(<op1=%rsi),<1t18=%xmm12
- mulpd 112(%rsi),%xmm12
- # qhasm: float6464 0t18 +=1t18
- # asm 1: addpd <1t18=int6464#13,<0t18=int6464#8
- # asm 2: addpd <1t18=%xmm12,<0t18=%xmm7
- addpd %xmm12,%xmm7
- # qhasm: *(int128 *)(1mysp + 176) = 0r11
- # asm 1: movdqa <0r11=int6464#1,176(<1mysp=int64#4)
- # asm 2: movdqa <0r11=%xmm0,176(<1mysp=%rcx)
- movdqa %xmm0,176(%rcx)
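
That final store completes the schoolbook multiplication: the low coefficients 0r0..0r11 of the 23-coefficient product now sit at 1mysp + 0..176, while the high coefficients 0t12..0t22 are still live in registers. Everything that follows folds those high terms back into the twelve low coefficients.
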
- # qhasm: int6464 1r0
- # qhasm: int6464 1r1
- # qhasm: int6464 1r2
- # qhasm: int6464 1r3
- # qhasm: int6464 1r4
- # qhasm: int6464 1r5
- # qhasm: int6464 1r6
- # qhasm: int6464 1r7
- # qhasm: int6464 1r8
- # qhasm: int6464 1r9
- # qhasm: int6464 1r10
- # qhasm: int6464 1r11
- # qhasm: int6464 1t0
- # qhasm: int6464 1t1
- # qhasm: int6464 1t2
- # qhasm: int6464 1t3
- # qhasm: int6464 1t4
- # qhasm: int6464 1t5
- # qhasm: int6464 1t6
- # qhasm: int6464 1t7
- # qhasm: int6464 1t8
- # qhasm: int6464 1t9
- # qhasm: int6464 1t10
- # qhasm: int6464 1t11
- # qhasm: int6464 2t12
- # qhasm: int6464 2t13
- # qhasm: int6464 2t14
- # qhasm: int6464 2t15
- # qhasm: int6464 2t16
- # qhasm: int6464 2t17
- # qhasm: int6464 2t18
- # qhasm: int6464 2t19
- # qhasm: int6464 2t20
- # qhasm: int6464 2t21
- # qhasm: int6464 2t22
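
These declarations introduce the temporaries for the degree reduction: each 2t* holds a scaled copy of the corresponding 0t* high term. The twelve blocks below each load one stored coefficient from 1mysp, add or subtract small signed multiples of three or four high terms, and store it back; a compact scalar transcription of the whole fold follows the final store (to 1mysp + 176) below.
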
- # qhasm: 1r0 = *(int128 *)(1mysp + 0)
- # asm 1: movdqa 0(<1mysp=int64#4),>1r0=int6464#1
- # asm 2: movdqa 0(<1mysp=%rcx),>1r0=%xmm0
- movdqa 0(%rcx),%xmm0
- # qhasm: float6464 1r0 -= 0t12
- # asm 1: subpd <0t12=int6464#2,<1r0=int6464#1
- # asm 2: subpd <0t12=%xmm1,<1r0=%xmm0
- subpd %xmm1,%xmm0
- # qhasm: 2t15 = 0t15
- # asm 1: movdqa <0t15=int6464#5,>2t15=int6464#13
- # asm 2: movdqa <0t15=%xmm4,>2t15=%xmm12
- movdqa %xmm4,%xmm12
- # qhasm: float6464 2t15 *= SIX_SIX
- # asm 1: mulpd SIX_SIX,<2t15=int6464#13
- # asm 2: mulpd SIX_SIX,<2t15=%xmm12
- mulpd SIX_SIX,%xmm12
- # qhasm: float6464 1r0 += 2t15
- # asm 1: addpd <2t15=int6464#13,<1r0=int6464#1
- # asm 2: addpd <2t15=%xmm12,<1r0=%xmm0
- addpd %xmm12,%xmm0
- # qhasm: 2t18 = 0t18
- # asm 1: movdqa <0t18=int6464#8,>2t18=int6464#13
- # asm 2: movdqa <0t18=%xmm7,>2t18=%xmm12
- movdqa %xmm7,%xmm12
- # qhasm: float6464 2t18 *= TWO_TWO
- # asm 1: mulpd TWO_TWO,<2t18=int6464#13
- # asm 2: mulpd TWO_TWO,<2t18=%xmm12
- mulpd TWO_TWO,%xmm12
- # qhasm: float6464 1r0 -= 2t18
- # asm 1: subpd <2t18=int6464#13,<1r0=int6464#1
- # asm 2: subpd <2t18=%xmm12,<1r0=%xmm0
- subpd %xmm12,%xmm0
- # qhasm: 2t21 = 0t21
- # asm 1: movdqa <0t21=int6464#11,>2t21=int6464#13
- # asm 2: movdqa <0t21=%xmm10,>2t21=%xmm12
- movdqa %xmm10,%xmm12
- # qhasm: float6464 2t21 *= SIX_SIX
- # asm 1: mulpd SIX_SIX,<2t21=int6464#13
- # asm 2: mulpd SIX_SIX,<2t21=%xmm12
- mulpd SIX_SIX,%xmm12
- # qhasm: float6464 1r0 -= 2t21
- # asm 1: subpd <2t21=int6464#13,<1r0=int6464#1
- # asm 2: subpd <2t21=%xmm12,<1r0=%xmm0
- subpd %xmm12,%xmm0
- # qhasm: *(int128 *)(1mysp + 0) = 1r0
- # asm 1: movdqa <1r0=int6464#1,0(<1mysp=int64#4)
- # asm 2: movdqa <1r0=%xmm0,0(<1mysp=%rcx)
- movdqa %xmm0,0(%rcx)
- # qhasm: 1r3 = *(int128 *)(1mysp + 48)
- # asm 1: movdqa 48(<1mysp=int64#4),>1r3=int6464#1
- # asm 2: movdqa 48(<1mysp=%rcx),>1r3=%xmm0
- movdqa 48(%rcx),%xmm0
- # qhasm: float6464 1r3 -= 0t12
- # asm 1: subpd <0t12=int6464#2,<1r3=int6464#1
- # asm 2: subpd <0t12=%xmm1,<1r3=%xmm0
- subpd %xmm1,%xmm0
- # qhasm: 2t15 = 0t15
- # asm 1: movdqa <0t15=int6464#5,>2t15=int6464#13
- # asm 2: movdqa <0t15=%xmm4,>2t15=%xmm12
- movdqa %xmm4,%xmm12
- # qhasm: float6464 2t15 *= FIVE_FIVE
- # asm 1: mulpd FIVE_FIVE,<2t15=int6464#13
- # asm 2: mulpd FIVE_FIVE,<2t15=%xmm12
- mulpd FIVE_FIVE,%xmm12
- # qhasm: float6464 1r3 += 2t15
- # asm 1: addpd <2t15=int6464#13,<1r3=int6464#1
- # asm 2: addpd <2t15=%xmm12,<1r3=%xmm0
- addpd %xmm12,%xmm0
- # qhasm: float6464 1r3 -= 0t18
- # asm 1: subpd <0t18=int6464#8,<1r3=int6464#1
- # asm 2: subpd <0t18=%xmm7,<1r3=%xmm0
- subpd %xmm7,%xmm0
- # qhasm: 2t21 = 0t21
- # asm 1: movdqa <0t21=int6464#11,>2t21=int6464#13
- # asm 2: movdqa <0t21=%xmm10,>2t21=%xmm12
- movdqa %xmm10,%xmm12
- # qhasm: float6464 2t21 *= EIGHT_EIGHT
- # asm 1: mulpd EIGHT_EIGHT,<2t21=int6464#13
- # asm 2: mulpd EIGHT_EIGHT,<2t21=%xmm12
- mulpd EIGHT_EIGHT,%xmm12
- # qhasm: float6464 1r3 -= 2t21
- # asm 1: subpd <2t21=int6464#13,<1r3=int6464#1
- # asm 2: subpd <2t21=%xmm12,<1r3=%xmm0
- subpd %xmm12,%xmm0
- # qhasm: *(int128 *)(1mysp + 48) = 1r3
- # asm 1: movdqa <1r3=int6464#1,48(<1mysp=int64#4)
- # asm 2: movdqa <1r3=%xmm0,48(<1mysp=%rcx)
- movdqa %xmm0,48(%rcx)
- # qhasm: 1r6 = *(int128 *)(1mysp + 96)
- # asm 1: movdqa 96(<1mysp=int64#4),>1r6=int6464#1
- # asm 2: movdqa 96(<1mysp=%rcx),>1r6=%xmm0
- movdqa 96(%rcx),%xmm0
- # qhasm: 2t12 = 0t12
- # asm 1: movdqa <0t12=int6464#2,>2t12=int6464#13
- # asm 2: movdqa <0t12=%xmm1,>2t12=%xmm12
- movdqa %xmm1,%xmm12
- # qhasm: float6464 2t12 *= FOUR_FOUR
- # asm 1: mulpd FOUR_FOUR,<2t12=int6464#13
- # asm 2: mulpd FOUR_FOUR,<2t12=%xmm12
- mulpd FOUR_FOUR,%xmm12
- # qhasm: float6464 1r6 -= 2t12
- # asm 1: subpd <2t12=int6464#13,<1r6=int6464#1
- # asm 2: subpd <2t12=%xmm12,<1r6=%xmm0
- subpd %xmm12,%xmm0
- # qhasm: 2t15 = 0t15
- # asm 1: movdqa <0t15=int6464#5,>2t15=int6464#13
- # asm 2: movdqa <0t15=%xmm4,>2t15=%xmm12
- movdqa %xmm4,%xmm12
- # qhasm: float6464 2t15 *= EIGHTEEN_EIGHTEEN
- # asm 1: mulpd EIGHTEEN_EIGHTEEN,<2t15=int6464#13
- # asm 2: mulpd EIGHTEEN_EIGHTEEN,<2t15=%xmm12
- mulpd EIGHTEEN_EIGHTEEN,%xmm12
- # qhasm: float6464 1r6 += 2t15
- # asm 1: addpd <2t15=int6464#13,<1r6=int6464#1
- # asm 2: addpd <2t15=%xmm12,<1r6=%xmm0
- addpd %xmm12,%xmm0
- # qhasm: 2t18 = 0t18
- # asm 1: movdqa <0t18=int6464#8,>2t18=int6464#13
- # asm 2: movdqa <0t18=%xmm7,>2t18=%xmm12
- movdqa %xmm7,%xmm12
- # qhasm: float6464 2t18 *= THREE_THREE
- # asm 1: mulpd THREE_THREE,<2t18=int6464#13
- # asm 2: mulpd THREE_THREE,<2t18=%xmm12
- mulpd THREE_THREE,%xmm12
- # qhasm: float6464 1r6 -= 2t18
- # asm 1: subpd <2t18=int6464#13,<1r6=int6464#1
- # asm 2: subpd <2t18=%xmm12,<1r6=%xmm0
- subpd %xmm12,%xmm0
- # qhasm: 2t21 = 0t21
- # asm 1: movdqa <0t21=int6464#11,>2t21=int6464#13
- # asm 2: movdqa <0t21=%xmm10,>2t21=%xmm12
- movdqa %xmm10,%xmm12
- # qhasm: float6464 2t21 *= THIRTY_THIRTY
- # asm 1: mulpd THIRTY_THIRTY,<2t21=int6464#13
- # asm 2: mulpd THIRTY_THIRTY,<2t21=%xmm12
- mulpd THIRTY_THIRTY,%xmm12
- # qhasm: float6464 1r6 -= 2t21
- # asm 1: subpd <2t21=int6464#13,<1r6=int6464#1
- # asm 2: subpd <2t21=%xmm12,<1r6=%xmm0
- subpd %xmm12,%xmm0
- # qhasm: *(int128 *)(1mysp + 96) = 1r6
- # asm 1: movdqa <1r6=int6464#1,96(<1mysp=int64#4)
- # asm 2: movdqa <1r6=%xmm0,96(<1mysp=%rcx)
- movdqa %xmm0,96(%rcx)
- # qhasm: 1r9 = *(int128 *)(1mysp + 144)
- # asm 1: movdqa 144(<1mysp=int64#4),>1r9=int6464#1
- # asm 2: movdqa 144(<1mysp=%rcx),>1r9=%xmm0
- movdqa 144(%rcx),%xmm0
- # qhasm: float6464 1r9 -= 0t12
- # asm 1: subpd <0t12=int6464#2,<1r9=int6464#1
- # asm 2: subpd <0t12=%xmm1,<1r9=%xmm0
- subpd %xmm1,%xmm0
- # qhasm: 2t15 = 0t15
- # asm 1: movdqa <0t15=int6464#5,>2t15=int6464#2
- # asm 2: movdqa <0t15=%xmm4,>2t15=%xmm1
- movdqa %xmm4,%xmm1
- # qhasm: float6464 2t15 *= TWO_TWO
- # asm 1: mulpd TWO_TWO,<2t15=int6464#2
- # asm 2: mulpd TWO_TWO,<2t15=%xmm1
- mulpd TWO_TWO,%xmm1
- # qhasm: float6464 1r9 += 2t15
- # asm 1: addpd <2t15=int6464#2,<1r9=int6464#1
- # asm 2: addpd <2t15=%xmm1,<1r9=%xmm0
- addpd %xmm1,%xmm0
- # qhasm: float6464 1r9 += 0t18
- # asm 1: addpd <0t18=int6464#8,<1r9=int6464#1
- # asm 2: addpd <0t18=%xmm7,<1r9=%xmm0
- addpd %xmm7,%xmm0
- # qhasm: 2t21 = 0t21
- # asm 1: movdqa <0t21=int6464#11,>2t21=int6464#2
- # asm 2: movdqa <0t21=%xmm10,>2t21=%xmm1
- movdqa %xmm10,%xmm1
- # qhasm: float6464 2t21 *= NINE_NINE
- # asm 1: mulpd NINE_NINE,<2t21=int6464#2
- # asm 2: mulpd NINE_NINE,<2t21=%xmm1
- mulpd NINE_NINE,%xmm1
- # qhasm: float6464 1r9 -= 2t21
- # asm 1: subpd <2t21=int6464#2,<1r9=int6464#1
- # asm 2: subpd <2t21=%xmm1,<1r9=%xmm0
- subpd %xmm1,%xmm0
- # qhasm: *(int128 *)(1mysp + 144) = 1r9
- # asm 1: movdqa <1r9=int6464#1,144(<1mysp=int64#4)
- # asm 2: movdqa <1r9=%xmm0,144(<1mysp=%rcx)
- movdqa %xmm0,144(%rcx)
- # qhasm: 1r1 = *(int128 *)(1mysp + 16)
- # asm 1: movdqa 16(<1mysp=int64#4),>1r1=int6464#1
- # asm 2: movdqa 16(<1mysp=%rcx),>1r1=%xmm0
- movdqa 16(%rcx),%xmm0
- # qhasm: float6464 1r1 -= 0t13
- # asm 1: subpd <0t13=int6464#3,<1r1=int6464#1
- # asm 2: subpd <0t13=%xmm2,<1r1=%xmm0
- subpd %xmm2,%xmm0
- # qhasm: float6464 1r1 += 0t16
- # asm 1: addpd <0t16=int6464#6,<1r1=int6464#1
- # asm 2: addpd <0t16=%xmm5,<1r1=%xmm0
- addpd %xmm5,%xmm0
- # qhasm: 2t19 = 0t19
- # asm 1: movdqa <0t19=int6464#9,>2t19=int6464#2
- # asm 2: movdqa <0t19=%xmm8,>2t19=%xmm1
- movdqa %xmm8,%xmm1
- # qhasm: float6464 2t19 *= TWO_TWO
- # asm 1: mulpd TWO_TWO,<2t19=int6464#2
- # asm 2: mulpd TWO_TWO,<2t19=%xmm1
- mulpd TWO_TWO,%xmm1
- # qhasm: float6464 1r1 -= 2t19
- # asm 1: subpd <2t19=int6464#2,<1r1=int6464#1
- # asm 2: subpd <2t19=%xmm1,<1r1=%xmm0
- subpd %xmm1,%xmm0
- # qhasm: float6464 1r1 -= 0t22
- # asm 1: subpd <0t22=int6464#12,<1r1=int6464#1
- # asm 2: subpd <0t22=%xmm11,<1r1=%xmm0
- subpd %xmm11,%xmm0
- # qhasm: *(int128 *)(1mysp + 16) = 1r1
- # asm 1: movdqa <1r1=int6464#1,16(<1mysp=int64#4)
- # asm 2: movdqa <1r1=%xmm0,16(<1mysp=%rcx)
- movdqa %xmm0,16(%rcx)
- # qhasm: 1r4 = *(int128 *)(1mysp + 64)
- # asm 1: movdqa 64(<1mysp=int64#4),>1r4=int6464#1
- # asm 2: movdqa 64(<1mysp=%rcx),>1r4=%xmm0
- movdqa 64(%rcx),%xmm0
- # qhasm: 2t13 = 0t13
- # asm 1: movdqa <0t13=int6464#3,>2t13=int6464#2
- # asm 2: movdqa <0t13=%xmm2,>2t13=%xmm1
- movdqa %xmm2,%xmm1
- # qhasm: float6464 2t13 *= SIX_SIX
- # asm 1: mulpd SIX_SIX,<2t13=int6464#2
- # asm 2: mulpd SIX_SIX,<2t13=%xmm1
- mulpd SIX_SIX,%xmm1
- # qhasm: float6464 1r4 -= 2t13
- # asm 1: subpd <2t13=int6464#2,<1r4=int6464#1
- # asm 2: subpd <2t13=%xmm1,<1r4=%xmm0
- subpd %xmm1,%xmm0
- # qhasm: 2t16 = 0t16
- # asm 1: movdqa <0t16=int6464#6,>2t16=int6464#2
- # asm 2: movdqa <0t16=%xmm5,>2t16=%xmm1
- movdqa %xmm5,%xmm1
- # qhasm: float6464 2t16 *= FIVE_FIVE
- # asm 1: mulpd FIVE_FIVE,<2t16=int6464#2
- # asm 2: mulpd FIVE_FIVE,<2t16=%xmm1
- mulpd FIVE_FIVE,%xmm1
- # qhasm: float6464 1r4 += 2t16
- # asm 1: addpd <2t16=int6464#2,<1r4=int6464#1
- # asm 2: addpd <2t16=%xmm1,<1r4=%xmm0
- addpd %xmm1,%xmm0
- # qhasm: 2t19 = 0t19
- # asm 1: movdqa <0t19=int6464#9,>2t19=int6464#2
- # asm 2: movdqa <0t19=%xmm8,>2t19=%xmm1
- movdqa %xmm8,%xmm1
- # qhasm: float6464 2t19 *= SIX_SIX
- # asm 1: mulpd SIX_SIX,<2t19=int6464#2
- # asm 2: mulpd SIX_SIX,<2t19=%xmm1
- mulpd SIX_SIX,%xmm1
- # qhasm: float6464 1r4 -= 2t19
- # asm 1: subpd <2t19=int6464#2,<1r4=int6464#1
- # asm 2: subpd <2t19=%xmm1,<1r4=%xmm0
- subpd %xmm1,%xmm0
- # qhasm: 2t22 = 0t22
- # asm 1: movdqa <0t22=int6464#12,>2t22=int6464#2
- # asm 2: movdqa <0t22=%xmm11,>2t22=%xmm1
- movdqa %xmm11,%xmm1
- # qhasm: float6464 2t22 *= EIGHT_EIGHT
- # asm 1: mulpd EIGHT_EIGHT,<2t22=int6464#2
- # asm 2: mulpd EIGHT_EIGHT,<2t22=%xmm1
- mulpd EIGHT_EIGHT,%xmm1
- # qhasm: float6464 1r4 -= 2t22
- # asm 1: subpd <2t22=int6464#2,<1r4=int6464#1
- # asm 2: subpd <2t22=%xmm1,<1r4=%xmm0
- subpd %xmm1,%xmm0
- # qhasm: *(int128 *)(1mysp + 64) = 1r4
- # asm 1: movdqa <1r4=int6464#1,64(<1mysp=int64#4)
- # asm 2: movdqa <1r4=%xmm0,64(<1mysp=%rcx)
- movdqa %xmm0,64(%rcx)
- # qhasm: 1r7 = *(int128 *)(1mysp + 112)
- # asm 1: movdqa 112(<1mysp=int64#4),>1r7=int6464#1
- # asm 2: movdqa 112(<1mysp=%rcx),>1r7=%xmm0
- movdqa 112(%rcx),%xmm0
- # qhasm: 2t13 = 0t13
- # asm 1: movdqa <0t13=int6464#3,>2t13=int6464#2
- # asm 2: movdqa <0t13=%xmm2,>2t13=%xmm1
- movdqa %xmm2,%xmm1
- # qhasm: float6464 2t13 *= FOUR_FOUR
- # asm 1: mulpd FOUR_FOUR,<2t13=int6464#2
- # asm 2: mulpd FOUR_FOUR,<2t13=%xmm1
- mulpd FOUR_FOUR,%xmm1
- # qhasm: float6464 1r7 -= 2t13
- # asm 1: subpd <2t13=int6464#2,<1r7=int6464#1
- # asm 2: subpd <2t13=%xmm1,<1r7=%xmm0
- subpd %xmm1,%xmm0
- # qhasm: 2t16 = 0t16
- # asm 1: movdqa <0t16=int6464#6,>2t16=int6464#2
- # asm 2: movdqa <0t16=%xmm5,>2t16=%xmm1
- movdqa %xmm5,%xmm1
- # qhasm: float6464 2t16 *= THREE_THREE
- # asm 1: mulpd THREE_THREE,<2t16=int6464#2
- # asm 2: mulpd THREE_THREE,<2t16=%xmm1
- mulpd THREE_THREE,%xmm1
- # qhasm: float6464 1r7 += 2t16
- # asm 1: addpd <2t16=int6464#2,<1r7=int6464#1
- # asm 2: addpd <2t16=%xmm1,<1r7=%xmm0
- addpd %xmm1,%xmm0
- # qhasm: 2t19 = 0t19
- # asm 1: movdqa <0t19=int6464#9,>2t19=int6464#2
- # asm 2: movdqa <0t19=%xmm8,>2t19=%xmm1
- movdqa %xmm8,%xmm1
- # qhasm: float6464 2t19 *= THREE_THREE
- # asm 1: mulpd THREE_THREE,<2t19=int6464#2
- # asm 2: mulpd THREE_THREE,<2t19=%xmm1
- mulpd THREE_THREE,%xmm1
- # qhasm: float6464 1r7 -= 2t19
- # asm 1: subpd <2t19=int6464#2,<1r7=int6464#1
- # asm 2: subpd <2t19=%xmm1,<1r7=%xmm0
- subpd %xmm1,%xmm0
- # qhasm: 2t22 = 0t22
- # asm 1: movdqa <0t22=int6464#12,>2t22=int6464#2
- # asm 2: movdqa <0t22=%xmm11,>2t22=%xmm1
- movdqa %xmm11,%xmm1
- # qhasm: float6464 2t22 *= FIVE_FIVE
- # asm 1: mulpd FIVE_FIVE,<2t22=int6464#2
- # asm 2: mulpd FIVE_FIVE,<2t22=%xmm1
- mulpd FIVE_FIVE,%xmm1
- # qhasm: float6464 1r7 -= 2t22
- # asm 1: subpd <2t22=int6464#2,<1r7=int6464#1
- # asm 2: subpd <2t22=%xmm1,<1r7=%xmm0
- subpd %xmm1,%xmm0
- # qhasm: *(int128 *)(1mysp + 112) = 1r7
- # asm 1: movdqa <1r7=int6464#1,112(<1mysp=int64#4)
- # asm 2: movdqa <1r7=%xmm0,112(<1mysp=%rcx)
- movdqa %xmm0,112(%rcx)
- # qhasm: 1r10 = *(int128 *)(1mysp + 160)
- # asm 1: movdqa 160(<1mysp=int64#4),>1r10=int6464#1
- # asm 2: movdqa 160(<1mysp=%rcx),>1r10=%xmm0
- movdqa 160(%rcx),%xmm0
- # qhasm: 2t13 = 0t13
- # asm 1: movdqa <0t13=int6464#3,>2t13=int6464#2
- # asm 2: movdqa <0t13=%xmm2,>2t13=%xmm1
- movdqa %xmm2,%xmm1
- # qhasm: float6464 2t13 *= SIX_SIX
- # asm 1: mulpd SIX_SIX,<2t13=int6464#2
- # asm 2: mulpd SIX_SIX,<2t13=%xmm1
- mulpd SIX_SIX,%xmm1
- # qhasm: float6464 1r10 -= 2t13
- # asm 1: subpd <2t13=int6464#2,<1r10=int6464#1
- # asm 2: subpd <2t13=%xmm1,<1r10=%xmm0
- subpd %xmm1,%xmm0
- # qhasm: 2t16 = 0t16
- # asm 1: movdqa <0t16=int6464#6,>2t16=int6464#2
- # asm 2: movdqa <0t16=%xmm5,>2t16=%xmm1
- movdqa %xmm5,%xmm1
- # qhasm: float6464 2t16 *= TWO_TWO
- # asm 1: mulpd TWO_TWO,<2t16=int6464#2
- # asm 2: mulpd TWO_TWO,<2t16=%xmm1
- mulpd TWO_TWO,%xmm1
- # qhasm: float6464 1r10 += 2t16
- # asm 1: addpd <2t16=int6464#2,<1r10=int6464#1
- # asm 2: addpd <2t16=%xmm1,<1r10=%xmm0
- addpd %xmm1,%xmm0
- # qhasm: 2t19 = 0t19
- # asm 1: movdqa <0t19=int6464#9,>2t19=int6464#2
- # asm 2: movdqa <0t19=%xmm8,>2t19=%xmm1
- movdqa %xmm8,%xmm1
- # qhasm: float6464 2t19 *= SIX_SIX
- # asm 1: mulpd SIX_SIX,<2t19=int6464#2
- # asm 2: mulpd SIX_SIX,<2t19=%xmm1
- mulpd SIX_SIX,%xmm1
- # qhasm: float6464 1r10 += 2t19
- # asm 1: addpd <2t19=int6464#2,<1r10=int6464#1
- # asm 2: addpd <2t19=%xmm1,<1r10=%xmm0
- addpd %xmm1,%xmm0
- # qhasm: 2t22 = 0t22
- # asm 1: movdqa <0t22=int6464#12,>2t22=int6464#2
- # asm 2: movdqa <0t22=%xmm11,>2t22=%xmm1
- movdqa %xmm11,%xmm1
- # qhasm: float6464 2t22 *= NINE_NINE
- # asm 1: mulpd NINE_NINE,<2t22=int6464#2
- # asm 2: mulpd NINE_NINE,<2t22=%xmm1
- mulpd NINE_NINE,%xmm1
- # qhasm: float6464 1r10 -= 2t22
- # asm 1: subpd <2t22=int6464#2,<1r10=int6464#1
- # asm 2: subpd <2t22=%xmm1,<1r10=%xmm0
- subpd %xmm1,%xmm0
- # qhasm: *(int128 *)(1mysp + 160) = 1r10
- # asm 1: movdqa <1r10=int6464#1,160(<1mysp=int64#4)
- # asm 2: movdqa <1r10=%xmm0,160(<1mysp=%rcx)
- movdqa %xmm0,160(%rcx)
- # qhasm: 1r2 = *(int128 *)(1mysp + 32)
- # asm 1: movdqa 32(<1mysp=int64#4),>1r2=int6464#1
- # asm 2: movdqa 32(<1mysp=%rcx),>1r2=%xmm0
- movdqa 32(%rcx),%xmm0
- # qhasm: float6464 1r2 -= 0t14
- # asm 1: subpd <0t14=int6464#4,<1r2=int6464#1
- # asm 2: subpd <0t14=%xmm3,<1r2=%xmm0
- subpd %xmm3,%xmm0
- # qhasm: float6464 1r2 += 0t17
- # asm 1: addpd <0t17=int6464#7,<1r2=int6464#1
- # asm 2: addpd <0t17=%xmm6,<1r2=%xmm0
- addpd %xmm6,%xmm0
- # qhasm: 2t20 = 0t20
- # asm 1: movdqa <0t20=int6464#10,>2t20=int6464#2
- # asm 2: movdqa <0t20=%xmm9,>2t20=%xmm1
- movdqa %xmm9,%xmm1
- # qhasm: float6464 2t20 *= TWO_TWO
- # asm 1: mulpd TWO_TWO,<2t20=int6464#2
- # asm 2: mulpd TWO_TWO,<2t20=%xmm1
- mulpd TWO_TWO,%xmm1
- # qhasm: float6464 1r2 -= 2t20
- # asm 1: subpd <2t20=int6464#2,<1r2=int6464#1
- # asm 2: subpd <2t20=%xmm1,<1r2=%xmm0
- subpd %xmm1,%xmm0
- # qhasm: *(int128 *)(1mysp + 32) = 1r2
- # asm 1: movdqa <1r2=int6464#1,32(<1mysp=int64#4)
- # asm 2: movdqa <1r2=%xmm0,32(<1mysp=%rcx)
- movdqa %xmm0,32(%rcx)
- # qhasm: 1r5 = *(int128 *)(1mysp + 80)
- # asm 1: movdqa 80(<1mysp=int64#4),>1r5=int6464#1
- # asm 2: movdqa 80(<1mysp=%rcx),>1r5=%xmm0
- movdqa 80(%rcx),%xmm0
- # qhasm: 2t14 = 0t14
- # asm 1: movdqa <0t14=int6464#4,>2t14=int6464#2
- # asm 2: movdqa <0t14=%xmm3,>2t14=%xmm1
- movdqa %xmm3,%xmm1
- # qhasm: float6464 2t14 *= SIX_SIX
- # asm 1: mulpd SIX_SIX,<2t14=int6464#2
- # asm 2: mulpd SIX_SIX,<2t14=%xmm1
- mulpd SIX_SIX,%xmm1
- # qhasm: float6464 1r5 -= 2t14
- # asm 1: subpd <2t14=int6464#2,<1r5=int6464#1
- # asm 2: subpd <2t14=%xmm1,<1r5=%xmm0
- subpd %xmm1,%xmm0
- # qhasm: 2t17 = 0t17
- # asm 1: movdqa <0t17=int6464#7,>2t17=int6464#2
- # asm 2: movdqa <0t17=%xmm6,>2t17=%xmm1
- movdqa %xmm6,%xmm1
- # qhasm: float6464 2t17 *= FIVE_FIVE
- # asm 1: mulpd FIVE_FIVE,<2t17=int6464#2
- # asm 2: mulpd FIVE_FIVE,<2t17=%xmm1
- mulpd FIVE_FIVE,%xmm1
- # qhasm: float6464 1r5 += 2t17
- # asm 1: addpd <2t17=int6464#2,<1r5=int6464#1
- # asm 2: addpd <2t17=%xmm1,<1r5=%xmm0
- addpd %xmm1,%xmm0
- # qhasm: 2t20 = 0t20
- # asm 1: movdqa <0t20=int6464#10,>2t20=int6464#2
- # asm 2: movdqa <0t20=%xmm9,>2t20=%xmm1
- movdqa %xmm9,%xmm1
- # qhasm: float6464 2t20 *= SIX_SIX
- # asm 1: mulpd SIX_SIX,<2t20=int6464#2
- # asm 2: mulpd SIX_SIX,<2t20=%xmm1
- mulpd SIX_SIX,%xmm1
- # qhasm: float6464 1r5 -= 2t20
- # asm 1: subpd <2t20=int6464#2,<1r5=int6464#1
- # asm 2: subpd <2t20=%xmm1,<1r5=%xmm0
- subpd %xmm1,%xmm0
- # qhasm: *(int128 *)(1mysp + 80) = 1r5
- # asm 1: movdqa <1r5=int6464#1,80(<1mysp=int64#4)
- # asm 2: movdqa <1r5=%xmm0,80(<1mysp=%rcx)
- movdqa %xmm0,80(%rcx)
- # qhasm: 1r8 = *(int128 *)(1mysp + 128)
- # asm 1: movdqa 128(<1mysp=int64#4),>1r8=int6464#1
- # asm 2: movdqa 128(<1mysp=%rcx),>1r8=%xmm0
- movdqa 128(%rcx),%xmm0
- # qhasm: 2t14 = 0t14
- # asm 1: movdqa <0t14=int6464#4,>2t14=int6464#2
- # asm 2: movdqa <0t14=%xmm3,>2t14=%xmm1
- movdqa %xmm3,%xmm1
- # qhasm: float6464 2t14 *= FOUR_FOUR
- # asm 1: mulpd FOUR_FOUR,<2t14=int6464#2
- # asm 2: mulpd FOUR_FOUR,<2t14=%xmm1
- mulpd FOUR_FOUR,%xmm1
- # qhasm: float6464 1r8 -= 2t14
- # asm 1: subpd <2t14=int6464#2,<1r8=int6464#1
- # asm 2: subpd <2t14=%xmm1,<1r8=%xmm0
- subpd %xmm1,%xmm0
- # qhasm: 2t17 = 0t17
- # asm 1: movdqa <0t17=int6464#7,>2t17=int6464#2
- # asm 2: movdqa <0t17=%xmm6,>2t17=%xmm1
- movdqa %xmm6,%xmm1
- # qhasm: float6464 2t17 *= THREE_THREE
- # asm 1: mulpd THREE_THREE,<2t17=int6464#2
- # asm 2: mulpd THREE_THREE,<2t17=%xmm1
- mulpd THREE_THREE,%xmm1
- # qhasm: float6464 1r8 += 2t17
- # asm 1: addpd <2t17=int6464#2,<1r8=int6464#1
- # asm 2: addpd <2t17=%xmm1,<1r8=%xmm0
- addpd %xmm1,%xmm0
- # qhasm: 2t20 = 0t20
- # asm 1: movdqa <0t20=int6464#10,>2t20=int6464#2
- # asm 2: movdqa <0t20=%xmm9,>2t20=%xmm1
- movdqa %xmm9,%xmm1
- # qhasm: float6464 2t20 *= THREE_THREE
- # asm 1: mulpd THREE_THREE,<2t20=int6464#2
- # asm 2: mulpd THREE_THREE,<2t20=%xmm1
- mulpd THREE_THREE,%xmm1
- # qhasm: float6464 1r8 -= 2t20
- # asm 1: subpd <2t20=int6464#2,<1r8=int6464#1
- # asm 2: subpd <2t20=%xmm1,<1r8=%xmm0
- subpd %xmm1,%xmm0
- # qhasm: *(int128 *)(1mysp + 128) = 1r8
- # asm 1: movdqa <1r8=int6464#1,128(<1mysp=int64#4)
- # asm 2: movdqa <1r8=%xmm0,128(<1mysp=%rcx)
- movdqa %xmm0,128(%rcx)
- # qhasm: 1r11 = *(int128 *)(1mysp + 176)
- # asm 1: movdqa 176(<1mysp=int64#4),>1r11=int6464#1
- # asm 2: movdqa 176(<1mysp=%rcx),>1r11=%xmm0
- movdqa 176(%rcx),%xmm0
- # qhasm: 2t14 = 0t14
- # asm 1: movdqa <0t14=int6464#4,>2t14=int6464#2
- # asm 2: movdqa <0t14=%xmm3,>2t14=%xmm1
- movdqa %xmm3,%xmm1
- # qhasm: float6464 2t14 *= SIX_SIX
- # asm 1: mulpd SIX_SIX,<2t14=int6464#2
- # asm 2: mulpd SIX_SIX,<2t14=%xmm1
- mulpd SIX_SIX,%xmm1
- # qhasm: float6464 1r11 -= 2t14
- # asm 1: subpd <2t14=int6464#2,<1r11=int6464#1
- # asm 2: subpd <2t14=%xmm1,<1r11=%xmm0
- subpd %xmm1,%xmm0
- # qhasm: 2t17 = 0t17
- # asm 1: movdqa <0t17=int6464#7,>2t17=int6464#2
- # asm 2: movdqa <0t17=%xmm6,>2t17=%xmm1
- movdqa %xmm6,%xmm1
- # qhasm: float6464 2t17 *= TWO_TWO
- # asm 1: mulpd TWO_TWO,<2t17=int6464#2
- # asm 2: mulpd TWO_TWO,<2t17=%xmm1
- mulpd TWO_TWO,%xmm1
- # qhasm: float6464 1r11 += 2t17
- # asm 1: addpd <2t17=int6464#2,<1r11=int6464#1
- # asm 2: addpd <2t17=%xmm1,<1r11=%xmm0
- addpd %xmm1,%xmm0
- # qhasm: 2t20 = 0t20
- # asm 1: movdqa <0t20=int6464#10,>2t20=int6464#2
- # asm 2: movdqa <0t20=%xmm9,>2t20=%xmm1
- movdqa %xmm9,%xmm1
- # qhasm: float6464 2t20 *= SIX_SIX
- # asm 1: mulpd SIX_SIX,<2t20=int6464#2
- # asm 2: mulpd SIX_SIX,<2t20=%xmm1
- mulpd SIX_SIX,%xmm1
- # qhasm: float6464 1r11 += 2t20
- # asm 1: addpd <2t20=int6464#2,<1r11=int6464#1
- # asm 2: addpd <2t20=%xmm1,<1r11=%xmm0
- addpd %xmm1,%xmm0
- # qhasm: *(int128 *)(1mysp + 176) = 1r11
- # asm 1: movdqa <1r11=int6464#1,176(<1mysp=int64#4)
- # asm 2: movdqa <1r11=%xmm0,176(<1mysp=%rcx)
- movdqa %xmm0,176(%rcx)
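
The removed lines above complete the degree-reduction phase: the high product coefficients (0t14 in %xmm3, 0t17 in %xmm6, 0t20 in %xmm9, plus a scratch copy of 0t22) are folded into the low result coefficients r2, r5, r8, r10 and r11 on the stack frame (1mysp) using small integer multipliers. For orientation, here is a scalar C sketch of exactly the operations visible in this excerpt, one lane of each two-lane SSE2 variable, with a hypothetical helper name; constant names in the comments match the listing:

    /* One lane of the two-lane SSE2 arithmetic removed above.
     * r[] is the coefficient array at 1mysp + 16*i; t14/t17/t20/t22
     * are high coefficients of the double-width product. */
    static void reduce_tail(double r[12], double t14, double t17,
                            double t20, double t22)
    {
        r[10] -= 9.0 * t22;   /* NINE_NINE (r10's earlier terms
                                 precede this excerpt) */
        r[2]  -= t14;
        r[2]  += t17;
        r[2]  -= 2.0 * t20;   /* TWO_TWO     */

        r[5]  -= 6.0 * t14;   /* SIX_SIX     */
        r[5]  += 5.0 * t17;   /* FIVE_FIVE   */
        r[5]  -= 6.0 * t20;   /* SIX_SIX     */

        r[8]  -= 4.0 * t14;   /* FOUR_FOUR   */
        r[8]  += 3.0 * t17;   /* THREE_THREE */
        r[8]  -= 3.0 * t20;   /* THREE_THREE */

        r[11] -= 6.0 * t14;   /* SIX_SIX     */
        r[11] += 2.0 * t17;   /* TWO_TWO     */
        r[11] += 6.0 * t20;   /* SIX_SIX (added, not subtracted) */
    }

The lines that follow declare the registers (0round, 0carry, 2t6) for the carry chain; a sketch of that phase appears after the end of the listing.
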
- # qhasm: int6464 0round
- # qhasm: int6464 0carry
- # qhasm: int6464 2t6
- # qhasm: r0 = *(int128 *)(1mysp + 0)
- # asm 1: movdqa 0(<1mysp=int64#4),>r0=int6464#1
- # asm 2: movdqa 0(<1mysp=%rcx),>r0=%xmm0
- movdqa 0(%rcx),%xmm0
- # qhasm: r1 = *(int128 *)(1mysp + 16)
- # asm 1: movdqa 16(<1mysp=int64#4),>r1=int6464#2
- # asm 2: movdqa 16(<1mysp=%rcx),>r1=%xmm1
- movdqa 16(%rcx),%xmm1
- # qhasm: r2 = *(int128 *)(1mysp + 32)
- # asm 1: movdqa 32(<1mysp=int64#4),>r2=int6464#3
- # asm 2: movdqa 32(<1mysp=%rcx),>r2=%xmm2
- movdqa 32(%rcx),%xmm2
- # qhasm: r3 = *(int128 *)(1mysp + 48)
- # asm 1: movdqa 48(<1mysp=int64#4),>r3=int6464#4
- # asm 2: movdqa 48(<1mysp=%rcx),>r3=%xmm3
- movdqa 48(%rcx),%xmm3
- # qhasm: r4 = *(int128 *)(1mysp + 64)
- # asm 1: movdqa 64(<1mysp=int64#4),>r4=int6464#5
- # asm 2: movdqa 64(<1mysp=%rcx),>r4=%xmm4
- movdqa 64(%rcx),%xmm4
- # qhasm: r5 = *(int128 *)(1mysp + 80)
- # asm 1: movdqa 80(<1mysp=int64#4),>r5=int6464#6
- # asm 2: movdqa 80(<1mysp=%rcx),>r5=%xmm5
- movdqa 80(%rcx),%xmm5
- # qhasm: r6 = *(int128 *)(1mysp + 96)
- # asm 1: movdqa 96(<1mysp=int64#4),>r6=int6464#7
- # asm 2: movdqa 96(<1mysp=%rcx),>r6=%xmm6
- movdqa 96(%rcx),%xmm6
- # qhasm: r7 = *(int128 *)(1mysp + 112)
- # asm 1: movdqa 112(<1mysp=int64#4),>r7=int6464#8
- # asm 2: movdqa 112(<1mysp=%rcx),>r7=%xmm7
- movdqa 112(%rcx),%xmm7
- # qhasm: r8 = *(int128 *)(1mysp + 128)
- # asm 1: movdqa 128(<1mysp=int64#4),>r8=int6464#9
- # asm 2: movdqa 128(<1mysp=%rcx),>r8=%xmm8
- movdqa 128(%rcx),%xmm8
- # qhasm: r9 = *(int128 *)(1mysp + 144)
- # asm 1: movdqa 144(<1mysp=int64#4),>r9=int6464#10
- # asm 2: movdqa 144(<1mysp=%rcx),>r9=%xmm9
- movdqa 144(%rcx),%xmm9
- # qhasm: r10 = *(int128 *)(1mysp + 160)
- # asm 1: movdqa 160(<1mysp=int64#4),>r10=int6464#11
- # asm 2: movdqa 160(<1mysp=%rcx),>r10=%xmm10
- movdqa 160(%rcx),%xmm10
- # qhasm: r11 = *(int128 *)(1mysp + 176)
- # asm 1: movdqa 176(<1mysp=int64#4),>r11=int6464#12
- # asm 2: movdqa 176(<1mysp=%rcx),>r11=%xmm11
- movdqa 176(%rcx),%xmm11
- # qhasm: 0round = ROUND_ROUND
- # asm 1: movdqa ROUND_ROUND,<0round=int6464#13
- # asm 2: movdqa ROUND_ROUND,<0round=%xmm12
- movdqa ROUND_ROUND,%xmm12
- # qhasm: 0carry = r1
- # asm 1: movdqa <r1=int6464#2,>0carry=int6464#14
- # asm 2: movdqa <r1=%xmm1,>0carry=%xmm13
- movdqa %xmm1,%xmm13
- # qhasm: float6464 0carry *= VINV_VINV
- # asm 1: mulpd VINV_VINV,<0carry=int6464#14
- # asm 2: mulpd VINV_VINV,<0carry=%xmm13
- mulpd VINV_VINV,%xmm13
- # qhasm: float6464 0carry += 0round
- # asm 1: addpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: addpd <0round=%xmm12,<0carry=%xmm13
- addpd %xmm12,%xmm13
- # qhasm: float6464 0carry -= 0round
- # asm 1: subpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: subpd <0round=%xmm12,<0carry=%xmm13
- subpd %xmm12,%xmm13
- # qhasm: float6464 r2 += 0carry
- # asm 1: addpd <0carry=int6464#14,<r2=int6464#3
- # asm 2: addpd <0carry=%xmm13,<r2=%xmm2
- addpd %xmm13,%xmm2
- # qhasm: float6464 0carry *= V_V
- # asm 1: mulpd V_V,<0carry=int6464#14
- # asm 2: mulpd V_V,<0carry=%xmm13
- mulpd V_V,%xmm13
- # qhasm: float6464 r1 -= 0carry
- # asm 1: subpd <0carry=int6464#14,<r1=int6464#2
- # asm 2: subpd <0carry=%xmm13,<r1=%xmm1
- subpd %xmm13,%xmm1
- # qhasm: 0carry = r4
- # asm 1: movdqa <r4=int6464#5,>0carry=int6464#14
- # asm 2: movdqa <r4=%xmm4,>0carry=%xmm13
- movdqa %xmm4,%xmm13
- # qhasm: float6464 0carry *= VINV_VINV
- # asm 1: mulpd VINV_VINV,<0carry=int6464#14
- # asm 2: mulpd VINV_VINV,<0carry=%xmm13
- mulpd VINV_VINV,%xmm13
- # qhasm: float6464 0carry += 0round
- # asm 1: addpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: addpd <0round=%xmm12,<0carry=%xmm13
- addpd %xmm12,%xmm13
- # qhasm: float6464 0carry -= 0round
- # asm 1: subpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: subpd <0round=%xmm12,<0carry=%xmm13
- subpd %xmm12,%xmm13
- # qhasm: float6464 r5 += 0carry
- # asm 1: addpd <0carry=int6464#14,<r5=int6464#6
- # asm 2: addpd <0carry=%xmm13,<r5=%xmm5
- addpd %xmm13,%xmm5
- # qhasm: float6464 0carry *= V_V
- # asm 1: mulpd V_V,<0carry=int6464#14
- # asm 2: mulpd V_V,<0carry=%xmm13
- mulpd V_V,%xmm13
- # qhasm: float6464 r4 -= 0carry
- # asm 1: subpd <0carry=int6464#14,<r4=int6464#5
- # asm 2: subpd <0carry=%xmm13,<r4=%xmm4
- subpd %xmm13,%xmm4
- # qhasm: 0carry = r7
- # asm 1: movdqa <r7=int6464#8,>0carry=int6464#14
- # asm 2: movdqa <r7=%xmm7,>0carry=%xmm13
- movdqa %xmm7,%xmm13
- # qhasm: float6464 0carry *= VINV_VINV
- # asm 1: mulpd VINV_VINV,<0carry=int6464#14
- # asm 2: mulpd VINV_VINV,<0carry=%xmm13
- mulpd VINV_VINV,%xmm13
- # qhasm: float6464 0carry += 0round
- # asm 1: addpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: addpd <0round=%xmm12,<0carry=%xmm13
- addpd %xmm12,%xmm13
- # qhasm: float6464 0carry -= 0round
- # asm 1: subpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: subpd <0round=%xmm12,<0carry=%xmm13
- subpd %xmm12,%xmm13
- # qhasm: float6464 r8 += 0carry
- # asm 1: addpd <0carry=int6464#14,<r8=int6464#9
- # asm 2: addpd <0carry=%xmm13,<r8=%xmm8
- addpd %xmm13,%xmm8
- # qhasm: float6464 0carry *= V_V
- # asm 1: mulpd V_V,<0carry=int6464#14
- # asm 2: mulpd V_V,<0carry=%xmm13
- mulpd V_V,%xmm13
- # qhasm: float6464 r7 -= 0carry
- # asm 1: subpd <0carry=int6464#14,<r7=int6464#8
- # asm 2: subpd <0carry=%xmm13,<r7=%xmm7
- subpd %xmm13,%xmm7
- # qhasm: 0carry = r10
- # asm 1: movdqa <r10=int6464#11,>0carry=int6464#14
- # asm 2: movdqa <r10=%xmm10,>0carry=%xmm13
- movdqa %xmm10,%xmm13
- # qhasm: float6464 0carry *= VINV_VINV
- # asm 1: mulpd VINV_VINV,<0carry=int6464#14
- # asm 2: mulpd VINV_VINV,<0carry=%xmm13
- mulpd VINV_VINV,%xmm13
- # qhasm: float6464 0carry += 0round
- # asm 1: addpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: addpd <0round=%xmm12,<0carry=%xmm13
- addpd %xmm12,%xmm13
- # qhasm: float6464 0carry -= 0round
- # asm 1: subpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: subpd <0round=%xmm12,<0carry=%xmm13
- subpd %xmm12,%xmm13
- # qhasm: float6464 r11 += 0carry
- # asm 1: addpd <0carry=int6464#14,<r11=int6464#12
- # asm 2: addpd <0carry=%xmm13,<r11=%xmm11
- addpd %xmm13,%xmm11
- # qhasm: float6464 0carry *= V_V
- # asm 1: mulpd V_V,<0carry=int6464#14
- # asm 2: mulpd V_V,<0carry=%xmm13
- mulpd V_V,%xmm13
- # qhasm: float6464 r10 -= 0carry
- # asm 1: subpd <0carry=int6464#14,<r10=int6464#11
- # asm 2: subpd <0carry=%xmm13,<r10=%xmm10
- subpd %xmm13,%xmm10
- # qhasm: 0carry = r2
- # asm 1: movdqa <r2=int6464#3,>0carry=int6464#14
- # asm 2: movdqa <r2=%xmm2,>0carry=%xmm13
- movdqa %xmm2,%xmm13
- # qhasm: float6464 0carry *= VINV_VINV
- # asm 1: mulpd VINV_VINV,<0carry=int6464#14
- # asm 2: mulpd VINV_VINV,<0carry=%xmm13
- mulpd VINV_VINV,%xmm13
- # qhasm: float6464 0carry += 0round
- # asm 1: addpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: addpd <0round=%xmm12,<0carry=%xmm13
- addpd %xmm12,%xmm13
- # qhasm: float6464 0carry -= 0round
- # asm 1: subpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: subpd <0round=%xmm12,<0carry=%xmm13
- subpd %xmm12,%xmm13
- # qhasm: float6464 r3 += 0carry
- # asm 1: addpd <0carry=int6464#14,<r3=int6464#4
- # asm 2: addpd <0carry=%xmm13,<r3=%xmm3
- addpd %xmm13,%xmm3
- # qhasm: float6464 0carry *= V_V
- # asm 1: mulpd V_V,<0carry=int6464#14
- # asm 2: mulpd V_V,<0carry=%xmm13
- mulpd V_V,%xmm13
- # qhasm: float6464 r2 -= 0carry
- # asm 1: subpd <0carry=int6464#14,<r2=int6464#3
- # asm 2: subpd <0carry=%xmm13,<r2=%xmm2
- subpd %xmm13,%xmm2
- # qhasm: 0carry = r5
- # asm 1: movdqa <r5=int6464#6,>0carry=int6464#14
- # asm 2: movdqa <r5=%xmm5,>0carry=%xmm13
- movdqa %xmm5,%xmm13
- # qhasm: float6464 0carry *= VINV_VINV
- # asm 1: mulpd VINV_VINV,<0carry=int6464#14
- # asm 2: mulpd VINV_VINV,<0carry=%xmm13
- mulpd VINV_VINV,%xmm13
- # qhasm: float6464 0carry += 0round
- # asm 1: addpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: addpd <0round=%xmm12,<0carry=%xmm13
- addpd %xmm12,%xmm13
- # qhasm: float6464 0carry -= 0round
- # asm 1: subpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: subpd <0round=%xmm12,<0carry=%xmm13
- subpd %xmm12,%xmm13
- # qhasm: float6464 r6 += 0carry
- # asm 1: addpd <0carry=int6464#14,<r6=int6464#7
- # asm 2: addpd <0carry=%xmm13,<r6=%xmm6
- addpd %xmm13,%xmm6
- # qhasm: float6464 0carry *= V_V
- # asm 1: mulpd V_V,<0carry=int6464#14
- # asm 2: mulpd V_V,<0carry=%xmm13
- mulpd V_V,%xmm13
- # qhasm: float6464 r5 -= 0carry
- # asm 1: subpd <0carry=int6464#14,<r5=int6464#6
- # asm 2: subpd <0carry=%xmm13,<r5=%xmm5
- subpd %xmm13,%xmm5
- # qhasm: 0carry = r8
- # asm 1: movdqa <r8=int6464#9,>0carry=int6464#14
- # asm 2: movdqa <r8=%xmm8,>0carry=%xmm13
- movdqa %xmm8,%xmm13
- # qhasm: float6464 0carry *= VINV_VINV
- # asm 1: mulpd VINV_VINV,<0carry=int6464#14
- # asm 2: mulpd VINV_VINV,<0carry=%xmm13
- mulpd VINV_VINV,%xmm13
- # qhasm: float6464 0carry += 0round
- # asm 1: addpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: addpd <0round=%xmm12,<0carry=%xmm13
- addpd %xmm12,%xmm13
- # qhasm: float6464 0carry -= 0round
- # asm 1: subpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: subpd <0round=%xmm12,<0carry=%xmm13
- subpd %xmm12,%xmm13
- # qhasm: float6464 r9 += 0carry
- # asm 1: addpd <0carry=int6464#14,<r9=int6464#10
- # asm 2: addpd <0carry=%xmm13,<r9=%xmm9
- addpd %xmm13,%xmm9
- # qhasm: float6464 0carry *= V_V
- # asm 1: mulpd V_V,<0carry=int6464#14
- # asm 2: mulpd V_V,<0carry=%xmm13
- mulpd V_V,%xmm13
- # qhasm: float6464 r8 -= 0carry
- # asm 1: subpd <0carry=int6464#14,<r8=int6464#9
- # asm 2: subpd <0carry=%xmm13,<r8=%xmm8
- subpd %xmm13,%xmm8
- # qhasm: 0carry = r11
- # asm 1: movdqa <r11=int6464#12,>0carry=int6464#14
- # asm 2: movdqa <r11=%xmm11,>0carry=%xmm13
- movdqa %xmm11,%xmm13
- # qhasm: float6464 0carry *= VINV_VINV
- # asm 1: mulpd VINV_VINV,<0carry=int6464#14
- # asm 2: mulpd VINV_VINV,<0carry=%xmm13
- mulpd VINV_VINV,%xmm13
- # qhasm: float6464 0carry += 0round
- # asm 1: addpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: addpd <0round=%xmm12,<0carry=%xmm13
- addpd %xmm12,%xmm13
- # qhasm: float6464 0carry -= 0round
- # asm 1: subpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: subpd <0round=%xmm12,<0carry=%xmm13
- subpd %xmm12,%xmm13
- # qhasm: float6464 r0 -= 0carry
- # asm 1: subpd <0carry=int6464#14,<r0=int6464#1
- # asm 2: subpd <0carry=%xmm13,<r0=%xmm0
- subpd %xmm13,%xmm0
- # qhasm: float6464 r3 -= 0carry
- # asm 1: subpd <0carry=int6464#14,<r3=int6464#4
- # asm 2: subpd <0carry=%xmm13,<r3=%xmm3
- subpd %xmm13,%xmm3
- # qhasm: 2t6 = 0carry
- # asm 1: movdqa <0carry=int6464#14,>2t6=int6464#15
- # asm 2: movdqa <0carry=%xmm13,>2t6=%xmm14
- movdqa %xmm13,%xmm14
- # qhasm: float6464 2t6 *= FOUR_FOUR
- # asm 1: mulpd FOUR_FOUR,<2t6=int6464#15
- # asm 2: mulpd FOUR_FOUR,<2t6=%xmm14
- mulpd FOUR_FOUR,%xmm14
- # qhasm: float6464 r6 -= 2t6
- # asm 1: subpd <2t6=int6464#15,<r6=int6464#7
- # asm 2: subpd <2t6=%xmm14,<r6=%xmm6
- subpd %xmm14,%xmm6
- # qhasm: float6464 r9 -= 0carry
- # asm 1: subpd <0carry=int6464#14,<r9=int6464#10
- # asm 2: subpd <0carry=%xmm13,<r9=%xmm9
- subpd %xmm13,%xmm9
- # qhasm: float6464 0carry *= V_V
- # asm 1: mulpd V_V,<0carry=int6464#14
- # asm 2: mulpd V_V,<0carry=%xmm13
- mulpd V_V,%xmm13
- # qhasm: float6464 r11 -= 0carry
- # asm 1: subpd <0carry=int6464#14,<r11=int6464#12
- # asm 2: subpd <0carry=%xmm13,<r11=%xmm11
- subpd %xmm13,%xmm11
- # qhasm: 0carry = r0
- # asm 1: movdqa <r0=int6464#1,>0carry=int6464#14
- # asm 2: movdqa <r0=%xmm0,>0carry=%xmm13
- movdqa %xmm0,%xmm13
- # qhasm: float6464 0carry *= V6INV_V6INV
- # asm 1: mulpd V6INV_V6INV,<0carry=int6464#14
- # asm 2: mulpd V6INV_V6INV,<0carry=%xmm13
- mulpd V6INV_V6INV,%xmm13
- # qhasm: float6464 0carry += 0round
- # asm 1: addpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: addpd <0round=%xmm12,<0carry=%xmm13
- addpd %xmm12,%xmm13
- # qhasm: float6464 0carry -= 0round
- # asm 1: subpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: subpd <0round=%xmm12,<0carry=%xmm13
- subpd %xmm12,%xmm13
- # qhasm: float6464 r1 += 0carry
- # asm 1: addpd <0carry=int6464#14,<r1=int6464#2
- # asm 2: addpd <0carry=%xmm13,<r1=%xmm1
- addpd %xmm13,%xmm1
- # qhasm: float6464 0carry *= V6_V6
- # asm 1: mulpd V6_V6,<0carry=int6464#14
- # asm 2: mulpd V6_V6,<0carry=%xmm13
- mulpd V6_V6,%xmm13
- # qhasm: float6464 r0 -= 0carry
- # asm 1: subpd <0carry=int6464#14,<r0=int6464#1
- # asm 2: subpd <0carry=%xmm13,<r0=%xmm0
- subpd %xmm13,%xmm0
- # qhasm: 0carry = r3
- # asm 1: movdqa <r3=int6464#4,>0carry=int6464#14
- # asm 2: movdqa <r3=%xmm3,>0carry=%xmm13
- movdqa %xmm3,%xmm13
- # qhasm: float6464 0carry *= VINV_VINV
- # asm 1: mulpd VINV_VINV,<0carry=int6464#14
- # asm 2: mulpd VINV_VINV,<0carry=%xmm13
- mulpd VINV_VINV,%xmm13
- # qhasm: float6464 0carry += 0round
- # asm 1: addpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: addpd <0round=%xmm12,<0carry=%xmm13
- addpd %xmm12,%xmm13
- # qhasm: float6464 0carry -= 0round
- # asm 1: subpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: subpd <0round=%xmm12,<0carry=%xmm13
- subpd %xmm12,%xmm13
- # qhasm: float6464 r4 += 0carry
- # asm 1: addpd <0carry=int6464#14,<r4=int6464#5
- # asm 2: addpd <0carry=%xmm13,<r4=%xmm4
- addpd %xmm13,%xmm4
- # qhasm: float6464 0carry *= V_V
- # asm 1: mulpd V_V,<0carry=int6464#14
- # asm 2: mulpd V_V,<0carry=%xmm13
- mulpd V_V,%xmm13
- # qhasm: float6464 r3 -= 0carry
- # asm 1: subpd <0carry=int6464#14,<r3=int6464#4
- # asm 2: subpd <0carry=%xmm13,<r3=%xmm3
- subpd %xmm13,%xmm3
- # qhasm: 0carry = r6
- # asm 1: movdqa <r6=int6464#7,>0carry=int6464#14
- # asm 2: movdqa <r6=%xmm6,>0carry=%xmm13
- movdqa %xmm6,%xmm13
- # qhasm: float6464 0carry *= V6INV_V6INV
- # asm 1: mulpd V6INV_V6INV,<0carry=int6464#14
- # asm 2: mulpd V6INV_V6INV,<0carry=%xmm13
- mulpd V6INV_V6INV,%xmm13
- # qhasm: float6464 0carry += 0round
- # asm 1: addpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: addpd <0round=%xmm12,<0carry=%xmm13
- addpd %xmm12,%xmm13
- # qhasm: float6464 0carry -= 0round
- # asm 1: subpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: subpd <0round=%xmm12,<0carry=%xmm13
- subpd %xmm12,%xmm13
- # qhasm: float6464 r7 += 0carry
- # asm 1: addpd <0carry=int6464#14,<r7=int6464#8
- # asm 2: addpd <0carry=%xmm13,<r7=%xmm7
- addpd %xmm13,%xmm7
- # qhasm: float6464 0carry *= V6_V6
- # asm 1: mulpd V6_V6,<0carry=int6464#14
- # asm 2: mulpd V6_V6,<0carry=%xmm13
- mulpd V6_V6,%xmm13
- # qhasm: float6464 r6 -= 0carry
- # asm 1: subpd <0carry=int6464#14,<r6=int6464#7
- # asm 2: subpd <0carry=%xmm13,<r6=%xmm6
- subpd %xmm13,%xmm6
- # qhasm: 0carry = r9
- # asm 1: movdqa <r9=int6464#10,>0carry=int6464#14
- # asm 2: movdqa <r9=%xmm9,>0carry=%xmm13
- movdqa %xmm9,%xmm13
- # qhasm: float6464 0carry *= VINV_VINV
- # asm 1: mulpd VINV_VINV,<0carry=int6464#14
- # asm 2: mulpd VINV_VINV,<0carry=%xmm13
- mulpd VINV_VINV,%xmm13
- # qhasm: float6464 0carry += 0round
- # asm 1: addpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: addpd <0round=%xmm12,<0carry=%xmm13
- addpd %xmm12,%xmm13
- # qhasm: float6464 0carry -= 0round
- # asm 1: subpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: subpd <0round=%xmm12,<0carry=%xmm13
- subpd %xmm12,%xmm13
- # qhasm: float6464 r10 += 0carry
- # asm 1: addpd <0carry=int6464#14,<r10=int6464#11
- # asm 2: addpd <0carry=%xmm13,<r10=%xmm10
- addpd %xmm13,%xmm10
- # qhasm: float6464 0carry *= V_V
- # asm 1: mulpd V_V,<0carry=int6464#14
- # asm 2: mulpd V_V,<0carry=%xmm13
- mulpd V_V,%xmm13
- # qhasm: float6464 r9 -= 0carry
- # asm 1: subpd <0carry=int6464#14,<r9=int6464#10
- # asm 2: subpd <0carry=%xmm13,<r9=%xmm9
- subpd %xmm13,%xmm9
- # qhasm: 0carry = r1
- # asm 1: movdqa <r1=int6464#2,>0carry=int6464#14
- # asm 2: movdqa <r1=%xmm1,>0carry=%xmm13
- movdqa %xmm1,%xmm13
- # qhasm: float6464 0carry *= VINV_VINV
- # asm 1: mulpd VINV_VINV,<0carry=int6464#14
- # asm 2: mulpd VINV_VINV,<0carry=%xmm13
- mulpd VINV_VINV,%xmm13
- # qhasm: float6464 0carry += 0round
- # asm 1: addpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: addpd <0round=%xmm12,<0carry=%xmm13
- addpd %xmm12,%xmm13
- # qhasm: float6464 0carry -= 0round
- # asm 1: subpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: subpd <0round=%xmm12,<0carry=%xmm13
- subpd %xmm12,%xmm13
- # qhasm: float6464 r2 += 0carry
- # asm 1: addpd <0carry=int6464#14,<r2=int6464#3
- # asm 2: addpd <0carry=%xmm13,<r2=%xmm2
- addpd %xmm13,%xmm2
- # qhasm: float6464 0carry *= V_V
- # asm 1: mulpd V_V,<0carry=int6464#14
- # asm 2: mulpd V_V,<0carry=%xmm13
- mulpd V_V,%xmm13
- # qhasm: float6464 r1 -= 0carry
- # asm 1: subpd <0carry=int6464#14,<r1=int6464#2
- # asm 2: subpd <0carry=%xmm13,<r1=%xmm1
- subpd %xmm13,%xmm1
- # qhasm: 0carry = r4
- # asm 1: movdqa <r4=int6464#5,>0carry=int6464#14
- # asm 2: movdqa <r4=%xmm4,>0carry=%xmm13
- movdqa %xmm4,%xmm13
- # qhasm: float6464 0carry *= VINV_VINV
- # asm 1: mulpd VINV_VINV,<0carry=int6464#14
- # asm 2: mulpd VINV_VINV,<0carry=%xmm13
- mulpd VINV_VINV,%xmm13
- # qhasm: float6464 0carry += 0round
- # asm 1: addpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: addpd <0round=%xmm12,<0carry=%xmm13
- addpd %xmm12,%xmm13
- # qhasm: float6464 0carry -= 0round
- # asm 1: subpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: subpd <0round=%xmm12,<0carry=%xmm13
- subpd %xmm12,%xmm13
- # qhasm: float6464 r5 += 0carry
- # asm 1: addpd <0carry=int6464#14,<r5=int6464#6
- # asm 2: addpd <0carry=%xmm13,<r5=%xmm5
- addpd %xmm13,%xmm5
- # qhasm: float6464 0carry *= V_V
- # asm 1: mulpd V_V,<0carry=int6464#14
- # asm 2: mulpd V_V,<0carry=%xmm13
- mulpd V_V,%xmm13
- # qhasm: float6464 r4 -= 0carry
- # asm 1: subpd <0carry=int6464#14,<r4=int6464#5
- # asm 2: subpd <0carry=%xmm13,<r4=%xmm4
- subpd %xmm13,%xmm4
- # qhasm: 0carry = r7
- # asm 1: movdqa <r7=int6464#8,>0carry=int6464#14
- # asm 2: movdqa <r7=%xmm7,>0carry=%xmm13
- movdqa %xmm7,%xmm13
- # qhasm: float6464 0carry *= VINV_VINV
- # asm 1: mulpd VINV_VINV,<0carry=int6464#14
- # asm 2: mulpd VINV_VINV,<0carry=%xmm13
- mulpd VINV_VINV,%xmm13
- # qhasm: float6464 0carry += 0round
- # asm 1: addpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: addpd <0round=%xmm12,<0carry=%xmm13
- addpd %xmm12,%xmm13
- # qhasm: float6464 0carry -= 0round
- # asm 1: subpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: subpd <0round=%xmm12,<0carry=%xmm13
- subpd %xmm12,%xmm13
- # qhasm: float6464 r8 += 0carry
- # asm 1: addpd <0carry=int6464#14,<r8=int6464#9
- # asm 2: addpd <0carry=%xmm13,<r8=%xmm8
- addpd %xmm13,%xmm8
- # qhasm: float6464 0carry *= V_V
- # asm 1: mulpd V_V,<0carry=int6464#14
- # asm 2: mulpd V_V,<0carry=%xmm13
- mulpd V_V,%xmm13
- # qhasm: float6464 r7 -= 0carry
- # asm 1: subpd <0carry=int6464#14,<r7=int6464#8
- # asm 2: subpd <0carry=%xmm13,<r7=%xmm7
- subpd %xmm13,%xmm7
- # qhasm: 0carry = r10
- # asm 1: movdqa <r10=int6464#11,>0carry=int6464#14
- # asm 2: movdqa <r10=%xmm10,>0carry=%xmm13
- movdqa %xmm10,%xmm13
- # qhasm: float6464 0carry *= VINV_VINV
- # asm 1: mulpd VINV_VINV,<0carry=int6464#14
- # asm 2: mulpd VINV_VINV,<0carry=%xmm13
- mulpd VINV_VINV,%xmm13
- # qhasm: float6464 0carry += 0round
- # asm 1: addpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: addpd <0round=%xmm12,<0carry=%xmm13
- addpd %xmm12,%xmm13
- # qhasm: float6464 0carry -= 0round
- # asm 1: subpd <0round=int6464#13,<0carry=int6464#14
- # asm 2: subpd <0round=%xmm12,<0carry=%xmm13
- subpd %xmm12,%xmm13
- # qhasm: float6464 r11 += 0carry
- # asm 1: addpd <0carry=int6464#14,<r11=int6464#12
- # asm 2: addpd <0carry=%xmm13,<r11=%xmm11
- addpd %xmm13,%xmm11
- # qhasm: float6464 0carry *= V_V
- # asm 1: mulpd V_V,<0carry=int6464#14
- # asm 2: mulpd V_V,<0carry=%xmm13
- mulpd V_V,%xmm13
- # qhasm: float6464 r10 -= 0carry
- # asm 1: subpd <0carry=int6464#14,<r10=int6464#11
- # asm 2: subpd <0carry=%xmm13,<r10=%xmm10
- subpd %xmm13,%xmm10
- # qhasm: *(int128 *)(rop + 0) = r0
- # asm 1: movdqa <r0=int6464#1,0(<rop=int64#1)
- # asm 2: movdqa <r0=%xmm0,0(<rop=%rdi)
- movdqa %xmm0,0(%rdi)
- # qhasm: *(int128 *)(rop + 16) = r1
- # asm 1: movdqa <r1=int6464#2,16(<rop=int64#1)
- # asm 2: movdqa <r1=%xmm1,16(<rop=%rdi)
- movdqa %xmm1,16(%rdi)
- # qhasm: *(int128 *)(rop + 32) = r2
- # asm 1: movdqa <r2=int6464#3,32(<rop=int64#1)
- # asm 2: movdqa <r2=%xmm2,32(<rop=%rdi)
- movdqa %xmm2,32(%rdi)
- # qhasm: *(int128 *)(rop + 48) = r3
- # asm 1: movdqa <r3=int6464#4,48(<rop=int64#1)
- # asm 2: movdqa <r3=%xmm3,48(<rop=%rdi)
- movdqa %xmm3,48(%rdi)
- # qhasm: *(int128 *)(rop + 64) = r4
- # asm 1: movdqa <r4=int6464#5,64(<rop=int64#1)
- # asm 2: movdqa <r4=%xmm4,64(<rop=%rdi)
- movdqa %xmm4,64(%rdi)
- # qhasm: *(int128 *)(rop + 80) = r5
- # asm 1: movdqa <r5=int6464#6,80(<rop=int64#1)
- # asm 2: movdqa <r5=%xmm5,80(<rop=%rdi)
- movdqa %xmm5,80(%rdi)
- # qhasm: *(int128 *)(rop + 96) = r6
- # asm 1: movdqa <r6=int6464#7,96(<rop=int64#1)
- # asm 2: movdqa <r6=%xmm6,96(<rop=%rdi)
- movdqa %xmm6,96(%rdi)
- # qhasm: *(int128 *)(rop + 112) = r7
- # asm 1: movdqa <r7=int6464#8,112(<rop=int64#1)
- # asm 2: movdqa <r7=%xmm7,112(<rop=%rdi)
- movdqa %xmm7,112(%rdi)
- # qhasm: *(int128 *)(rop + 128) = r8
- # asm 1: movdqa <r8=int6464#9,128(<rop=int64#1)
- # asm 2: movdqa <r8=%xmm8,128(<rop=%rdi)
- movdqa %xmm8,128(%rdi)
- # qhasm: *(int128 *)(rop + 144) = r9
- # asm 1: movdqa <r9=int6464#10,144(<rop=int64#1)
- # asm 2: movdqa <r9=%xmm9,144(<rop=%rdi)
- movdqa %xmm9,144(%rdi)
- # qhasm: *(int128 *)(rop + 160) = r10
- # asm 1: movdqa <r10=int6464#11,160(<rop=int64#1)
- # asm 2: movdqa <r10=%xmm10,160(<rop=%rdi)
- movdqa %xmm10,160(%rdi)
- # qhasm: *(int128 *)(rop + 176) = r11
- # asm 1: movdqa <r11=int6464#12,176(<rop=int64#1)
- # asm 2: movdqa <r11=%xmm11,176(<rop=%rdi)
- movdqa %xmm11,176(%rdi)
- # qhasm: leave
- add %r11,%rsp
- mov %rdi,%rax
- mov %rsi,%rdx
- ret
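
The rest of the removed code is the carry chain followed by the stores to rop. Every carry step uses the same floating-point rounding idiom: 0round holds a large "magic" constant (ROUND_ROUND), and adding then subtracting it forces the intermediate to the nearest integer under round-to-nearest, with no cvt instruction and no trip out of the SSE2 pd domain. A minimal C sketch of one generic step, assuming round-to-nearest doubles and strict IEEE compilation (no -ffast-math); the helper name is for illustration, V/VINV correspond to V_V/VINV_VINV, and the constant shown is the usual choice for doubles rather than a value taken from this listing:

    /* One carry step of the chain above: move the high part of r[i]
     * into r[j], keeping only the low part in r[i].  One SIMD lane
     * shown; requires default round-to-nearest, no -ffast-math. */
    static void carry_step(double *r, int i, int j,
                           double V, double VINV)
    {
        const double ROUND = 6755399441055744.0;  /* 2^52 + 2^51 */
        double carry = r[i] * VINV;  /* mulpd VINV_VINV */
        carry += ROUND;              /* addpd 0round    */
        carry -= ROUND;              /* subpd 0round: rounds to integer */
        r[j]  += carry;              /* addpd into the next limb        */
        r[i]  -= carry * V;          /* mulpd V_V, subpd                */
    }

Two kinds of steps deviate from this template, as the listing shows: the r0 and r6 carries use the V6INV_V6INV/V6_V6 constant pair instead of VINV_VINV/V_V, and the r11 carry has no next limb. Instead it wraps around: the carry is subtracted from r0, r3 and r9, subtracted four-fold (via 2t6 scaled by FOUR_FOUR) from r6, and only then multiplied by V_V and removed from r11, which realizes the modular wrap-around for this coefficient representation.
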