@@ -396,119 +396,119 @@ Lnovec:
396396#elif  defined(__ppc__)
397397
398398DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
399- ;
400- ; void libunwind::Registers_ppc::jumpto()
401- ;
402- ; On entry: 
403- ;  thread_state pointer is in  r3
404- ;
405- 
406-   ; restore integral registerrs
407-   ; skip r0 for now
408-   ; skip r1 for now
409-   lwz     r2, 16 (r3)
410-   ; skip r3 for now
411-   ; skip r4 for now
412-   ; skip r5 for now
413-   lwz     r6, 32 (r3)
414-   lwz     r7, 36 (r3)
415-   lwz     r8, 40 (r3)
416-   lwz     r9, 44 (r3)
417-   lwz    r10, 48 (r3)
418-   lwz    r11, 52 (r3)
419-   lwz    r12, 56 (r3)
420-   lwz    r13, 60 (r3)
421-   lwz    r14, 64 (r3)
422-   lwz    r15, 68 (r3)
423-   lwz    r16, 72 (r3)
424-   lwz    r17, 76 (r3)
425-   lwz    r18, 80 (r3)
426-   lwz    r19, 84 (r3)
427-   lwz    r20, 88 (r3)
428-   lwz    r21, 92 (r3)
429-   lwz    r22, 96 (r3)
430-   lwz    r23,100 (r3)
431-   lwz    r24,104 (r3)
432-   lwz    r25,108 (r3)
433-   lwz    r26,112 (r3)
434-   lwz    r27,116 (r3)
435-   lwz    r28,120 (r3)
436-   lwz    r29,124 (r3)
437-   lwz    r30,128 (r3)
438-   lwz    r31,132 (r3)
439- 
440-   ; restore float registers
441-   lfd    f0, 160 (r3)
442-   lfd    f1, 168 (r3)
443-   lfd    f2, 176 (r3)
444-   lfd    f3, 184 (r3)
445-   lfd    f4, 192 (r3)
446-   lfd    f5, 200 (r3)
447-   lfd    f6, 208 (r3)
448-   lfd    f7, 216 (r3)
449-   lfd    f8, 224 (r3)
450-   lfd    f9, 232 (r3)
451-   lfd    f10,240 (r3)
452-   lfd    f11,248 (r3)
453-   lfd    f12,256 (r3)
454-   lfd    f13,264 (r3)
455-   lfd    f14,272 (r3)
456-   lfd    f15,280 (r3)
457-   lfd    f16,288 (r3)
458-   lfd    f17,296 (r3)
459-   lfd    f18,304 (r3)
460-   lfd    f19,312 (r3)
461-   lfd    f20,320 (r3)
462-   lfd    f21,328 (r3)
463-   lfd    f22,336 (r3)
464-   lfd    f23,344 (r3)
465-   lfd    f24,352 (r3)
466-   lfd    f25,360 (r3)
467-   lfd    f26,368 (r3)
468-   lfd    f27,376 (r3)
469-   lfd    f28,384 (r3)
470-   lfd    f29,392 (r3)
471-   lfd    f30,400 (r3)
472-   lfd    f31,408 (r3)
473- 
474-   ; restore vector registers if any are in  use
475-   lwz    r5,156 (r3)  ; test  VRsave
476-   cmpwi  r5,0 
477-   beq    Lnovec
478- 
479-   subi  r4,r1,16 
480-   rlwinm  r4,r4,0 ,0 ,27   ; mask low 4 -bits
481-   ; r4 is now a 16 -byte aligned pointer into  the red zone
482-   ; the _vectorRegisters may not  be 16 -byte aligned so copy via red zone temp buffer
399+ // 
400+ // void libunwind::Registers_ppc::jumpto() 
401+ // 
402+ // On entry: 
403+ //  thread_state pointer is in r3 
404+ // 
405+ 
406+   // restore integral registerrs 
407+   // skip r0 for now 
408+   // skip r1 for now 
409+   lwz     %r2,  16 (%r3)
410+   // skip r3 for now 
411+   // skip r4 for now 
412+   // skip r5 for now 
413+   lwz     %r6,  32 (%r3)
414+   lwz     %r7,  36 (%r3)
415+   lwz     %r8,  40 (%r3)
416+   lwz     %r9,  44 (%r3)
417+   lwz     %r10, 48 (%r3)
418+   lwz     %r11, 52 (%r3)
419+   lwz     %r12, 56 (%r3)
420+   lwz     %r13, 60 (%r3)
421+   lwz     %r14, 64 (%r3)
422+   lwz     %r15, 68 (%r3)
423+   lwz     %r16, 72 (%r3)
424+   lwz     %r17, 76 (%r3)
425+   lwz     %r18, 80 (%r3)
426+   lwz     %r19, 84 (%r3)
427+   lwz     %r20, 88 (%r3)
428+   lwz     %r21, 92 (%r3)
429+   lwz     %r22, 96 (%r3)
430+   lwz     %r23,100 (%r3)
431+   lwz     %r24,104 (%r3)
432+   lwz     %r25,108 (%r3)
433+   lwz     %r26,112 (%r3)
434+   lwz     %r27,116 (%r3)
435+   lwz     %r28,120 (%r3)
436+   lwz     %r29,124 (%r3)
437+   lwz     %r30,128 (%r3)
438+   lwz     %r31,132 (%r3)
439+ 
440+   // restore float registers 
441+   lfd     %f0, 160 (%r3)
442+   lfd     %f1, 168 (%r3)
443+   lfd     %f2, 176 (%r3)
444+   lfd     %f3, 184 (%r3)
445+   lfd     %f4, 192 (%r3)
446+   lfd     %f5, 200 (%r3)
447+   lfd     %f6, 208 (%r3)
448+   lfd     %f7, 216 (%r3)
449+   lfd     %f8, 224 (%r3)
450+   lfd     %f9, 232 (%r3)
451+   lfd     %f10,240 (%r3)
452+   lfd     %f11,248 (%r3)
453+   lfd     %f12,256 (%r3)
454+   lfd     %f13,264 (%r3)
455+   lfd     %f14,272 (%r3)
456+   lfd     %f15,280 (%r3)
457+   lfd     %f16,288 (%r3)
458+   lfd     %f17,296 (%r3)
459+   lfd     %f18,304 (%r3)
460+   lfd     %f19,312 (%r3)
461+   lfd     %f20,320 (%r3)
462+   lfd     %f21,328 (%r3)
463+   lfd     %f22,336 (%r3)
464+   lfd     %f23,344 (%r3)
465+   lfd     %f24,352 (%r3)
466+   lfd     %f25,360 (%r3)
467+   lfd     %f26,368 (%r3)
468+   lfd     %f27,376 (%r3)
469+   lfd     %f28,384 (%r3)
470+   lfd     %f29,392 (%r3)
471+   lfd     %f30,400 (%r3)
472+   lfd     %f31,408 (%r3)
473+ 
474+   // restore vector registers if any are in use 
475+   lwz     %r5, 156 (%r3)       // test VRsave 
476+   cmpwi   %r5, 0 
477+   beq     Lnovec
483478
479+   subi    %r4, %r1, 16 
480+   rlwinm  %r4, %r4, 0 , 0 , 27   // mask low 4-bits 
481+   // r4 is now a 16-byte aligned pointer into the red zone 
482+   // the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer 
483+  
484484
485485#define  LOAD_VECTOR_UNALIGNEDl(_index) \
486-   andis.  r0,r5,(1 <<(15 -_index))  @ \
487-   beq    Ldone   ## _index     @ \ 
488-   lwz    r0, 424 +_index*16 (r3)  @ \
489-   stw    r0, 0 (r4)        @ \
490-   lwz    r0, 424 +_index*16 +4 (r3)  @ \
491-   stw    r0, 4 (r4)        @ \
492-   lwz    r0, 424 +_index*16 +8 (r3)  @ \
493-   stw    r0, 8 (r4)        @ \
494-   lwz    r0, 424 +_index*16 +12 (r3)@ \
495-   stw    r0, 12 (r4)        @ \
496-   lvx    v ## _index,0, r4    @ \ 
497- Ldone   ## _index: 
486+   andis.  %r0, %r5,  (1 <<(15 -_index))  SEPARATOR  \
487+   beq      Ldone ## _index             SEPARATOR  \
488+   lwz     %r0, 424 +_index*16 (%r3)     SEPARATOR  \
489+   stw     %r0, 0 (%r4)                 SEPARATOR  \
490+   lwz     %r0, 424 +_index*16 +4 (%r3)   SEPARATOR  \
491+   stw     %r0, 4 (%r4)                 SEPARATOR  \
492+   lwz     %r0, 424 +_index*16 +8 (%r3)   SEPARATOR  \
493+   stw     %r0, 8 (%r4)                 SEPARATOR  \
494+   lwz     %r0, 424 +_index*16 +12 (%r3)  SEPARATOR  \
495+   stw     %r0, 12 (%r4)                SEPARATOR  \
496+   lvx     %v ## _index, 0, %r4        SEPARATOR  \
497+   Ldone ## _index:
498498
499499#define  LOAD_VECTOR_UNALIGNEDh(_index) \
500-   andi.  r0,r5,(1 <<(31 -_index))  @ \
501-   beq    Ldone   ## _index    @ \ 
502-   lwz    r0, 424 +_index*16 (r3)  @ \
503-   stw    r0, 0 (r4)        @ \
504-   lwz    r0, 424 +_index*16 +4 (r3)  @ \
505-   stw    r0, 4 (r4)        @ \
506-   lwz    r0, 424 +_index*16 +8 (r3)  @ \
507-   stw    r0, 8 (r4)        @ \
508-   lwz    r0, 424 +_index*16 +12 (r3)@ \
509-   stw    r0, 12 (r4)        @ \
510-   lvx    v ## _index,0, r4    @ \ 
511-   Ldone   ## _index: 
500+   andi.   %r0, %r5,  (1 <<(31 -_index))  SEPARATOR  \
501+   beq      Ldone ## _index             SEPARATOR  \
502+   lwz     %r0, 424 +_index*16 (%r3)     SEPARATOR  \
503+   stw     %r0, 0 (%r4)                 SEPARATOR  \
504+   lwz     %r0, 424 +_index*16 +4 (%r3)   SEPARATOR  \
505+   stw     %r0, 4 (%r4)                 SEPARATOR  \
506+   lwz     %r0, 424 +_index*16 +8 (%r3)   SEPARATOR  \
507+   stw     %r0, 8 (%r4)                 SEPARATOR  \
508+   lwz     %r0, 424 +_index*16 +12 (%r3)  SEPARATOR  \
509+   stw     %r0, 12 (%r4)                SEPARATOR  \
510+   lvx     %v ## _index, 0, %r4        SEPARATOR  \
511+   Ldone ## _index:
512512
513513
514514  LOAD_VECTOR_UNALIGNEDl(0 )
@@ -545,17 +545,17 @@ Ldone  ## _index:
545545  LOAD_VECTOR_UNALIGNEDh(31 )
546546
547547Lnovec: 
548-   lwz    r0, 136 (r3) ;  __cr
549-   mtocrf   255 , r0
550-   lwz    r0, 148 (r3) ;  __ctr
551-   mtctr  r0
552-   lwz    r0, 0 ( r3)  ;  __ssr0
553-   mtctr  r0
554-   lwz    r0, 8 ( r3)  ;  do r0 now
555-   lwz    r5,28 (r3)  ;  do r5 now
556-   lwz    r4,24 (r3)  ;  do r4 now
557-   lwz    r1,12 (r3)  ;  do sp now
558-   lwz    r3,20 (r3)  ;  do r3 last
548+   lwz     %r0, 136 (%r3)    //  __cr
549+   mtcr    %r0
550+   lwz     %r0, 148 (%r3)    //  __ctr
551+   mtctr   %r0
552+   lwz     %r0,    0 (%r3)    //  __ssr0
553+   mtctr   %r0
554+   lwz     %r0,    8 (%r3)    //  do r0 now
555+   lwz     %r5,   28 (%r3)    //  do r5 now
556+   lwz     %r4,   24 (%r3)    //  do r4 now
557+   lwz     %r1,   12 (%r3)    //  do sp now
558+   lwz     %r3,   20 (%r3)    //  do r3 last
559559  bctr
560560
561561#elif  defined(__arm64__) || defined(__aarch64__)
0 commit comments