shithub: dav1d

ref: 5d888dde5422d34ff0847ad14352943564f49902
parent: 0d936a1aef07b6b6f950761df9c03c9621f8e206
author: Martin Storsjö <[email protected]>
date: Wed Apr 3 10:57:13 EDT 2019

arm: Consistently use 8/24 columns indentation for assembly

For cases with indented, nested .if/.macro in asm.S, indent those
by 4 chars.

Some initial assembly files were indented to 4/16 columns, while all
the actual implementation files, starting with src/arm/64/mc.S, have
used 8/24 for indentation.
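
As an illustration of the convention (a hypothetical snippet, not part of
the patch below): instruction mnemonics start at column 8 and their
operands at column 24, and nested .if/.macro directives step in by 4
columns:

.macro add_or_sub cond                  @ made-up macro, shown for column layout only
    .if \cond                           @ nested directive: indented a further 4 columns
        add             r0,  r0,  r1    @ mnemonic at column 8, operands at column 24
    .else
        sub             r0,  r0,  r1
    .endif
.endm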

--- a/src/arm/32/util.S
+++ b/src/arm/32/util.S
@@ -34,32 +34,32 @@
 
 .macro movrel rd, val, offset=0
 #if defined(PIC) && defined(__APPLE__)
-    ldr         \rd,  1f
-    b           2f
+        ldr             \rd,  1f
+        b               2f
 1:
-    .word       3f - (2f + 8 - 4 * CONFIG_THUMB)
+        .word           3f - (2f + 8 - 4 * CONFIG_THUMB)
 2:
-    ldr         \rd,  [pc, \rd]
+        ldr             \rd,  [pc, \rd]
 .if \offset < 0
-    sub         \rd,  \rd,  #-(\offset)
+        sub             \rd,  \rd,  #-(\offset)
 .elseif \offset > 0
-    add         \rd,  \rd,  #\offset
+        add             \rd,  \rd,  #\offset
 .endif
-    .non_lazy_symbol_pointer
+        .non_lazy_symbol_pointer
 3:
-    .indirect_symbol \val
-    .word       0
-    .text
+        .indirect_symbol \val
+        .word       0
+        .text
 #elif defined(PIC)
-    ldr         \rd,  1f
-    b           2f
+        ldr             \rd,  1f
+        b               2f
 1:
-    .word       \val + \offset - (2f + 8 - 4 * CONFIG_THUMB)
+        .word           \val + \offset - (2f + 8 - 4 * CONFIG_THUMB)
 2:
-    add         \rd,  \rd,  pc
+        add             \rd,  \rd,  pc
 #else
-    movw        \rd, #:lower16:\val+\offset
-    movt        \rd, #:upper16:\val+\offset
+        movw            \rd, #:lower16:\val+\offset
+        movt            \rd, #:upper16:\val+\offset
 #endif
 .endm
 
--- a/src/arm/64/util.S
+++ b/src/arm/64/util.S
@@ -35,57 +35,57 @@
 .macro  movrel rd, val, offset=0
 #if defined(__APPLE__)
   .if \offset < 0
-    adrp        \rd, \val@PAGE
-    add         \rd, \rd, \val@PAGEOFF
-    sub         \rd, \rd, -(\offset)
+        adrp            \rd, \val@PAGE
+        add             \rd, \rd, \val@PAGEOFF
+        sub             \rd, \rd, -(\offset)
   .else
-    adrp        \rd, \val+(\offset)@PAGE
-    add         \rd, \rd, \val+(\offset)@PAGEOFF
+        adrp            \rd, \val+(\offset)@PAGE
+        add             \rd, \rd, \val+(\offset)@PAGEOFF
   .endif
 #elif defined(PIC) && defined(_WIN32)
   .if \offset < 0
-    adrp        \rd, \val
-    add         \rd, \rd, :lo12:\val
-    sub         \rd, \rd, -(\offset)
+        adrp            \rd, \val
+        add             \rd, \rd, :lo12:\val
+        sub             \rd, \rd, -(\offset)
   .else
-    adrp        \rd, \val+(\offset)
-    add         \rd, \rd, :lo12:\val+(\offset)
+        adrp            \rd, \val+(\offset)
+        add             \rd, \rd, :lo12:\val+(\offset)
   .endif
 #elif defined(PIC)
-    adrp        \rd, \val+(\offset)
-    add         \rd, \rd, :lo12:\val+(\offset)
+        adrp            \rd, \val+(\offset)
+        add             \rd, \rd, :lo12:\val+(\offset)
 #else
-    ldr         \rd, =\val+\offset
+        ldr             \rd, =\val+\offset
 #endif
 .endm
 
 .macro transpose_8x8b r0, r1, r2, r3, r4, r5, r6, r7, r8, r9
-    trn1        \r8\().8b,  \r0\().8b,  \r1\().8b
-    trn2        \r9\().8b,  \r0\().8b,  \r1\().8b
-    trn1        \r1\().8b,  \r2\().8b,  \r3\().8b
-    trn2        \r3\().8b,  \r2\().8b,  \r3\().8b
-    trn1        \r0\().8b,  \r4\().8b,  \r5\().8b
-    trn2        \r5\().8b,  \r4\().8b,  \r5\().8b
-    trn1        \r2\().8b,  \r6\().8b,  \r7\().8b
-    trn2        \r7\().8b,  \r6\().8b,  \r7\().8b
+        trn1            \r8\().8b,  \r0\().8b,  \r1\().8b
+        trn2            \r9\().8b,  \r0\().8b,  \r1\().8b
+        trn1            \r1\().8b,  \r2\().8b,  \r3\().8b
+        trn2            \r3\().8b,  \r2\().8b,  \r3\().8b
+        trn1            \r0\().8b,  \r4\().8b,  \r5\().8b
+        trn2            \r5\().8b,  \r4\().8b,  \r5\().8b
+        trn1            \r2\().8b,  \r6\().8b,  \r7\().8b
+        trn2            \r7\().8b,  \r6\().8b,  \r7\().8b
 
-    trn1        \r4\().4h,  \r0\().4h,  \r2\().4h
-    trn2        \r2\().4h,  \r0\().4h,  \r2\().4h
-    trn1        \r6\().4h,  \r5\().4h,  \r7\().4h
-    trn2        \r7\().4h,  \r5\().4h,  \r7\().4h
-    trn1        \r5\().4h,  \r9\().4h,  \r3\().4h
-    trn2        \r9\().4h,  \r9\().4h,  \r3\().4h
-    trn1        \r3\().4h,  \r8\().4h,  \r1\().4h
-    trn2        \r8\().4h,  \r8\().4h,  \r1\().4h
-
-    trn1        \r0\().2s,  \r3\().2s,  \r4\().2s
-    trn2        \r4\().2s,  \r3\().2s,  \r4\().2s
-    trn1        \r1\().2s,  \r5\().2s,  \r6\().2s
-    trn2        \r5\().2s,  \r5\().2s,  \r6\().2s
-    trn2        \r6\().2s,  \r8\().2s,  \r2\().2s
-    trn1        \r2\().2s,  \r8\().2s,  \r2\().2s
-    trn1        \r3\().2s,  \r9\().2s,  \r7\().2s
-    trn2        \r7\().2s,  \r9\().2s,  \r7\().2s
+        trn1            \r4\().4h,  \r0\().4h,  \r2\().4h
+        trn2            \r2\().4h,  \r0\().4h,  \r2\().4h
+        trn1            \r6\().4h,  \r5\().4h,  \r7\().4h
+        trn2            \r7\().4h,  \r5\().4h,  \r7\().4h
+        trn1            \r5\().4h,  \r9\().4h,  \r3\().4h
+        trn2            \r9\().4h,  \r9\().4h,  \r3\().4h
+        trn1            \r3\().4h,  \r8\().4h,  \r1\().4h
+        trn2            \r8\().4h,  \r8\().4h,  \r1\().4h
+
+        trn1            \r0\().2s,  \r3\().2s,  \r4\().2s
+        trn2            \r4\().2s,  \r3\().2s,  \r4\().2s
+        trn1            \r1\().2s,  \r5\().2s,  \r6\().2s
+        trn2            \r5\().2s,  \r5\().2s,  \r6\().2s
+        trn2            \r6\().2s,  \r8\().2s,  \r2\().2s
+        trn1            \r2\().2s,  \r8\().2s,  \r2\().2s
+        trn1            \r3\().2s,  \r9\().2s,  \r7\().2s
+        trn2            \r7\().2s,  \r9\().2s,  \r7\().2s
 .endm
 
 #endif /* DAV1D_SRC_ARM_64_UTIL_S */
--- a/src/arm/asm.S
+++ b/src/arm/asm.S
@@ -31,12 +31,12 @@
 #include "config.h"
 
 #if ARCH_ARM
-    .syntax unified
+        .syntax unified
 #ifdef __ELF__
-    .arch armv7-a
-    .fpu neon
-    .eabi_attribute 10, 0           // suppress Tag_FP_arch
-    .eabi_attribute 12, 0           // suppress Tag_Advanced_SIMD_arch
+        .arch armv7-a
+        .fpu neon
+        .eabi_attribute 10, 0           // suppress Tag_FP_arch
+        .eabi_attribute 12, 0           // suppress Tag_Advanced_SIMD_arch
 #endif
 
 #ifdef _WIN32
@@ -46,7 +46,7 @@
 #endif
 
 #if CONFIG_THUMB
-    .thumb
+        .thumb
 #define A @
 #define T
 #else
@@ -86,25 +86,25 @@
 #endif
         .purgem endfunc
     .endm
-    .text
-    .align \align
-  .if \export
-    .global EXTERN\name
+        .text
+        .align \align
+    .if \export
+        .global EXTERN\name
 #ifdef __ELF__
-    .type   EXTERN\name, %function
+        .type   EXTERN\name, %function
 #endif
 #if HAVE_AS_FUNC
-    .func   EXTERN\name
+        .func   EXTERN\name
 #endif
 EXTERN\name:
-  .else
+    .else
 #ifdef __ELF__
-    .type \name, %function
+        .type \name, %function
 #endif
 #if HAVE_AS_FUNC
-    .func \name
+        .func \name
 #endif
-  .endif
+    .endif
 \name:
 .endm
 
--- a/tests/checkasm/arm/checkasm_32.S
+++ b/tests/checkasm/arm/checkasm_32.S
@@ -32,22 +32,22 @@
 #include "src/arm/32/util.S"
 
 const register_init, align=3
-    .quad 0x21f86d66c8ca00ce
-    .quad 0x75b6ba21077c48ad
-    .quad 0xed56bb2dcb3c7736
-    .quad 0x8bda43d3fd1a7e06
-    .quad 0xb64a9c9e5d318408
-    .quad 0xdf9a54b303f1d3a3
-    .quad 0x4a75479abd64e097
-    .quad 0x249214109d5d1c88
+        .quad 0x21f86d66c8ca00ce
+        .quad 0x75b6ba21077c48ad
+        .quad 0xed56bb2dcb3c7736
+        .quad 0x8bda43d3fd1a7e06
+        .quad 0xb64a9c9e5d318408
+        .quad 0xdf9a54b303f1d3a3
+        .quad 0x4a75479abd64e097
+        .quad 0x249214109d5d1c88
 endconst
 
 const error_message_fpscr
-    .asciz "failed to preserve register FPSCR, changed bits: %x"
+        .asciz "failed to preserve register FPSCR, changed bits: %x"
 error_message_gpr:
-    .asciz "failed to preserve register r%d"
+        .asciz "failed to preserve register r%d"
 error_message_vfp:
-    .asciz "failed to preserve register d%d"
+        .asciz "failed to preserve register d%d"
 endconst
 
 @ max number of args used by any asm function.
@@ -61,111 +61,111 @@
 .macro clobbercheck variant
 .equ pushed, 4*9
 function checked_call_\variant, export=1
-    push        {r4-r11, lr}
+        push            {r4-r11, lr}
 .ifc \variant, vfp
-    vpush       {d8-d15}
-    fmrx        r4,  FPSCR
-    push        {r4}
+        vpush           {d8-d15}
+        fmrx            r4,  FPSCR
+        push            {r4}
 .equ pushed, pushed + 16*4 + 4
 .endif
 
-    movrel      r12, register_init
+        movrel          r12, register_init
 .ifc \variant, vfp
-    vldm        r12, {d8-d15}
+        vldm            r12, {d8-d15}
 .endif
-    ldm         r12, {r4-r11}
+        ldm             r12, {r4-r11}
 
-    sub         sp,  sp,  #ARG_STACK_A
+        sub             sp,  sp,  #ARG_STACK_A
 .equ pos, 0
 .rept MAX_ARGS-4
-    ldr         r12, [sp, #ARG_STACK_A + pushed + 8 + pos]
-    str         r12, [sp, #pos]
+        ldr             r12, [sp, #ARG_STACK_A + pushed + 8 + pos]
+        str             r12, [sp, #pos]
 .equ pos, pos + 4
 .endr
 
-    mov         r12, r0
-    mov         r0,  r2
-    mov         r1,  r3
-    ldrd        r2,  r3,  [sp, #ARG_STACK_A + pushed]
-    blx         r12
-    add         sp,  sp,  #ARG_STACK_A
+        mov             r12, r0
+        mov             r0,  r2
+        mov             r1,  r3
+        ldrd            r2,  r3,  [sp, #ARG_STACK_A + pushed]
+        blx             r12
+        add             sp,  sp,  #ARG_STACK_A
 
-    push        {r0, r1}
-    movrel      r12, register_init
+        push            {r0, r1}
+        movrel          r12, register_init
 .ifc \variant, vfp
 .macro check_reg_vfp, dreg, offset
-    ldrd        r2,  r3,  [r12, #8 * (\offset)]
-    vmov        r0,  lr,  \dreg
-    eor         r2,  r2,  r0
-    eor         r3,  r3,  lr
-    orrs        r2,  r2,  r3
-    bne         4f
+        ldrd            r2,  r3,  [r12, #8 * (\offset)]
+        vmov            r0,  lr,  \dreg
+        eor             r2,  r2,  r0
+        eor             r3,  r3,  lr
+        orrs            r2,  r2,  r3
+        bne             4f
 .endm
 
 .irp n, 8, 9, 10, 11, 12, 13, 14, 15
-    @ keep track of the checked double/SIMD register
-    mov         r1,  #\n
-    check_reg_vfp d\n, \n-8
+        @ keep track of the checked double/SIMD register
+        mov             r1,  #\n
+        check_reg_vfp   d\n, \n-8
 .endr
 .purgem check_reg_vfp
 
-    fmrx        r1,  FPSCR
-    ldr         r3,  [sp, #8]
-    eor         r1,  r1,  r3
-    @ Ignore changes in bits 0-4 and 7
-    bic         r1,  r1,  #0x9f
-    @ Ignore changes in the topmost 5 bits
-    bics        r1,  r1,  #0xf8000000
-    bne         3f
+        fmrx            r1,  FPSCR
+        ldr             r3,  [sp, #8]
+        eor             r1,  r1,  r3
+        @ Ignore changes in bits 0-4 and 7
+        bic             r1,  r1,  #0x9f
+        @ Ignore changes in the topmost 5 bits
+        bics            r1,  r1,  #0xf8000000
+        bne             3f
 .endif
 
-    @ keep track of the checked GPR
-    mov         r1,  #4
+        @ keep track of the checked GPR
+        mov             r1,  #4
 .macro check_reg reg1, reg2=
-    ldrd        r2,  r3,  [r12], #8
-    eors        r2,  r2,  \reg1
-    bne         2f
-    add         r1,  r1,  #1
+        ldrd            r2,  r3,  [r12], #8
+        eors            r2,  r2,  \reg1
+        bne             2f
+        add             r1,  r1,  #1
 .ifnb \reg2
-    eors        r3,  r3,  \reg2
-    bne         2f
+        eors            r3,  r3,  \reg2
+        bne             2f
 .endif
-    add         r1,  r1,  #1
+        add             r1,  r1,  #1
 .endm
-    check_reg   r4,  r5
-    check_reg   r6,  r7
+        check_reg       r4,  r5
+        check_reg       r6,  r7
 @ r9 is a volatile register in the ios ABI
 #ifdef __APPLE__
-    check_reg   r8
+        check_reg       r8
 #else
-    check_reg   r8,  r9
+        check_reg       r8,  r9
 #endif
-    check_reg   r10, r11
+        check_reg       r10, r11
 .purgem check_reg
 
-    b           0f
+        b               0f
 4:
-    movrel      r0, error_message_vfp
-    b           1f
+        movrel          r0, error_message_vfp
+        b               1f
 3:
-    movrel      r0, error_message_fpscr
-    b           1f
+        movrel          r0, error_message_fpscr
+        b               1f
 2:
-    movrel      r0, error_message_gpr
+        movrel          r0, error_message_gpr
 1:
 #ifdef PREFIX
-    blx         _checkasm_fail_func
+        blx             _checkasm_fail_func
 #else
-    blx         checkasm_fail_func
+        blx             checkasm_fail_func
 #endif
 0:
-    pop         {r0, r1}
+        pop             {r0, r1}
 .ifc \variant, vfp
-    pop         {r2}
-    fmxr        FPSCR, r2
-    vpop        {d8-d15}
+        pop             {r2}
+        fmxr            FPSCR, r2
+        vpop            {d8-d15}
 .endif
-    pop         {r4-r11, pc}
+        pop             {r4-r11, pc}
 endfunc
 .endm
 
--- a/tests/checkasm/arm/checkasm_64.S
+++ b/tests/checkasm/arm/checkasm_64.S
@@ -32,29 +32,29 @@
 #include "src/arm/64/util.S"
 
 const register_init, align=4
-    .quad 0x21f86d66c8ca00ce
-    .quad 0x75b6ba21077c48ad
-    .quad 0xed56bb2dcb3c7736
-    .quad 0x8bda43d3fd1a7e06
-    .quad 0xb64a9c9e5d318408
-    .quad 0xdf9a54b303f1d3a3
-    .quad 0x4a75479abd64e097
-    .quad 0x249214109d5d1c88
-    .quad 0x1a1b2550a612b48c
-    .quad 0x79445c159ce79064
-    .quad 0x2eed899d5a28ddcd
-    .quad 0x86b2536fcd8cf636
-    .quad 0xb0856806085e7943
-    .quad 0x3f2bf84fc0fcca4e
-    .quad 0xacbd382dcf5b8de2
-    .quad 0xd229e1f5b281303f
-    .quad 0x71aeaff20b095fd9
-    .quad 0xab63e2e11fa38ed9
+        .quad 0x21f86d66c8ca00ce
+        .quad 0x75b6ba21077c48ad
+        .quad 0xed56bb2dcb3c7736
+        .quad 0x8bda43d3fd1a7e06
+        .quad 0xb64a9c9e5d318408
+        .quad 0xdf9a54b303f1d3a3
+        .quad 0x4a75479abd64e097
+        .quad 0x249214109d5d1c88
+        .quad 0x1a1b2550a612b48c
+        .quad 0x79445c159ce79064
+        .quad 0x2eed899d5a28ddcd
+        .quad 0x86b2536fcd8cf636
+        .quad 0xb0856806085e7943
+        .quad 0x3f2bf84fc0fcca4e
+        .quad 0xacbd382dcf5b8de2
+        .quad 0xd229e1f5b281303f
+        .quad 0x71aeaff20b095fd9
+        .quad 0xab63e2e11fa38ed9
 endconst
 
 
 const error_message
-    .asciz "failed to preserve register"
+        .asciz "failed to preserve register"
 endconst
 
 
@@ -64,107 +64,107 @@
 #define CLOBBER_STACK ((8*MAX_ARGS + 15) & ~15)
 
 function stack_clobber, export=1
-    mov         x3,  sp
-    mov         x2,  #CLOBBER_STACK
+        mov             x3,  sp
+        mov             x2,  #CLOBBER_STACK
 1:
-    stp         x0,  x1,  [sp, #-16]!
-    subs        x2,  x2,  #16
-    b.gt        1b
-    mov         sp,  x3
-    ret
+        stp             x0,  x1,  [sp, #-16]!
+        subs            x2,  x2,  #16
+        b.gt            1b
+        mov             sp,  x3
+        ret
 endfunc
 
 #define ARG_STACK ((8*(MAX_ARGS - 8) + 15) & ~15)
 
 function checked_call, export=1
-    stp         x29, x30, [sp, #-16]!
-    mov         x29, sp
-    stp         x19, x20, [sp, #-16]!
-    stp         x21, x22, [sp, #-16]!
-    stp         x23, x24, [sp, #-16]!
-    stp         x25, x26, [sp, #-16]!
-    stp         x27, x28, [sp, #-16]!
-    stp         d8,  d9,  [sp, #-16]!
-    stp         d10, d11, [sp, #-16]!
-    stp         d12, d13, [sp, #-16]!
-    stp         d14, d15, [sp, #-16]!
+        stp             x29, x30, [sp, #-16]!
+        mov             x29, sp
+        stp             x19, x20, [sp, #-16]!
+        stp             x21, x22, [sp, #-16]!
+        stp             x23, x24, [sp, #-16]!
+        stp             x25, x26, [sp, #-16]!
+        stp             x27, x28, [sp, #-16]!
+        stp             d8,  d9,  [sp, #-16]!
+        stp             d10, d11, [sp, #-16]!
+        stp             d12, d13, [sp, #-16]!
+        stp             d14, d15, [sp, #-16]!
 
-    movrel      x9, register_init
-    ldp         d8,  d9,  [x9], #16
-    ldp         d10, d11, [x9], #16
-    ldp         d12, d13, [x9], #16
-    ldp         d14, d15, [x9], #16
-    ldp         x19, x20, [x9], #16
-    ldp         x21, x22, [x9], #16
-    ldp         x23, x24, [x9], #16
-    ldp         x25, x26, [x9], #16
-    ldp         x27, x28, [x9], #16
+        movrel          x9, register_init
+        ldp             d8,  d9,  [x9], #16
+        ldp             d10, d11, [x9], #16
+        ldp             d12, d13, [x9], #16
+        ldp             d14, d15, [x9], #16
+        ldp             x19, x20, [x9], #16
+        ldp             x21, x22, [x9], #16
+        ldp             x23, x24, [x9], #16
+        ldp             x25, x26, [x9], #16
+        ldp             x27, x28, [x9], #16
 
-    sub         sp,  sp,  #ARG_STACK
+        sub             sp,  sp,  #ARG_STACK
 .equ pos, 0
 .rept MAX_ARGS-8
-    // Skip the first 8 args, that are loaded into registers
-    ldr         x9, [x29, #16 + 8*8 + pos]
-    str         x9, [sp, #pos]
+        // Skip the first 8 args, that are loaded into registers
+        ldr             x9, [x29, #16 + 8*8 + pos]
+        str             x9, [sp, #pos]
 .equ pos, pos + 8
 .endr
 
-    mov         x12, x0
-    ldp         x0,  x1,  [x29, #16]
-    ldp         x2,  x3,  [x29, #32]
-    ldp         x4,  x5,  [x29, #48]
-    ldp         x6,  x7,  [x29, #64]
-    blr         x12
-    add         sp,  sp,  #ARG_STACK
-    stp         x0,  x1,  [sp, #-16]!
-    movrel      x9, register_init
-    movi        v3.8h,  #0
+        mov             x12, x0
+        ldp             x0,  x1,  [x29, #16]
+        ldp             x2,  x3,  [x29, #32]
+        ldp             x4,  x5,  [x29, #48]
+        ldp             x6,  x7,  [x29, #64]
+        blr             x12
+        add             sp,  sp,  #ARG_STACK
+        stp             x0,  x1,  [sp, #-16]!
+        movrel          x9, register_init
+        movi            v3.8h,  #0
 
 .macro check_reg_neon reg1, reg2
-    ldr         q0,  [x9], #16
-    uzp1        v1.2d,  v\reg1\().2d, v\reg2\().2d
-    eor         v0.16b, v0.16b, v1.16b
-    orr         v3.16b, v3.16b, v0.16b
+        ldr             q0,  [x9], #16
+        uzp1            v1.2d,  v\reg1\().2d, v\reg2\().2d
+        eor             v0.16b, v0.16b, v1.16b
+        orr             v3.16b, v3.16b, v0.16b
 .endm
-    check_reg_neon  8,  9
-    check_reg_neon  10, 11
-    check_reg_neon  12, 13
-    check_reg_neon  14, 15
-    uqxtn       v3.8b,  v3.8h
-    umov        x3,  v3.d[0]
+        check_reg_neon  8,  9
+        check_reg_neon  10, 11
+        check_reg_neon  12, 13
+        check_reg_neon  14, 15
+        uqxtn           v3.8b,  v3.8h
+        umov            x3,  v3.d[0]
 
 .macro check_reg reg1, reg2
-    ldp         x0,  x1,  [x9], #16
-    eor         x0,  x0,  \reg1
-    eor         x1,  x1,  \reg2
-    orr         x3,  x3,  x0
-    orr         x3,  x3,  x1
+        ldp             x0,  x1,  [x9], #16
+        eor             x0,  x0,  \reg1
+        eor             x1,  x1,  \reg2
+        orr             x3,  x3,  x0
+        orr             x3,  x3,  x1
 .endm
-    check_reg   x19, x20
-    check_reg   x21, x22
-    check_reg   x23, x24
-    check_reg   x25, x26
-    check_reg   x27, x28
+        check_reg       x19, x20
+        check_reg       x21, x22
+        check_reg       x23, x24
+        check_reg       x25, x26
+        check_reg       x27, x28
 
-    cbz         x3,  0f
+        cbz             x3,  0f
 
-    movrel      x0, error_message
+        movrel          x0, error_message
 #ifdef PREFIX
-    bl          _checkasm_fail_func
+        bl              _checkasm_fail_func
 #else
-    bl          checkasm_fail_func
+        bl              checkasm_fail_func
 #endif
 0:
-    ldp         x0,  x1,  [sp], #16
-    ldp         d14, d15, [sp], #16
-    ldp         d12, d13, [sp], #16
-    ldp         d10, d11, [sp], #16
-    ldp         d8,  d9,  [sp], #16
-    ldp         x27, x28, [sp], #16
-    ldp         x25, x26, [sp], #16
-    ldp         x23, x24, [sp], #16
-    ldp         x21, x22, [sp], #16
-    ldp         x19, x20, [sp], #16
-    ldp         x29, x30, [sp], #16
-    ret
+        ldp             x0,  x1,  [sp], #16
+        ldp             d14, d15, [sp], #16
+        ldp             d12, d13, [sp], #16
+        ldp             d10, d11, [sp], #16
+        ldp             d8,  d9,  [sp], #16
+        ldp             x27, x28, [sp], #16
+        ldp             x25, x26, [sp], #16
+        ldp             x23, x24, [sp], #16
+        ldp             x21, x22, [sp], #16
+        ldp             x19, x20, [sp], #16
+        ldp             x29, x30, [sp], #16
+        ret
 endfunc