ref: fb348f64dc0bcd4c0aaf686afbdd810484a5536f
parent: 52e9b4353f968fd27e2bd912b0e2302509063068
author: Martin Storsjö <[email protected]>
date: Tue Mar 3 11:21:38 EST 2020
arm64: mc: NEON implementation of blend for 16bpc

Checkasm numbers:         Cortex A53     A72     A73
blend_h_w2_16bpc_neon:         109.3    83.1    56.7
blend_h_w4_16bpc_neon:         114.1    61.4    62.3
blend_h_w8_16bpc_neon:         133.3    80.8    81.1
blend_h_w16_16bpc_neon:        215.6   132.7   149.5
blend_h_w32_16bpc_neon:        390.4   254.2   235.8
blend_h_w64_16bpc_neon:        719.1   456.3   453.8
blend_h_w128_16bpc_neon:      1646.1  1112.3  1065.9
blend_v_w2_16bpc_neon:         185.9   175.9   180.0
blend_v_w4_16bpc_neon:         338.0   183.4   232.1
blend_v_w8_16bpc_neon:         426.5   213.8   250.6
blend_v_w16_16bpc_neon:        678.2   357.8   382.6
blend_v_w32_16bpc_neon:       1098.3   686.2   695.6
blend_w4_16bpc_neon:            75.7    31.5    32.0
blend_w8_16bpc_neon:           134.0    75.0    75.8
blend_w16_16bpc_neon:          467.9   267.3   310.0
blend_w32_16bpc_neon:         1201.9   658.7   779.7

Corresponding numbers for 8bpc for comparison:
blend_h_w2_8bpc_neon:          104.1    55.9    60.8
blend_h_w4_8bpc_neon:          108.9    58.7    48.2
blend_h_w8_8bpc_neon:           99.3    64.4    67.4
blend_h_w16_8bpc_neon:         145.2    93.4    85.1
blend_h_w32_8bpc_neon:         262.2   157.5   148.6
blend_h_w64_8bpc_neon:         466.7   278.9   256.6
blend_h_w128_8bpc_neon:       1054.2   624.7   571.0
blend_v_w2_8bpc_neon:          170.5   106.6   113.4
blend_v_w4_8bpc_neon:          333.0   189.9   225.9
blend_v_w8_8bpc_neon:          314.9   199.0   203.5
blend_v_w16_8bpc_neon:         476.9   300.8   241.1
blend_v_w32_8bpc_neon:         766.9   430.4   415.1
blend_w4_8bpc_neon:             66.7    35.4    26.0
blend_w8_8bpc_neon:            110.7    47.9    48.1
blend_w16_8bpc_neon:           299.4   161.8   162.3
blend_w32_8bpc_neon:           725.8   417.0   432.8
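The per-pixel blend in all three functions is dst = (a*(64 - m) + b*m + 32) >> 6,
with a being the destination pixel and b the pixel read from the second buffer
(x2). As the comments in the assembly note, this is rewritten as
a + (((a - b)*-m + 32) >> 6), so that a single sqrdmulh with a multiplier of
-m << 9 performs the multiply, rounding and shift in one instruction. A minimal
scalar sketch of that arithmetic (illustrative C only, not part of the patch;
the helper name is made up):

    #include <stdint.h>

    // Illustrative only: scalar model of the per-pixel math in the NEON loops.
    // a = dst pixel, b = pixel from the second buffer, m = mask value in 0..64.
    static inline int16_t blend_px_sketch(int16_t a, int16_t b, int m) {
        int32_t diff = a - b;                 // a - b
        int32_t mul  = -(m << 9);             // -m << 9
        // sqrdmulh: (2*x*y + (1 << 15)) >> 16, which with this multiplier
        // equals ((a - b)*-m + 32) >> 6 (no saturation for valid ranges)
        int32_t r = (2 * diff * mul + (1 << 15)) >> 16;
        return (int16_t)(a + r);              // == (a*(64 - m) + b*m + 32) >> 6
    }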
--- a/src/arm/64/mc16.S
+++ b/src/arm/64/mc16.S
@@ -241,6 +241,438 @@
bidir_fn mask, w7
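+ // blend for 16 bpc. Register layout, assuming the standard dav1d blend
+ // prototype (dst, dst_stride, tmp, w, h, mask): x0 = dst, x1 = dst stride
+ // in bytes, x2 = tmp (16-bit pixels), w3 = w, w4 = h, x5 = 8-bit mask.
+ // clz(w)-26 indexes L(blend_tbl); each width-specialized loop handles two
+ // rows per iteration (one full row at a time for w == 32).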
+function blend_16bpc_neon, export=1
+ adr x6, L(blend_tbl)
+ clz w3, w3
+ sub w3, w3, #26
+ ldrh w3, [x6, x3, lsl #1]
+ sub x6, x6, w3, uxtw
+ add x8, x0, x1
+ br x6
+40:
+ lsl x1, x1, #1
+4:
+ ld1 {v2.8b}, [x5], #8
+ ld1 {v1.8h}, [x2], #16
+ ld1 {v0.d}[0], [x0]
+ neg v2.8b, v2.8b // -m
+ subs w4, w4, #2
+ ld1 {v0.d}[1], [x8]
+ sxtl v2.8h, v2.8b
+ shl v2.8h, v2.8h, #9 // -m << 9
+ sub v1.8h, v0.8h, v1.8h // a - b
+ sqrdmulh v1.8h, v1.8h, v2.8h // ((a-b)*-m + 32) >> 6
+ add v0.8h, v0.8h, v1.8h
+ st1 {v0.d}[0], [x0], x1
+ st1 {v0.d}[1], [x8], x1
+ b.gt 4b
+ ret
+80:
+ lsl x1, x1, #1
+8:
+ ld1 {v4.16b}, [x5], #16
+ ld1 {v2.8h, v3.8h}, [x2], #32
+ neg v5.16b, v4.16b // -m
+ ld1 {v0.8h}, [x0]
+ ld1 {v1.8h}, [x8]
+ sxtl v4.8h, v5.8b
+ sxtl2 v5.8h, v5.16b
+ shl v4.8h, v4.8h, #9 // -m << 9
+ shl v5.8h, v5.8h, #9
+ sub v2.8h, v0.8h, v2.8h // a - b
+ sub v3.8h, v1.8h, v3.8h
+ subs w4, w4, #2
+ sqrdmulh v2.8h, v2.8h, v4.8h // ((a-b)*-m + 32) >> 6
+ sqrdmulh v3.8h, v3.8h, v5.8h
+ add v0.8h, v0.8h, v2.8h
+ add v1.8h, v1.8h, v3.8h
+ st1 {v0.8h}, [x0], x1
+ st1 {v1.8h}, [x8], x1
+ b.gt 8b
+ ret
+160:
+ lsl x1, x1, #1
+16:
+ ld1 {v16.16b, v17.16b}, [x5], #32
+ ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x2], #64
+ subs w4, w4, #2
+ neg v18.16b, v16.16b // -m
+ neg v19.16b, v17.16b
+ ld1 {v0.8h, v1.8h}, [x0]
+ sxtl v16.8h, v18.8b
+ sxtl2 v17.8h, v18.16b
+ sxtl v18.8h, v19.8b
+ sxtl2 v19.8h, v19.16b
+ ld1 {v2.8h, v3.8h}, [x8]
+ shl v16.8h, v16.8h, #9 // -m << 9
+ shl v17.8h, v17.8h, #9
+ shl v18.8h, v18.8h, #9
+ shl v19.8h, v19.8h, #9
+ sub v4.8h, v0.8h, v4.8h // a - b
+ sub v5.8h, v1.8h, v5.8h
+ sub v6.8h, v2.8h, v6.8h
+ sub v7.8h, v3.8h, v7.8h
+ sqrdmulh v4.8h, v4.8h, v16.8h // ((a-b)*-m + 32) >> 6
+ sqrdmulh v5.8h, v5.8h, v17.8h
+ sqrdmulh v6.8h, v6.8h, v18.8h
+ sqrdmulh v7.8h, v7.8h, v19.8h
+ add v0.8h, v0.8h, v4.8h
+ add v1.8h, v1.8h, v5.8h
+ add v2.8h, v2.8h, v6.8h
+ add v3.8h, v3.8h, v7.8h
+ st1 {v0.8h, v1.8h}, [x0], x1
+ st1 {v2.8h, v3.8h}, [x8], x1
+ b.gt 16b
+ ret
+32:
+ ld1 {v16.16b, v17.16b}, [x5], #32
+ ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x2], #64
+ subs w4, w4, #1
+ neg v18.16b, v16.16b // -m
+ neg v19.16b, v17.16b
+ sxtl v16.8h, v18.8b
+ sxtl2 v17.8h, v18.16b
+ sxtl v18.8h, v19.8b
+ sxtl2 v19.8h, v19.16b
+ ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0]
+ shl v16.8h, v16.8h, #9 // -m << 9
+ shl v17.8h, v17.8h, #9
+ shl v18.8h, v18.8h, #9
+ shl v19.8h, v19.8h, #9
+ sub v4.8h, v0.8h, v4.8h // a - b
+ sub v5.8h, v1.8h, v5.8h
+ sub v6.8h, v2.8h, v6.8h
+ sub v7.8h, v3.8h, v7.8h
+ sqrdmulh v4.8h, v4.8h, v16.8h // ((a-b)*-m + 32) >> 6
+ sqrdmulh v5.8h, v5.8h, v17.8h
+ sqrdmulh v6.8h, v6.8h, v18.8h
+ sqrdmulh v7.8h, v7.8h, v19.8h
+ add v0.8h, v0.8h, v4.8h
+ add v1.8h, v1.8h, v5.8h
+ add v2.8h, v2.8h, v6.8h
+ add v3.8h, v3.8h, v7.8h
+ st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
+ b.gt 32b
+ ret
+L(blend_tbl):
+ .hword L(blend_tbl) - 32b
+ .hword L(blend_tbl) - 160b
+ .hword L(blend_tbl) - 80b
+ .hword L(blend_tbl) - 40b
+endfunc
+
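+ // blend_h: one mask value per row, read from obmc_masks starting at index h
+ // (x5 = obmc_masks + h); only the top h - (h >> 2) rows are blended
+ // (w4 is reduced accordingly), two rows per iteration.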
+function blend_h_16bpc_neon, export=1
+ adr x6, L(blend_h_tbl)
+ movrel x5, X(obmc_masks)
+ add x5, x5, w4, uxtw
+ sub w4, w4, w4, lsr #2
+ clz w7, w3
+ add x8, x0, x1
+ lsl x1, x1, #1
+ sub w7, w7, #24
+ ldrh w7, [x6, x7, lsl #1]
+ sub x6, x6, w7, uxtw
+ br x6
+2:
+ ld2r {v2.8b, v3.8b}, [x5], #2
+ ld1 {v1.4h}, [x2], #8
+ ext v2.8b, v2.8b, v3.8b, #6
+ subs w4, w4, #2
+ neg v2.8b, v2.8b // -m
+ ld1 {v0.s}[0], [x0]
+ ld1 {v0.s}[1], [x8]
+ sxtl v2.8h, v2.8b
+ shl v2.4h, v2.4h, #9 // -m << 9
+ sub v1.4h, v0.4h, v1.4h // a - b
+ sqrdmulh v1.4h, v1.4h, v2.4h // ((a-b)*-m + 32) >> 6
+ add v0.4h, v0.4h, v1.4h
+ st1 {v0.s}[0], [x0], x1
+ st1 {v0.s}[1], [x8], x1
+ b.gt 2b
+ ret
+4:
+ ld2r {v2.8b, v3.8b}, [x5], #2
+ ld1 {v1.8h}, [x2], #16
+ ext v2.8b, v2.8b, v3.8b, #4
+ subs w4, w4, #2
+ neg v2.8b, v2.8b // -m
+ ld1 {v0.d}[0], [x0]
+ ld1 {v0.d}[1], [x8]
+ sxtl v2.8h, v2.8b
+ shl v2.8h, v2.8h, #9 // -m << 9
+ sub v1.8h, v0.8h, v1.8h // a - b
+ sqrdmulh v1.8h, v1.8h, v2.8h // ((a-b)*-m + 32) >> 6
+ add v0.8h, v0.8h, v1.8h
+ st1 {v0.d}[0], [x0], x1
+ st1 {v0.d}[1], [x8], x1
+ b.gt 4b
+ ret
+8:
+ ld2r {v4.8b, v5.8b}, [x5], #2
+ ld1 {v2.8h, v3.8h}, [x2], #32
+ neg v4.8b, v4.8b // -m
+ neg v5.8b, v5.8b
+ ld1 {v0.8h}, [x0]
+ subs w4, w4, #2
+ sxtl v4.8h, v4.8b
+ sxtl v5.8h, v5.8b
+ ld1 {v1.8h}, [x8]
+ shl v4.8h, v4.8h, #9 // -m << 9
+ shl v5.8h, v5.8h, #9
+ sub v2.8h, v0.8h, v2.8h // a - b
+ sub v3.8h, v1.8h, v3.8h
+ sqrdmulh v2.8h, v2.8h, v4.8h // ((a-b)*-m + 32) >> 6
+ sqrdmulh v3.8h, v3.8h, v5.8h
+ add v0.8h, v0.8h, v2.8h
+ add v1.8h, v1.8h, v3.8h
+ st1 {v0.8h}, [x0], x1
+ st1 {v1.8h}, [x8], x1
+ b.gt 8b
+ ret
+16:
+ ld2r {v16.8b, v17.8b}, [x5], #2
+ ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x2], #64
+ neg v16.8b, v16.8b // -m
+ neg v17.8b, v17.8b
+ ld1 {v0.8h, v1.8h}, [x0]
+ ld1 {v2.8h, v3.8h}, [x8]
+ subs w4, w4, #2
+ sxtl v16.8h, v16.8b
+ sxtl v17.8h, v17.8b
+ shl v16.8h, v16.8h, #9 // -m << 9
+ shl v17.8h, v17.8h, #9
+ sub v4.8h, v0.8h, v4.8h // a - b
+ sub v5.8h, v1.8h, v5.8h
+ sub v6.8h, v2.8h, v6.8h
+ sub v7.8h, v3.8h, v7.8h
+ sqrdmulh v4.8h, v4.8h, v16.8h // ((a-b)*-m + 32) >> 6
+ sqrdmulh v5.8h, v5.8h, v16.8h
+ sqrdmulh v6.8h, v6.8h, v17.8h
+ sqrdmulh v7.8h, v7.8h, v17.8h
+ add v0.8h, v0.8h, v4.8h
+ add v1.8h, v1.8h, v5.8h
+ add v2.8h, v2.8h, v6.8h
+ add v3.8h, v3.8h, v7.8h
+ st1 {v0.8h, v1.8h}, [x0], x1
+ st1 {v2.8h, v3.8h}, [x8], x1
+ b.gt 16b
+ ret
+1280:
+640:
+320:
+ sub x1, x1, w3, uxtw #1
+ add x7, x2, w3, uxtw #1
+321:
+ ld2r {v24.8b, v25.8b}, [x5], #2
+ mov w6, w3
+ neg v24.8b, v24.8b // -m
+ neg v25.8b, v25.8b
+ sxtl v24.8h, v24.8b
+ sxtl v25.8h, v25.8b
+ shl v24.8h, v24.8h, #9 // -m << 9
+ shl v25.8h, v25.8h, #9
+32:
+ ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [x2], #64
+ ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0]
+ subs w6, w6, #32
+ sub v16.8h, v0.8h, v16.8h // a - b
+ sub v17.8h, v1.8h, v17.8h
+ sub v18.8h, v2.8h, v18.8h
+ sub v19.8h, v3.8h, v19.8h
+ ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [x7], #64
+ ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x8]
+ sqrdmulh v16.8h, v16.8h, v24.8h // ((a-b)*-m + 32) >> 6
+ sqrdmulh v17.8h, v17.8h, v24.8h
+ sqrdmulh v18.8h, v18.8h, v24.8h
+ sqrdmulh v19.8h, v19.8h, v24.8h
+ sub v20.8h, v4.8h, v20.8h // a - b
+ sub v21.8h, v5.8h, v21.8h
+ sub v22.8h, v6.8h, v22.8h
+ sub v23.8h, v7.8h, v23.8h
+ add v0.8h, v0.8h, v16.8h
+ add v1.8h, v1.8h, v17.8h
+ add v2.8h, v2.8h, v18.8h
+ add v3.8h, v3.8h, v19.8h
+ sqrdmulh v20.8h, v20.8h, v25.8h // ((a-b)*-m + 32) >> 6
+ sqrdmulh v21.8h, v21.8h, v25.8h
+ sqrdmulh v22.8h, v22.8h, v25.8h
+ sqrdmulh v23.8h, v23.8h, v25.8h
+ st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
+ add v4.8h, v4.8h, v20.8h
+ add v5.8h, v5.8h, v21.8h
+ add v6.8h, v6.8h, v22.8h
+ add v7.8h, v7.8h, v23.8h
+ st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x8], #64
+ b.gt 32b
+ subs w4, w4, #2
+ add x0, x0, x1
+ add x8, x8, x1
+ add x2, x2, w3, uxtw #1
+ add x7, x7, w3, uxtw #1
+ b.gt 321b
+ ret
+L(blend_h_tbl):
+ .hword L(blend_h_tbl) - 1280b
+ .hword L(blend_h_tbl) - 640b
+ .hword L(blend_h_tbl) - 320b
+ .hword L(blend_h_tbl) - 16b
+ .hword L(blend_h_tbl) - 8b
+ .hword L(blend_h_tbl) - 4b
+ .hword L(blend_h_tbl) - 2b
+endfunc
+
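+ // blend_v: one mask value per column, read from obmc_masks starting at
+ // index w (x5 = obmc_masks + w); only the leftmost (w*3) >> 2 columns of
+ // each row are blended and stored (hence the partial stores and the x1
+ // adjustments), two rows per iteration.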
+function blend_v_16bpc_neon, export=1
+ adr x6, L(blend_v_tbl)
+ movrel x5, X(obmc_masks)
+ add x5, x5, w3, uxtw
+ clz w3, w3
+ add x8, x0, x1
+ lsl x1, x1, #1
+ sub w3, w3, #26
+ ldrh w3, [x6, x3, lsl #1]
+ sub x6, x6, w3, uxtw
+ br x6
+20:
+ ld1r {v2.8b}, [x5]
+ neg v2.8b, v2.8b // -m
+ sxtl v2.8h, v2.8b
+ shl v2.4h, v2.4h, #9 // -m << 9
+2:
+ ld1 {v1.s}[0], [x2], #4
+ ld1 {v0.h}[0], [x0]
+ subs w4, w4, #2
+ ld1 {v1.h}[1], [x2]
+ ld1 {v0.h}[1], [x8]
+ add x2, x2, #4
+ sub v1.4h, v0.4h, v1.4h // a - b
+ sqrdmulh v1.4h, v1.4h, v2.4h // ((a-b)*-m + 32) >> 6
+ add v0.4h, v0.4h, v1.4h
+ st1 {v0.h}[0], [x0], x1
+ st1 {v0.h}[1], [x8], x1
+ b.gt 2b
+ ret
+40:
+ ld1r {v2.2s}, [x5]
+ sub x1, x1, #4
+ neg v2.8b, v2.8b // -m
+ sxtl v2.8h, v2.8b
+ shl v2.8h, v2.8h, #9 // -m << 9
+4:
+ ld1 {v1.8h}, [x2], #16
+ ld1 {v0.d}[0], [x0]
+ ld1 {v0.d}[1], [x8]
+ subs w4, w4, #2
+ sub v1.8h, v0.8h, v1.8h // a - b
+ sqrdmulh v1.8h, v1.8h, v2.8h // ((a-b)*-m + 32) >> 6
+ add v0.8h, v0.8h, v1.8h
+ st1 {v0.s}[0], [x0], #4
+ st1 {v0.s}[2], [x8], #4
+ st1 {v0.h}[2], [x0], x1
+ st1 {v0.h}[6], [x8], x1
+ b.gt 4b
+ ret
+80:
+ ld1 {v4.8b}, [x5]
+ sub x1, x1, #8
+ neg v4.8b, v4.8b // -m
+ sxtl v4.8h, v4.8b
+ shl v4.8h, v4.8h, #9 // -m << 9
+8:
+ ld1 {v2.8h, v3.8h}, [x2], #32
+ ld1 {v0.8h}, [x0]
+ ld1 {v1.8h}, [x8]
+ subs w4, w4, #2
+ sub v2.8h, v0.8h, v2.8h // a - b
+ sub v3.8h, v1.8h, v3.8h
+ sqrdmulh v2.8h, v2.8h, v4.8h // ((a-b)*-m + 32) >> 6
+ sqrdmulh v3.8h, v3.8h, v4.8h
+ add v0.8h, v0.8h, v2.8h
+ add v1.8h, v1.8h, v3.8h
+ st1 {v0.d}[0], [x0], #8
+ st1 {v1.d}[0], [x8], #8
+ st1 {v0.s}[2], [x0], x1
+ st1 {v1.s}[2], [x8], x1
+ b.gt 8b
+ ret
+160:
+ ld1 {v16.8b, v17.8b}, [x5]
+ sub x1, x1, #16
+ neg v16.8b, v16.8b // -m
+ neg v17.8b, v17.8b
+ sxtl v16.8h, v16.8b
+ sxtl v17.8h, v17.8b
+ shl v16.8h, v16.8h, #9 // -m << 9
+ shl v17.4h, v17.4h, #9
+16:
+ ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x2], #64
+ ld1 {v0.8h, v1.8h}, [x0]
+ subs w4, w4, #2
+ ld1 {v2.8h, v3.8h}, [x8]
+ sub v4.8h, v0.8h, v4.8h // a - b
+ sub v5.4h, v1.4h, v5.4h
+ sub v6.8h, v2.8h, v6.8h
+ sub v7.4h, v3.4h, v7.4h
+ sqrdmulh v4.8h, v4.8h, v16.8h // ((a-b)*-m + 32) >> 6
+ sqrdmulh v5.4h, v5.4h, v17.4h
+ sqrdmulh v6.8h, v6.8h, v16.8h
+ sqrdmulh v7.4h, v7.4h, v17.4h
+ add v0.8h, v0.8h, v4.8h
+ add v1.4h, v1.4h, v5.4h
+ add v2.8h, v2.8h, v6.8h
+ add v3.4h, v3.4h, v7.4h
+ st1 {v0.8h}, [x0], #16
+ st1 {v2.8h}, [x8], #16
+ st1 {v1.4h}, [x0], x1
+ st1 {v3.4h}, [x8], x1
+ b.gt 16b
+ ret
+320:
+ ld1 {v24.16b, v25.16b}, [x5]
+ neg v26.16b, v24.16b // -m
+ neg v27.8b, v25.8b
+ sxtl v24.8h, v26.8b
+ sxtl2 v25.8h, v26.16b
+ sxtl v26.8h, v27.8b
+ shl v24.8h, v24.8h, #9 // -m << 9
+ shl v25.8h, v25.8h, #9
+ shl v26.8h, v26.8h, #9
+32:
+ ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [x2], #64
+ ld1 {v0.8h, v1.8h, v2.8h}, [x0]
+ ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [x2], #64
+ ld1 {v4.8h, v5.8h, v6.8h}, [x8]
+ subs w4, w4, #2
+ sub v16.8h, v0.8h, v16.8h // a - b
+ sub v17.8h, v1.8h, v17.8h
+ sub v18.8h, v2.8h, v18.8h
+ sub v20.8h, v4.8h, v20.8h
+ sub v21.8h, v5.8h, v21.8h
+ sub v22.8h, v6.8h, v22.8h
+ sqrdmulh v16.8h, v16.8h, v24.8h // ((a-b)*-m + 32) >> 6
+ sqrdmulh v17.8h, v17.8h, v25.8h
+ sqrdmulh v18.8h, v18.8h, v26.8h
+ sqrdmulh v20.8h, v20.8h, v24.8h
+ sqrdmulh v21.8h, v21.8h, v25.8h
+ sqrdmulh v22.8h, v22.8h, v26.8h
+ add v0.8h, v0.8h, v16.8h
+ add v1.8h, v1.8h, v17.8h
+ add v2.8h, v2.8h, v18.8h
+ add v4.8h, v4.8h, v20.8h
+ add v5.8h, v5.8h, v21.8h
+ add v6.8h, v6.8h, v22.8h
+ st1 {v0.8h, v1.8h, v2.8h}, [x0], x1
+ st1 {v4.8h, v5.8h, v6.8h}, [x8], x1
+ b.gt 32b
+ ret
+L(blend_v_tbl):
+ .hword L(blend_v_tbl) - 320b
+ .hword L(blend_v_tbl) - 160b
+ .hword L(blend_v_tbl) - 80b
+ .hword L(blend_v_tbl) - 40b
+ .hword L(blend_v_tbl) - 20b
+endfunc
+
+
// This has got the same signature as the put_8tap functions,
// and assumes that x9 is set to (clz(w)-24).
function put_neon
--- a/src/arm/mc_init_tmpl.c
+++ b/src/arm/mc_init_tmpl.c
@@ -101,11 +101,11 @@
c->avg = BF(dav1d_avg, neon);
c->w_avg = BF(dav1d_w_avg, neon);
c->mask = BF(dav1d_mask, neon);
-#endif
-#if BITDEPTH == 8
c->blend = BF(dav1d_blend, neon);
c->blend_h = BF(dav1d_blend_h, neon);
c->blend_v = BF(dav1d_blend_v, neon);
+#endif
+#if BITDEPTH == 8
c->w_mask[0] = BF(dav1d_w_mask_444, neon);
c->w_mask[1] = BF(dav1d_w_mask_422, neon);
c->w_mask[2] = BF(dav1d_w_mask_420, neon);