author    Timothy B. Terriberry <tterribe@xiph.org>  2012-05-02 10:11:36 -0700
committer John Koleszar <jkoleszar@google.com>       2012-05-02 10:36:01 -0700
commit    e50c842755993b183c1c56d72000be7918bf0bfb
tree      99f934117f6fdadfeeb2f7facae275b53831dcfd
parent    22ae1403e99e417dac4f6432915d852c824a94c9
Fix TEXTRELs in the ARM asm.
Besides imposing a performance penalty at startup in most configurations, these relocations break the dynamic linker for native Fennec, since it does not support them at all.

Change-Id: Id5dc768609354ebb4379966eb61a7313e6fd18de
Diffstat (limited to 'vp8')
-rw-r--r--  vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm | 14
-rw-r--r--  vp8/common/arm/neon/vp8_subpixelvariance8x8_neon.asm   |  4
-rw-r--r--  vp8/encoder/arm/neon/fastquantizeb_neon.asm            |  7
3 files changed, 9 insertions(+), 16 deletions(-)
diff --git a/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm b/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm
index d753ad129..e7a3ed173 100644
--- a/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm
+++ b/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm
@@ -9,6 +9,11 @@
;
+bilinear_taps_coeff
+ DCD 128, 0, 112, 16, 96, 32, 80, 48, 64, 64, 48, 80, 32, 96, 16, 112
+
+;-----------------
+
EXPORT |vp8_sub_pixel_variance16x16_neon_func|
ARM
REQUIRE8
@@ -27,7 +32,7 @@
|vp8_sub_pixel_variance16x16_neon_func| PROC
push {r4-r6, lr}
- ldr r12, _BilinearTaps_coeff_
+ adr r12, bilinear_taps_coeff
ldr r4, [sp, #16] ;load *dst_ptr from stack
ldr r5, [sp, #20] ;load dst_pixels_per_line from stack
ldr r6, [sp, #24] ;load *sse from stack
@@ -415,11 +420,4 @@ sub_pixel_variance16x16_neon_loop
ENDP
-;-----------------
-
-_BilinearTaps_coeff_
- DCD bilinear_taps_coeff
-bilinear_taps_coeff
- DCD 128, 0, 112, 16, 96, 32, 80, 48, 64, 64, 48, 80, 32, 96, 16, 112
-
END
diff --git a/vp8/common/arm/neon/vp8_subpixelvariance8x8_neon.asm b/vp8/common/arm/neon/vp8_subpixelvariance8x8_neon.asm
index cc7ae52c9..f6b684753 100644
--- a/vp8/common/arm/neon/vp8_subpixelvariance8x8_neon.asm
+++ b/vp8/common/arm/neon/vp8_subpixelvariance8x8_neon.asm
@@ -27,7 +27,7 @@
|vp8_sub_pixel_variance8x8_neon| PROC
push {r4-r5, lr}
- ldr r12, _BilinearTaps_coeff_
+ adr r12, bilinear_taps_coeff
ldr r4, [sp, #12] ;load *dst_ptr from stack
ldr r5, [sp, #16] ;load dst_pixels_per_line from stack
ldr lr, [sp, #20] ;load *sse from stack
@@ -216,8 +216,6 @@ sub_pixel_variance8x8_neon_loop
;-----------------
-_BilinearTaps_coeff_
- DCD bilinear_taps_coeff
bilinear_taps_coeff
DCD 128, 0, 112, 16, 96, 32, 80, 48, 64, 64, 48, 80, 32, 96, 16, 112
diff --git a/vp8/encoder/arm/neon/fastquantizeb_neon.asm b/vp8/encoder/arm/neon/fastquantizeb_neon.asm
index c970cb73e..143058842 100644
--- a/vp8/encoder/arm/neon/fastquantizeb_neon.asm
+++ b/vp8/encoder/arm/neon/fastquantizeb_neon.asm
@@ -98,7 +98,7 @@
vmul.s16 q2, q6, q4 ; x * Dequant
vmul.s16 q3, q7, q5
- ldr r0, _inv_zig_zag_ ; load ptr of inverse zigzag table
+ adr r0, inv_zig_zag ; load ptr of inverse zigzag table
vceq.s16 q8, q8 ; set q8 to all 1
@@ -181,7 +181,7 @@
vadd.s16 q12, q14 ; x + Round
vadd.s16 q13, q15
- ldr r0, _inv_zig_zag_ ; load ptr of inverse zigzag table
+ adr r0, inv_zig_zag ; load ptr of inverse zigzag table
vqdmulh.s16 q12, q8 ; y = ((Round+abs(z)) * Quant) >> 16
vqdmulh.s16 q13, q9
@@ -247,9 +247,6 @@ zero_output
ENDP
; default inverse zigzag table is defined in vp8/common/entropy.c
-_inv_zig_zag_
- DCD inv_zig_zag
-
ALIGN 16 ; enable use of @128 bit aligned loads
inv_zig_zag
DCW 0x0001, 0x0002, 0x0006, 0x0007