path: root/vpx_util/vpx_atomics.h
authorPeter Boström <pbos@google.com>2017-08-25 15:48:11 -0700
committerPeter Boström <pbos@google.com>2017-08-31 17:55:57 -0700
commitd42e876164c63eead9d0db6f53cda866f508cd4a (patch)
tree63297238ef8c9f5e4321d41f2b42748daf072810 /vpx_util/vpx_atomics.h
parent2d0c11093e9854980a84d9415da2eb6a4cd42647 (diff)
Add atomics to vp8 synchronization primitives.
Fixes an issue on the iPad Pro 10.5 (and probably other places) where threads were not properly synchronized. On x86 this data race was benign in practice: loads and stores of an int are atomic there, and the program has not been observed to be miscompiled. No such guarantees exist outside x86, and real problems manifested there, with libvpx reliably producing a broken bitstream for even just the initial keyframe.

The bug was detected in WebRTC when this device started using multithreading (its CPU count is higher than that of earlier devices, where the problem did not manifest because single-threading was used in practice). It was not caught on the thread-sanitizer bots because mutexes were conditionally compiled in on that configuration to simulate the protected read and write semantics that x86 provided in practice.

This change also removes several mutexes, so encoder/decoder state is lighter-weight and we no longer have to initialize mutexes that went unused on non-thread-sanitizer platforms.

Change-Id: If41fcb0d99944f7bbc8ec40877cdc34d672ae72a
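
To make the new acquire/release pairing concrete, here is a minimal, hypothetical sketch (not part of this commit; row_progress, producer and consumer are illustrative names only) of the producer/consumer pattern these primitives are meant to back in vp8's row-based threading:

#include "vpx_util/vpx_atomics.h"

// Shared progress counter: the last macroblock row fully written by the
// producer, -1 meaning nothing has been published yet.
static vpx_atomic_int row_progress = VPX_ATOMIC_INIT(-1);

// Worker thread: publish each finished row.
static void producer(int num_rows) {
  int row;
  for (row = 0; row < num_rows; ++row) {
    /* ... encode/decode the row ... */
    // Release store: every write to the row's data made above is visible to a
    // thread that observes this value through a matching acquire load.
    vpx_atomic_store_release(&row_progress, row);
  }
}

// Dependent thread: wait (here, by spinning) until the row it needs is ready.
static void consumer(int needed_row) {
  while (vpx_atomic_load_acquire(&row_progress) < needed_row) {
    /* spin or yield */
  }
  /* ... safe to read the data produced for needed_row ... */
}

On x86 the release store and acquire load reduce to plain accesses plus a compiler barrier, so nothing extra is paid there; on ARM and MIPS the acquire/release semantics additionally require hardware barriers, which the __atomic builtins, <stdatomic.h>, or the fallback macros in this header provide.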
Diffstat (limited to 'vpx_util/vpx_atomics.h')
-rw-r--r--  vpx_util/vpx_atomics.h  133
1 file changed, 133 insertions, 0 deletions
diff --git a/vpx_util/vpx_atomics.h b/vpx_util/vpx_atomics.h
new file mode 100644
index 000000000..a471fd186
--- /dev/null
+++ b/vpx_util/vpx_atomics.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_UTIL_VPX_ATOMICS_H_
+#define VPX_UTIL_VPX_ATOMICS_H_
+
+#include "./vpx_config.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif // __cplusplus
+
+#if CONFIG_OS_SUPPORT && CONFIG_MULTITHREAD
+
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \
+ (defined(__cplusplus) && __cplusplus >= 201112L)
+// Where available, use <stdatomic.h>
+#include <stdatomic.h>
+#define VPX_USE_STD_ATOMIC
+#else
+// Look for built-ins.
+#if !defined(__has_builtin)
+#define __has_builtin(x) 0 // Compatibility with non-clang compilers.
+#endif // !defined(__has_builtin)
+
+#if (__has_builtin(__atomic_load_n)) || \
+ (defined(__GNUC__) && \
+ (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)))
+// For GCC >= 4.7 and Clang that support __atomic builtins, use those.
+#define VPX_USE_ATOMIC_BUILTINS
+#else
+// Use platform-specific asm barriers.
+#if defined(_MSC_VER)
+// TODO(pbos): This assumes that newer versions of MSVC are building with the
+// default /volatile:ms (or older, where this is always true). Consider adding
+// support for using <atomic> instead of stdatomic.h when building C++11 under
+// MSVC. It's unclear what to do for plain C under /volatile:iso (inline asm?);
+// there are no explicit Interlocked* functions for only storing or loading
+// (presumably because volatile has historically implied that on MSVC).
+//
+// For earlier versions of MSVC, or under the default /volatile:ms, volatile
+// int accesses are acquire/release and require no barrier.
+#define vpx_atomic_memory_barrier() \
+ do { \
+ } while (0)
+#else
+#if ARCH_X86 || ARCH_X86_64
+// Use a compiler barrier on x86, no runtime penalty.
+#define vpx_atomic_memory_barrier() __asm__ __volatile__("" ::: "memory")
+#elif ARCH_ARM
+#define vpx_atomic_memory_barrier() __asm__ __volatile__("dmb ish" ::: "memory")
+#elif ARCH_MIPS
+#define vpx_atomic_memory_barrier() __asm__ __volatile__("sync" ::: "memory")
+#else
+#error Unsupported architecture!
+#endif // ARCH_X86 || ARCH_X86_64
+#endif // defined(_MSC_VER)
+#endif // atomic builtin availability check
+#endif // stdatomic availability check
+
+// The int is wrapped in a struct so that it is not easily accessed directly
+// on any platform (discouraging programmer errors such as plain assignment).
+// This primitive MUST be initialized with vpx_atomic_init or VPX_ATOMIC_INIT
+// (NOT memset) and accessed only through the vpx_atomic_ functions.
+typedef struct vpx_atomic_int {
+#if defined(VPX_USE_STD_ATOMIC)
+ atomic_int value;
+#else
+ volatile int value;
+#endif  // defined(VPX_USE_STD_ATOMIC)
+} vpx_atomic_int;
+
+#if defined(VPX_USE_STD_ATOMIC)
+#define VPX_ATOMIC_INIT(num) \
+ { ATOMIC_VAR_INIT(num) }
+#else
+#define VPX_ATOMIC_INIT(num) \
+ { num }
+#endif // defined(VPX_USE_STD_ATOMIC)
+
+// Initialization of an atomic int, not thread safe.
+static INLINE void vpx_atomic_init(vpx_atomic_int *atomic, int value) {
+#if defined(VPX_USE_STD_ATOMIC)
+ atomic_init(&atomic->value, value);
+#else
+ atomic->value = value;
+#endif  // defined(VPX_USE_STD_ATOMIC)
+}
+
+static INLINE void vpx_atomic_store_release(vpx_atomic_int *atomic, int value) {
+#if defined(VPX_USE_STD_ATOMIC)
+ atomic_store_explicit(&atomic->value, value, memory_order_release);
+#elif defined(VPX_USE_ATOMIC_BUILTINS)
+ __atomic_store_n(&atomic->value, value, __ATOMIC_RELEASE);
+#else
+ vpx_atomic_memory_barrier();
+ atomic->value = value;
+#endif // defined(VPX_USE_STD_ATOMIC)
+}
+
+static INLINE int vpx_atomic_load_acquire(const vpx_atomic_int *atomic) {
+#if defined(VPX_USE_STD_ATOMIC)
+ // const_cast (in C) that doesn't trigger -Wcast-qual.
+ return atomic_load_explicit(
+ (atomic_int *)(uintptr_t)(const void *)&atomic->value,
+ memory_order_acquire);
+#elif defined(VPX_USE_ATOMIC_BUILTINS)
+ return __atomic_load_n(&atomic->value, __ATOMIC_ACQUIRE);
+#else
+ int v = atomic->value;
+ vpx_atomic_memory_barrier();
+ return v;
+#endif // defined(VPX_USE_STD_ATOMIC)
+}
+
+#undef VPX_USE_STD_ATOMIC
+#undef VPX_USE_ATOMIC_BUILTINS
+#undef vpx_atomic_memory_barrier
+
+#endif /* CONFIG_OS_SUPPORT && CONFIG_MULTITHREAD */
+
+#ifdef __cplusplus
+} // extern "C"
+#endif // __cplusplus
+
+#endif // VPX_UTIL_VPX_ATOMICS_H_
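
As a usage note on the initialization requirement spelled out in the header above (vpx_atomic_init or VPX_ATOMIC_INIT, never memset), here is a small hypothetical sketch; decoder_ctx and last_row_done are made-up names, not part of this change:

#include <string.h>

#include "vpx_util/vpx_atomics.h"

typedef struct decoder_ctx {
  vpx_atomic_int last_row_done;
  int num_threads;
  /* ... other state ... */
} decoder_ctx;

static void decoder_ctx_init(decoder_ctx *ctx) {
  memset(ctx, 0, sizeof(*ctx));  // Fine for the plain members...
  // ...but the atomic must be set up through vpx_atomic_init (or
  // VPX_ATOMIC_INIT for static storage), before any worker threads start.
  vpx_atomic_init(&ctx->last_row_done, -1);
}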