summaryrefslogtreecommitdiff
path: root/vp8
diff options
context:
space:
mode:
authorJim Bankoski <jimbankoski@google.com>2016-12-16 08:50:55 -0800
committerJim Bankoski <jimbankoski@google.com>2016-12-16 08:50:55 -0800
commit318a1ff5ecddd6d60fcf87147f1ff741d39b3dc4 (patch)
tree9fc90d98b03cabdea20f46b058cf2459e5bffd34 /vp8
parent2b1ec65b5d17a833a9f2330afba38ca2ad71c03d (diff)
downloadlibvpx-318a1ff5ecddd6d60fcf87147f1ff741d39b3dc4.tar
libvpx-318a1ff5ecddd6d60fcf87147f1ff741d39b3dc4.tar.gz
libvpx-318a1ff5ecddd6d60fcf87147f1ff741d39b3dc4.tar.bz2
libvpx-318a1ff5ecddd6d60fcf87147f1ff741d39b3dc4.zip
vp8: use threading mutexes for tsan only.
To avoid a decode performance hit of 2% when running on hyperthreaded cores, this patch only uses the mutexes when we are running under tsan. This is safe because 32-bit operations like load and store are atomic on all the platforms we care about. Tsan warns about race situations, but in this case, in either situation (read occurs before write, or write before read), the worst case is that we go around one extra time in the loop, so the ordering doesn't really matter. That said, a few other things have been tried: for instance, as per webrtc/base/atomicops.h#52, one approach uses: __atomic_load_n(i, __ATOMIC_ACQUIRE); __atomic_store_n(i, value, __ATOMIC_RELEASE); This code works on gcc and clang (replacing protected write and read) and avoids tsan errors, incurring no penalty in performance. In C11 it's replaced by straight atomic operations. However, there is no equivalent in the Visual Studio versions we support, as int32 on all Windows platforms is already atomic. To avoid tsan-like warnings on Windows we'd need to use interlocked exchange, and the end result doesn't gain us anything. Change-Id: I2066e3c7f42641ebb23d53feb1f16f23f85bcf59
Diffstat (limited to 'vp8')
-rw-r--r--vp8/common/threading.h35
1 file changed, 18 insertions, 17 deletions
diff --git a/vp8/common/threading.h b/vp8/common/threading.h
index 63fd4ccb9..ece64f3fb 100644
--- a/vp8/common/threading.h
+++ b/vp8/common/threading.h
@@ -191,29 +191,24 @@ static inline int sem_destroy(sem_t *sem) {
#define x86_pause_hint()
#endif
-#include "vpx_util/vpx_thread.h"
-
-static INLINE void mutex_lock(pthread_mutex_t *const mutex) {
- const int kMaxTryLocks = 4000;
- int locked = 0;
- int i;
-
- for (i = 0; i < kMaxTryLocks; ++i) {
- if (!pthread_mutex_trylock(mutex)) {
- locked = 1;
- break;
- }
- }
+#if defined(__has_feature)
+#if __has_feature(thread_sanitizer)
+#define USE_MUTEX_LOCK 1
+#endif
+#endif
- if (!locked) pthread_mutex_lock(mutex);
-}
+#include "vpx_util/vpx_thread.h"
static INLINE int protected_read(pthread_mutex_t *const mutex, const int *p) {
+ (void)mutex;
+#if defined(USE_MUTEX_LOCK)
int ret;
- mutex_lock(mutex);
+ pthread_mutex_lock(mutex);
ret = *p;
pthread_mutex_unlock(mutex);
return ret;
+#endif
+ return *p;
}
static INLINE void sync_read(pthread_mutex_t *const mutex, int mb_col,
@@ -226,11 +221,17 @@ static INLINE void sync_read(pthread_mutex_t *const mutex, int mb_col,
}
static INLINE void protected_write(pthread_mutex_t *mutex, int *p, int v) {
- mutex_lock(mutex);
+ (void)mutex;
+#if defined(USE_MUTEX_LOCK)
+ pthread_mutex_lock(mutex);
*p = v;
pthread_mutex_unlock(mutex);
+ return;
+#endif
+ *p = v;
}
+#undef USE_MUTEX_LOCK
#endif /* CONFIG_OS_SUPPORT && CONFIG_MULTITHREAD */
#ifdef __cplusplus