summaryrefslogtreecommitdiff
path: root/vp8
diff options
context:
space:
mode:
authorJohn Koleszar <jkoleszar@google.com>2012-10-03 12:51:29 -0700
committerGerrit Code Review <gerrit@gerrit.golo.chromium.org>2012-10-03 12:51:29 -0700
commit7ca59827e358b7ee830a1c8df3729ac66fb38434 (patch)
treed80803ef861d3a6700bb2048e0c2dc959db1d7c2 /vp8
parentea90cee2a90a25b0a762bab824b187c40335426b (diff)
parent9bbab8e457ff4b4b60ffd8deac98ce49797d4133 (diff)
downloadlibvpx-7ca59827e358b7ee830a1c8df3729ac66fb38434.tar
libvpx-7ca59827e358b7ee830a1c8df3729ac66fb38434.tar.gz
libvpx-7ca59827e358b7ee830a1c8df3729ac66fb38434.tar.bz2
libvpx-7ca59827e358b7ee830a1c8df3729ac66fb38434.zip
Merge "rtcd/win32: use InitializeCriticalSection explicitly"
Diffstat (limited to 'vp8')
-rw-r--r--vp8/common/rtcd.c52
1 file changed, 44 insertions, 8 deletions
diff --git a/vp8/common/rtcd.c b/vp8/common/rtcd.c
index 3150fff26..01dad4691 100644
--- a/vp8/common/rtcd.c
+++ b/vp8/common/rtcd.c
@@ -13,17 +13,43 @@
#if CONFIG_MULTITHREAD && defined(_WIN32)
#include <windows.h>
+#include <stdlib.h>
static void once(void (*func)(void))
{
- /* Using a static initializer here rather than InitializeCriticalSection()
- * since there's no race-free context in which to execute it. Protecting
- * it with an atomic op like InterlockedCompareExchangePointer introduces
- * an x86 dependency, and InitOnceExecuteOnce requires Vista.
- */
- static CRITICAL_SECTION lock = {(void *)-1, -1, 0, 0, 0, 0};
+ static CRITICAL_SECTION *lock;
+ static LONG waiters;
static int done;
+ void *lock_ptr = &lock;
+
+ /* If the initialization is complete, return early. This isn't just an
+ * optimization, it prevents races on the destruction of the global
+ * lock.
+ */
+ if(done)
+ return;
+
+ InterlockedIncrement(&waiters);
- EnterCriticalSection(&lock);
+ /* Get a lock. We create one and try to make it the one-true-lock,
+ * throwing it away if we lost the race.
+ */
+
+ {
+ /* Scope to protect access to new_lock */
+ CRITICAL_SECTION *new_lock = malloc(sizeof(CRITICAL_SECTION));
+ InitializeCriticalSection(new_lock);
+ if (InterlockedCompareExchangePointer(lock_ptr, new_lock, NULL) != NULL)
+ {
+ DeleteCriticalSection(new_lock);
+ free(new_lock);
+ }
+ }
+
+ /* At this point, we have a lock that can be synchronized on. We don't
+ * care which thread actually performed the allocation.
+ */
+
+ EnterCriticalSection(lock);
if (!done)
{
@@ -31,7 +57,17 @@ static void once(void (*func)(void))
done = 1;
}
- LeaveCriticalSection(&lock);
+ LeaveCriticalSection(lock);
+
+ /* Last one out should free resources. The destructed objects are
+ * protected by checking if(done) above.
+ */
+ if(!InterlockedDecrement(&waiters))
+ {
+ DeleteCriticalSection(lock);
+ free(lock);
+ lock = NULL;
+ }
}