author     H.J. Lu <hjl.tools@gmail.com>    2015-01-23 14:48:40 -0800
committer  H.J. Lu <hjl.tools@gmail.com>    2015-01-23 14:48:40 -0800
commit     22971c35e2de34ec3e1b02e9bceebcba2ead7bfe
tree       64179073200de33895177a542748e069a9fb096a
parent     2ec2d7032ff9220da1577c37d41ae85c0721ad66
Use uint64_t and (uint64_t) 1 for 64-bit int
This patch replaces unsigned long int and 1UL with uint64_t and
(uint64_t) 1 to support ILP32 targets like x32.

	[BZ #17870]
	* nptl/sem_post.c (__new_sem_post): Replace unsigned long int
	with uint64_t.
	* nptl/sem_waitcommon.c (__sem_wait_cleanup): Replace 1UL with
	(uint64_t) 1.
	(__new_sem_wait_slow): Replace unsigned long int with uint64_t.
	Replace 1UL with (uint64_t) 1.
	* sysdeps/nptl/internaltypes.h (new_sem): Replace unsigned long
	int with uint64_t.
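Why this matters on ILP32: unsigned long int is only 32 bits wide there,
while the semaphore keeps its waiter count in the upper half of a 64-bit
word, so shifting 1UL by the waiter-count shift is undefined behavior.
A minimal standalone sketch of the problem follows; SEM_NWAITERS_SHIFT
is defined locally here to mirror the value glibc uses on targets with
64-bit atomics, and the program is an illustration, not glibc code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for glibc's SEM_NWAITERS_SHIFT, which is 32 when
   64-bit atomics are available: the waiter count occupies the upper
   half of the 64-bit data word.  */
#define SEM_NWAITERS_SHIFT 32

int
main (void)
{
  /* On an ILP32 target such as x32, unsigned long int is only 32 bits
     wide, so 1UL << 32 is undefined behavior and cannot encode a
     single waiter.  */
  printf ("sizeof (unsigned long int) = %zu\n",
	  sizeof (unsigned long int));

  /* uint64_t is 64 bits on every target, so this shift is well defined
     and yields the same bit pattern on LP64 and ILP32 alike.  */
  uint64_t one_waiter = (uint64_t) 1 << SEM_NWAITERS_SHIFT;
  printf ("one waiter = 0x%016" PRIx64 "\n", one_waiter);
  return 0;
}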
Diffstat (limited to 'nptl')
 nptl/sem_post.c       |  2 +-
 nptl/sem_waitcommon.c | 10 +++++-----
 2 files changed, 6 insertions(+), 6 deletions(-)
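The diffstat above is limited to nptl/, so the companion hunk in
sysdeps/nptl/internaltypes.h named in the ChangeLog is not shown here.
A paraphrased sketch of the affected field, abridged rather than quoted
from the tree (on targets without 64-bit atomics glibc uses a different
representation entirely):

#include <stdint.h>

/* Sketch of the field this patch also retypes in struct new_sem.  */
struct new_sem
{
  /* Semaphore value in the low 32 bits, waiter count in the high 32
     bits.  Formerly declared unsigned long int, which is only 32 bits
     wide on ILP32 targets such as x32, leaving no room for the waiter
     count.  */
  uint64_t data;
  int private;
  int pad;
};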
diff --git a/nptl/sem_post.c b/nptl/sem_post.c
index 9162e4c8a6..6e495ed810 100644
--- a/nptl/sem_post.c
+++ b/nptl/sem_post.c
@@ -65,7 +65,7 @@ __new_sem_post (sem_t *sem)
      added tokens before (the release sequence includes atomic RMW operations
      by other threads).  */
   /* TODO Use atomic_fetch_add to make it scale better than a CAS loop?  */
-  unsigned long int d = atomic_load_relaxed (&isem->data);
+  uint64_t d = atomic_load_relaxed (&isem->data);
   do
     {
       if ((d & SEM_VALUE_MASK) == SEM_VALUE_MAX)
diff --git a/nptl/sem_waitcommon.c b/nptl/sem_waitcommon.c
index 96848d7ac5..c60daa3865 100644
--- a/nptl/sem_waitcommon.c
+++ b/nptl/sem_waitcommon.c
@@ -187,7 +187,7 @@ __sem_wait_cleanup (void *arg)
 
 #if __HAVE_64B_ATOMICS
   /* Stop being registered as a waiter.  See below for MO.  */
-  atomic_fetch_add_relaxed (&sem->data, -(1UL << SEM_NWAITERS_SHIFT));
+  atomic_fetch_add_relaxed (&sem->data, -((uint64_t) 1 << SEM_NWAITERS_SHIFT));
 #else
   __sem_wait_32_finish (sem);
 #endif
@@ -263,8 +263,8 @@ __new_sem_wait_slow (struct new_sem *sem, const struct timespec *abstime)
 #if __HAVE_64B_ATOMICS
   /* Add a waiter.  Relaxed MO is sufficient because we can rely on the
      ordering provided by the RMW operations we use.  */
-  unsigned long d = atomic_fetch_add_relaxed (&sem->data,
-                                              1UL << SEM_NWAITERS_SHIFT);
+  uint64_t d = atomic_fetch_add_relaxed (&sem->data,
+                                         (uint64_t) 1 << SEM_NWAITERS_SHIFT);
 
   pthread_cleanup_push (__sem_wait_cleanup, sem);
 
@@ -304,7 +304,7 @@ __new_sem_wait_slow (struct new_sem *sem, const struct timespec *abstime)
               err = -1;
               /* Stop being registered as a waiter.  */
               atomic_fetch_add_relaxed (&sem->data,
-                                        -(1UL << SEM_NWAITERS_SHIFT));
+                                        -((uint64_t) 1 << SEM_NWAITERS_SHIFT));
               break;
             }
           /* Relaxed MO is sufficient; see below.  */
@@ -320,7 +320,7 @@ __new_sem_wait_slow (struct new_sem *sem, const struct timespec *abstime)
              up-to-date value; the futex_wait or the CAS perform the real
              work.  */
           if (atomic_compare_exchange_weak_acquire (&sem->data,
-              &d, d - 1 - (1UL << SEM_NWAITERS_SHIFT)))
+              &d, d - 1 - ((uint64_t) 1 << SEM_NWAITERS_SHIFT)))
             {
               err = 0;
               break;