author    Adhemerval Zanella <azanella@linux.vnet.ibm.com>    2014-11-07 12:25:32 -0500
committer Adhemerval Zanella <azanella@linux.vnet.ibm.com>    2015-01-12 06:30:19 -0500
commit    8d2c0a593bdefd220be0822fb70de6b8d3bfd39d (patch)
tree      f99ff4ee80aebb75309fad56b244535115d97288 /sysdeps/unix/sysv/linux/powerpc/htm.h
parent    ec4fbd48009314515eb31daaff7c25393f29d62b (diff)
powerpc: Add the lock elision using HTM
This patch adds support for lock elision using ISA 2.07 hardware transactional memory instructions for the pthread_mutex primitives. As in the s390 version, the elision logic defined in 'force-elision.h' is only enabled if ENABLE_LOCK_ELISION is defined. The lock elision code can also be built with a compiler that does not provide HTM support through builtins. However, I have noted that performance is sub-optimal due to scheduling pressure.
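For context, a minimal sketch of how a force-elision-style wrapper can combine the builtins that this header provides (or emulates) to elide a mutex lock. It assumes the new <htm.h> is included; the function name elide_lock_sketch and the lock_word parameter are illustrative, not the actual glibc elision code.

#include <pthread.h>
#include "htm.h"   /* The header added by this patch.  */

static inline int
elide_lock_sketch (pthread_mutex_t *mutex, const int *lock_word)
{
  if (__builtin_tbegin (0))
    {
      /* Transaction started: abort at once if the lock is already held,
         so we never run speculatively alongside a real lock owner.  */
      if (*lock_word != 0)
        __builtin_tabort (_ABORT_LOCK_BUSY);
      return 0;   /* The critical section now runs transactionally.  */
    }
  /* tbegin failed or the transaction aborted: take the lock for real.  */
  return pthread_mutex_lock (mutex);
}

The matching unlock path would end the transaction with __builtin_tend (0) instead of releasing the lock.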
Diffstat (limited to 'sysdeps/unix/sysv/linux/powerpc/htm.h')
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/htm.h  | 138
1 file changed, 138 insertions(+), 0 deletions(-)
diff --git a/sysdeps/unix/sysv/linux/powerpc/htm.h b/sysdeps/unix/sysv/linux/powerpc/htm.h
new file mode 100644
index 0000000000..4e8fb6bbc3
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/powerpc/htm.h
@@ -0,0 +1,138 @@
+/* Shared HTM header. Emulate transactional execution facility intrinsics for
+ compilers and assemblers that do not support the intrinsics and instructions
+ yet.
+
+ Copyright (C) 2014 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _HTM_H
+#define _HTM_H 1
+
+#ifdef __ASSEMBLER__
+
+/* tbegin. */
+.macro TBEGIN
+ .long 0x7c00051d
+.endm
+
+/* tend. 0 */
+.macro TEND
+ .long 0x7c00055d
+.endm
+
+/* tabort. code */
+.macro TABORT code
+ .byte 0x7c
+ .byte \code
+ .byte 0x07
+ .byte 0x1d
+.endm
+
+/*"TEXASR - Transaction EXception And Summary Register"
+ mfspr %dst,130 */
+.macro TEXASR dst
+ mfspr \dst,130
+.endm
+
+#else
+
+#include <endian.h>
+
+/* Official HTM intrinsics interface matching GCC, but works
+ on older GCC compatible compilers and binutils.
+ We should somehow detect if the compiler supports it, because
+ it may be able to generate slightly better code. */
+
+#define TBEGIN ".long 0x7c00051d"
+#define TEND ".long 0x7c00055d"
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+# define TABORT ".byte 0x1d,0x07,%1,0x1d"
+#else
+# define TABORT ".byte 0x7c,%1,0x07,0x1d"
+#endif
+
+#define __force_inline inline __attribute__((__always_inline__))
+
+#ifndef __HTM__
+
+#define _TEXASRU_EXTRACT_BITS(TEXASR,BITNUM,SIZE) \
+ (((TEXASR) >> (31-(BITNUM))) & ((1<<(SIZE))-1))
+#define _TEXASRU_FAILURE_PERSISTENT(TEXASRU) \
+ _TEXASRU_EXTRACT_BITS(TEXASRU, 7, 1)
+
+#define _tbegin() \
+ ({ unsigned int __ret; \
+ asm volatile ( \
+ TBEGIN "\t\n" \
+ "mfcr %0\t\n" \
+ "rlwinm %0,%0,3,1\t\n" \
+ "xori %0,%0,1\t\n" \
+ : "=r" (__ret) : \
+ : "cr0", "memory"); \
+ __ret; \
+ })
+
+#define _tend() \
+ ({ unsigned int __ret; \
+ asm volatile ( \
+ TEND "\t\n" \
+ "mfcr %0\t\n" \
+ "rlwinm %0,%0,3,1\t\n" \
+ "xori %0,%0,1\t\n" \
+ : "=r" (__ret) : \
+ : "cr0", "memory"); \
+ __ret; \
+ })
+
+#define _tabort(__code) \
+ ({ unsigned int __ret; \
+ asm volatile ( \
+ TABORT "\t\n" \
+ "mfcr %0\t\n" \
+ "rlwinm %0,%0,3,1\t\n" \
+ "xori %0,%0,1\t\n" \
+ : "=r" (__ret) : "r" (__code) \
+ : "cr0", "memory"); \
+ __ret; \
+ })
+
+#define _texasru() \
+ ({ unsigned long __ret; \
+ asm volatile ( \
+ "mfspr %0,131\t\n" \
+ : "=r" (__ret)); \
+ __ret; \
+ })
+
+#define __builtin_tbegin(tdb) _tbegin ()
+#define __builtin_tend(nested) _tend ()
+#define __builtin_tabort(abortcode) _tabort (abortcode)
+#define __builtin_get_texasru() _texasru ()
+
+#else
+# include <htmintrin.h>
+#endif /* __HTM__ */
+
+#endif /* __ASSEMBLER__ */
+
+/* Definitions used for the TEXASR failure code (bits 0:6); they need to be
+ even because tabort. always sets the first bit. */
+#define _ABORT_LOCK_BUSY 0x3f /* Lock already used. */
+#define _ABORT_NESTED_TRYLOCK 0x3e /* Write operation in trylock. */
+#define _ABORT_SYSCALL 0x3d /* Syscall issued. */
+
+#endif
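For reference, a sketch of how the intrinsics and the TEXASR helpers above are meant to combine: keep retrying a transaction only while the recorded failure is not marked persistent (for instance one raised by tabort. with the codes above). The function name and the retry policy are illustrative, not part of this patch.

#include "htm.h"

/* Returns 1 if the caller is now executing transactionally, 0 if it
   should fall back to taking the real lock.  */
static int
begin_elided_section_sketch (int max_retries)
{
  while (max_retries-- > 0)
    {
      if (__builtin_tbegin (0))
        return 1;   /* Transactional execution started.  */
      /* The transaction failed to start or aborted; a failure marked
         persistent in TEXASR will not succeed on a plain retry.  */
      if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
        break;
    }
  return 0;
}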