granicus.if.org Git - php/commitdiff
Low-level ARM optimizations
authorArd Biesheuvel <ard.biesheuvel@linaro.org>
Mon, 11 Feb 2013 13:36:58 +0000 (14:36 +0100)
committerArd Biesheuvel <ard.biesheuvel@linaro.org>
Mon, 11 Feb 2013 13:36:58 +0000 (14:36 +0100)
- added ARM versions of zend_mm_high_bit()/zend_mm_low_bit()
- improved safe_address()

Zend/zend_alloc.c
Zend/zend_multiply.h

index 98ab6bea36ba0d5e5758f2b203b39f663af63149..1c76bd403d89c5c245ff70363b9c69071ce75dad 100644 (file)
@@ -678,6 +678,8 @@ static inline unsigned int zend_mm_high_bit(size_t _size)
        __asm {
                bsr eax, _size
        }
+#elif defined(__GNUC__) && (defined(__arm__) ||  defined(__aarch64__))
+       return (8 * SIZEOF_SIZE_T - 1) - __builtin_clzl(_size);
 #else
        unsigned int n = 0;
        while (_size != 0) {
@@ -704,6 +706,8 @@ static inline unsigned int zend_mm_low_bit(size_t _size)
        __asm {
                bsf eax, _size
        }
+#elif defined(__GNUC__) && (defined(__arm__) || defined(__aarch64__))
+       return __builtin_ctzl(_size);
 #else
        static const int offset[16] = {4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0};
        unsigned int n;
@@ -2494,6 +2498,47 @@ static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
         return res;
 }
 
+#elif defined(__GNUC__) && defined(__arm__)
+
+/* Overflow-checked nmemb * size + offset for 32-bit ARM. */
+static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
+{
+        size_t res;
+        unsigned long overflow;
+
+        /* UMLAL: {overflow:res} = nmemb * size + {0:offset}.  The 64-bit
+         * accumulator is pre-loaded via the tied operands "0"(offset) (low
+         * word) and "1"(0) (high word), so after the multiply-accumulate the
+         * high word is non-zero iff nmemb*size+offset does not fit in 32
+         * bits. */
+        __asm__ ("umlal %0,%1,%2,%3"
+             : "=r"(res), "=r"(overflow)
+             : "r"(nmemb),
+               "r"(size),
+               "0"(offset),
+               "1"(0));
+
+        if (UNEXPECTED(overflow)) {
+                zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
+                return 0;
+        }
+        return res;
+}
+
+#elif defined(__GNUC__) && defined(__aarch64__)
+
+/* Overflow-checked nmemb * size + offset for AArch64. */
+static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
+{
+        size_t res;
+        unsigned long overflow;
+
+        /* res = low 64 bits of nmemb*size + offset; overflow = high 64 bits
+         * of the product (UMULH) plus the carry out of the ADDS.  The third
+         * ADC operand must be xzr, not %1: "adc %1,%1,%1" computes
+         * 2*overflow + carry, which wraps to 0 when the high word is exactly
+         * 1UL<<63 and the carry is clear, silently missing the overflow.
+         * Outputs are earlyclobber because UMULH re-reads %2/%3 after MUL
+         * has already written %0. */
+        __asm__ ("mul %0,%2,%3\n\tumulh %1,%2,%3\n\tadds %0,%0,%4\n\tadc %1,%1,xzr"
+             : "=&r"(res), "=&r"(overflow)
+             : "r"(nmemb),
+               "r"(size),
+               "r"(offset));
+
+        if (UNEXPECTED(overflow)) {
+                zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
+                return 0;
+        }
+        return res;
+}
+
 #elif SIZEOF_SIZE_T == 4 && defined(HAVE_ZEND_LONG64)
 
 static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
index c3c9657f3b25e987d43748bdb96327c244012d3f..e52956f417627f4433238a7c052fb4a1245ea8f7 100644 (file)
@@ -13,7 +13,7 @@
    | license@zend.com so we can mail you a copy immediately.              |
    +----------------------------------------------------------------------+
    | Authors: Sascha Schumann <sascha@schumann.cx>                        |
-   |          Ard Biesheuvel <ard@ard.nu>                                 |
+   |          Ard Biesheuvel <ard.biesheuvel@linaro.org>                  |
    +----------------------------------------------------------------------+
 */
 
        else (lval) = __tmpvar;                                                                                 \
 } while (0)
 
+#elif defined(__arm__) && defined(__GNUC__)
+
+/* 32-bit ARM: SMULL yields the full 64-bit signed product in %1:%0.
+ * The SUB subtracts the sign extension of the low word (asr #31) from
+ * the high word, so usedval == 0 exactly when the product fits in a
+ * signed 32-bit long; otherwise the double-precision fallback (dval)
+ * is used. */
+#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {      \
+       long __tmpvar;                                                                                                  \
+       __asm__("smull %0, %1, %2, %3\n"                                                                \
+               "sub %1, %1, %0, asr #31"                                                                       \
+                       : "=r"(__tmpvar), "=r"(usedval)                                                 \
+                       : "r"(a), "r"(b));                                                                              \
+       if (usedval) (dval) = (double) (a) * (double) (b);                              \
+       else (lval) = __tmpvar;                                                                                 \
+} while (0)
+
+#elif defined(__aarch64__) && defined(__GNUC__)
+
+#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {      \
+       long __tmpvar;                                                                                                  \
+       __asm__("mul %0, %2, %3\n"                                                                              \
+               "smulh %1, %2, %3\n"                                                                            \
+               "sub %1, %1, %0, asr #63\n"                                                                     \
+                       : "=X"(__tmpvar), "=X"(usedval)                                                 \
+                       : "X"(a), "X"(b));                                                                              \
+       if (usedval) (dval) = (double) (a) * (double) (b);                              \
+       else (lval) = __tmpvar;                                                                                 \
+} while (0)
+
 #elif SIZEOF_LONG == 4 && defined(HAVE_ZEND_LONG64)
 
 #define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {      \
 
 #else
 
-#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {              \
-       long   __lres  = (a) * (b);                                                                                     \
-       long double __dres  = (long double)(a) * (long double)(b);                                                      \
-       long double __delta = (long double) __lres - __dres;                                                    \
-       if ( ((usedval) = (( __dres + __delta ) != __dres))) {                          \
-               (dval) = __dres;                                                                                                \
-       } else {                                                                                                                        \
-               (lval) = __lres;                                                                                                \
-       }                                                                                                                                       \
+/* Portable fallback: redo the multiply in long double and compare.
+ * __delta is the rounding/truncation error of the long product; if
+ * adding it back to the exact result changes that result, precision
+ * was lost, i.e. the product overflowed a long and dval is used. */
+#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {      \
+       long   __lres  = (a) * (b);                                                                             \
+       long double __dres  = (long double)(a) * (long double)(b);              \
+       long double __delta = (long double) __lres - __dres;                    \
+       if ( ((usedval) = (( __dres + __delta ) != __dres))) {                  \
+               (dval) = __dres;                                                                                        \
+       } else {                                                                                                                \
+               (lval) = __lres;                                                                                        \
+       }                                                                                                                               \
 } while (0)
 
 #endif