From: Andrea Faulds
Date: Tue, 9 Aug 2016 23:15:13 +0000 (+0100)
Subject: Use checked arithmetic intrinsics instead of asm, when possible
X-Git-Tag: php-7.2.0alpha1~1545^2~1
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=0987737397c28f8df48a5085cb0d5e857962af93;p=php

Use checked arithmetic intrinsics instead of asm, when possible
---

diff --git a/Zend/zend_multiply.h b/Zend/zend_multiply.h
index dfd21f7da3..7fb500a5f0 100644
--- a/Zend/zend_multiply.h
+++ b/Zend/zend_multiply.h
@@ -19,10 +19,32 @@
 
 /* $Id$ */
 
+#include "zend_portability.h"
+
 #ifndef ZEND_MULTIPLY_H
 #define ZEND_MULTIPLY_H
 
-#if (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)
+#if __has_builtin(__builtin_smull_overflow) && SIZEOF_LONG == SIZEOF_ZEND_LONG
+
+#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do { \
+	long __tmpvar; \
+	if (((usedval) = __builtin_smull_overflow((a), (b), &__tmpvar))) { \
+		(dval) = (double) (a) * (double) (b); \
+	} \
+	else (lval) = __tmpvar; \
+} while (0)
+
+#elif __has_builtin(__builtin_smulll_overflow) && SIZEOF_LONG_LONG == SIZEOF_ZEND_LONG
+
+#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do { \
+	long long __tmpvar; \
+	if (((usedval) = __builtin_smulll_overflow((a), (b), &__tmpvar))) { \
+		(dval) = (double) (a) * (double) (b); \
+	} \
+	else (lval) = __tmpvar; \
+} while (0)
+
+#elif (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)
 
 #define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do { \
 	zend_long __tmpvar; \
diff --git a/Zend/zend_operators.h b/Zend/zend_operators.h
index 370502282d..d4bdb7fd47 100644
--- a/Zend/zend_operators.h
+++ b/Zend/zend_operators.h
@@ -35,6 +35,7 @@
 #include <ieeefp.h>
 #endif
 
+#include "zend_portability.h"
 #include "zend_strtod.h"
 #include "zend_multiply.h"
 
@@ -520,7 +521,21 @@ static zend_always_inline void fast_long_decrement_function(zval *op1)
 
 static zend_always_inline void fast_long_add_function(zval *result, zval *op1, zval *op2)
 {
-#if defined(__GNUC__) && defined(__i386__) && !(4 == __GNUC__ && 8 == __GNUC_MINOR__)
+#if __has_builtin(__builtin_saddl_overflow) && SIZEOF_LONG == SIZEOF_ZEND_LONG
+	long lresult;
+	if (UNEXPECTED(__builtin_saddl_overflow(Z_LVAL_P(op1), Z_LVAL_P(op2), &lresult))) {
+		ZVAL_DOUBLE(result, (double) Z_LVAL_P(op1) + (double) Z_LVAL_P(op2));
+	} else {
+		ZVAL_LONG(result, lresult);
+	}
+#elif __has_builtin(__builtin_saddll_overflow) && SIZEOF_LONG_LONG == SIZEOF_ZEND_LONG
+	long long llresult;
+	if (UNEXPECTED(__builtin_saddll_overflow(Z_LVAL_P(op1), Z_LVAL_P(op2), &llresult))) {
+		ZVAL_DOUBLE(result, (double) Z_LVAL_P(op1) + (double) Z_LVAL_P(op2));
+	} else {
+		ZVAL_LONG(result, llresult);
+	}
+#elif defined(__GNUC__) && defined(__i386__) && !(4 == __GNUC__ && 8 == __GNUC_MINOR__)
 	__asm__(
 		"movl (%1), %%eax\n\t"
 		"addl (%2), %%eax\n\t"
@@ -606,7 +621,21 @@ static zend_always_inline int fast_add_function(zval *result, zval *op1, zval *o
 
 static zend_always_inline void fast_long_sub_function(zval *result, zval *op1, zval *op2)
 {
-#if defined(__GNUC__) && defined(__i386__) && !(4 == __GNUC__ && 8 == __GNUC_MINOR__)
+#if __has_builtin(__builtin_ssubl_overflow) && SIZEOF_LONG == SIZEOF_ZEND_LONG
+	long lresult;
+	if (UNEXPECTED(__builtin_ssubl_overflow(Z_LVAL_P(op1), Z_LVAL_P(op2), &lresult))) {
+		ZVAL_DOUBLE(result, (double) Z_LVAL_P(op1) - (double) Z_LVAL_P(op2));
+	} else {
+		ZVAL_LONG(result, lresult);
+	}
+#elif __has_builtin(__builtin_ssubll_overflow) && SIZEOF_LONG_LONG == SIZEOF_ZEND_LONG
+	long long llresult;
+	if (UNEXPECTED(__builtin_ssubll_overflow(Z_LVAL_P(op1), Z_LVAL_P(op2), &llresult))) {
+		ZVAL_DOUBLE(result, (double) Z_LVAL_P(op1) - (double) Z_LVAL_P(op2));
+	} else {
+		ZVAL_LONG(result, llresult);
+	}
+#elif defined(__GNUC__) && defined(__i386__) && !(4 == __GNUC__ && 8 == __GNUC_MINOR__)
 	__asm__(
 		"movl (%1), %%eax\n\t"
 		"subl (%2), %%eax\n\t"
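
A minimal standalone sketch of the check-then-promote pattern the new branches
use, assuming a compiler that provides __builtin_saddl_overflow (GCC 5+ or a
recent Clang); the demo_add helper name is hypothetical, not part of the
commit. The builtin returns true on signed overflow, in which case the result
is recomputed as a double, just as the ZVAL_DOUBLE branches above do.

#include <limits.h>
#include <stdio.h>

/* Mirrors fast_long_add_function: attempt a native long addition; on
 * signed overflow, fall back to a double result instead. */
static void demo_add(long a, long b)
{
	long lresult;
	if (__builtin_saddl_overflow(a, b, &lresult)) {
		/* The builtin reported overflow: promote to double. */
		printf("double: %.1f\n", (double) a + (double) b);
	} else {
		printf("long:   %ld\n", lresult);
	}
}

int main(void)
{
	demo_add(40, 2);        /* prints "long:   42" */
	demo_add(LONG_MAX, 1);  /* overflows, prints the double fallback */
	return 0;
}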