granicus.if.org Git - php/commitdiff
Use checked arithmetic intrinsics instead of asm, when possible
authorAndrea Faulds <ajf@ajf.me>
Tue, 9 Aug 2016 23:15:13 +0000 (00:15 +0100)
committerAndrea Faulds <ajf@ajf.me>
Tue, 9 Aug 2016 23:46:05 +0000 (00:46 +0100)
Zend/zend_multiply.h
Zend/zend_operators.h

index dfd21f7da35b3df56c951cc79c3c0eb0a796f334..7fb500a5f043f9a65899284bdacafeb28378b7bb 100644 (file)
 
 /* $Id$ */
 
+#include "zend_portability.h"
+
 #ifndef ZEND_MULTIPLY_H
 #define ZEND_MULTIPLY_H
 
-#if (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)
+#if __has_builtin(__builtin_smull_overflow) && SIZEOF_LONG == SIZEOF_ZEND_LONG
+
+/* Signed multiply with overflow detection via the compiler builtin.
+ * __builtin_smull_overflow() returns nonzero iff (a) * (b) overflows a long;
+ * that return is stored in (usedval).  On overflow the product is recomputed
+ * in double precision into (dval); otherwise the long result goes to (lval). */
+#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {      \
+       long __tmpvar;                                                                                                  \
+       if (((usedval) = __builtin_smull_overflow((a), (b), &__tmpvar))) {      \
+               (dval) = (double) (a) * (double) (b);                                           \
+       }                                                                                                                               \
+       else (lval) = __tmpvar;                                                                                 \
+} while (0)
+
+#elif __has_builtin(__builtin_smulll_overflow) && SIZEOF_LONG_LONG == SIZEOF_ZEND_LONG
+
+/* Same as the long variant above, but for platforms where zend_long is
+ * long long: __builtin_smulll_overflow() returns nonzero iff (a) * (b)
+ * overflows a long long (stored in (usedval)).  On overflow the product is
+ * delivered as a double in (dval); otherwise the integer result is (lval). */
+#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {      \
+       long long __tmpvar;                                                                                     \
+       if (((usedval) = __builtin_smulll_overflow((a), (b), &__tmpvar))) {     \
+               (dval) = (double) (a) * (double) (b);                                           \
+       }                                                                                                                               \
+       else (lval) = __tmpvar;                                                                                 \
+} while (0)
+
+#elif (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)
 
 #define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {      \
        zend_long __tmpvar;                                                                                                     \
index 370502282df0d940b4c88e8b7e18c8fd1ed7150a..d4bdb7fd474f8a4cde6ffd627fcf9781529bcf01 100644 (file)
@@ -35,6 +35,7 @@
 #include <ieeefp.h>
 #endif
 
+#include "zend_portability.h"
 #include "zend_strtod.h"
 #include "zend_multiply.h"
 
@@ -520,7 +521,21 @@ static zend_always_inline void fast_long_decrement_function(zval *op1)
 
 static zend_always_inline void fast_long_add_function(zval *result, zval *op1, zval *op2)
 {
-#if defined(__GNUC__) && defined(__i386__) && !(4 == __GNUC__ && 8 == __GNUC_MINOR__)
+#if __has_builtin(__builtin_saddl_overflow) && SIZEOF_LONG == SIZEOF_ZEND_LONG
+       long lresult;
+       if (UNEXPECTED(__builtin_saddl_overflow(Z_LVAL_P(op1), Z_LVAL_P(op2), &lresult))) {
+               ZVAL_DOUBLE(result, (double) Z_LVAL_P(op1) + (double) Z_LVAL_P(op2));
+       } else {
+               ZVAL_LONG(result, lresult);
+       }
+#elif __has_builtin(__builtin_saddll_overflow) && SIZEOF_LONG_LONG == SIZEOF_ZEND_LONG
+       long long llresult;
+       if (UNEXPECTED(__builtin_saddll_overflow(Z_LVAL_P(op1), Z_LVAL_P(op2), &llresult))) {
+               ZVAL_DOUBLE(result, (double) Z_LVAL_P(op1) + (double) Z_LVAL_P(op2));
+       } else {
+               ZVAL_LONG(result, llresult);
+       }
+#elif defined(__GNUC__) && defined(__i386__) && !(4 == __GNUC__ && 8 == __GNUC_MINOR__)
        __asm__(
                "movl   (%1), %%eax\n\t"
                "addl   (%2), %%eax\n\t"
@@ -606,7 +621,21 @@ static zend_always_inline int fast_add_function(zval *result, zval *op1, zval *o
 
 static zend_always_inline void fast_long_sub_function(zval *result, zval *op1, zval *op2)
 {
-#if defined(__GNUC__) && defined(__i386__) && !(4 == __GNUC__ && 8 == __GNUC_MINOR__)
+#if __has_builtin(__builtin_ssubl_overflow) && SIZEOF_LONG == SIZEOF_ZEND_LONG
+       long lresult;
+       if (UNEXPECTED(__builtin_ssubl_overflow(Z_LVAL_P(op1), Z_LVAL_P(op2), &lresult))) {
+               ZVAL_DOUBLE(result, (double) Z_LVAL_P(op1) - (double) Z_LVAL_P(op2));
+       } else {
+               ZVAL_LONG(result, lresult);
+       }
+#elif __has_builtin(__builtin_ssubll_overflow) && SIZEOF_LONG_LONG == SIZEOF_ZEND_LONG
+       long long llresult;
+       if (UNEXPECTED(__builtin_ssubll_overflow(Z_LVAL_P(op1), Z_LVAL_P(op2), &llresult))) {
+               ZVAL_DOUBLE(result, (double) Z_LVAL_P(op1) - (double) Z_LVAL_P(op2));
+       } else {
+               ZVAL_LONG(result, llresult);
+       }
+#elif defined(__GNUC__) && defined(__i386__) && !(4 == __GNUC__ && 8 == __GNUC_MINOR__)
        __asm__(
                "movl   (%1), %%eax\n\t"
                "subl   (%2), %%eax\n\t"