#elif defined(__GNUC__) && defined(__x86_64__)
unsigned long n;
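+ /* BSR finds the index of the highest set bit (integer log2); it writes the flags, hence the "cc" clobber. */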
- __asm__("bsrq %1,%0\n\t" : "=r" (n) : "rm" (_size) : "cc");
- __asm__("bsr %1,%0\n\t" : "=r" (n) : "rm" (_size));
++ __asm__("bsr %1,%0\n\t" : "=r" (n) : "rm" (_size) : "cc");
return (unsigned int)n;
#elif defined(_MSC_VER) && defined(_M_IX86)
__asm {
bsr eax, _size
}
#elif defined(__GNUC__) && defined(__x86_64__)
unsigned long n;
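+ /* BSF finds the index of the lowest set bit; like BSR it writes the flags, hence the "cc" clobber. */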
- __asm__("bsfq %1,%0\n\t" : "=r" (n) : "rm" (_size) : "cc");
- __asm__("bsf %1,%0\n\t" : "=r" (n) : "rm" (_size));
++ __asm__("bsf %1,%0\n\t" : "=r" (n) : "rm" (_size) : "cc");
return (unsigned int)n;
#elif defined(_MSC_VER) && defined(_M_IX86)
__asm {
bsf eax, _size
}
+#elif defined(__GNUC__) && (defined(__arm__) || defined(__aarch64__))
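+ /* __builtin_ctzl() counts trailing zeros, i.e. the index of the lowest set bit. */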
+ return __builtin_ctzl(_size);
#else
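/* Portable fallback: offset[n] is the number of trailing zeros in the 4-bit value n (4 when n == 0), applied nibble by nibble. */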
static const int offset[16] = {4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0};
unsigned int n;
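+ /* safe_address(): nmemb * size + offset with overflow detection. One-operand
+ * MUL leaves the high half of the product in %edx/%rdx; ADC $0 then folds the
+ * ADD's carry into it, so overflow is nonzero iff the result wrapped. */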
"rm"(size),
"rm"(offset));
+ #undef LP_SUFF
+ if (UNEXPECTED(overflow)) {
+ zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
+ return 0;
+ }
+ return res;
+}
+
+#elif defined(__GNUC__) && defined(__arm__)
+
+static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
+{
+ size_t res;
+ unsigned long overflow;
+
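+ /* UMLAL accumulates a 64-bit product: {overflow:res} = {0:offset} + nmemb * size, so overflow holds the high 32 bits of the full result. */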
+ __asm__ ("umlal %0,%1,%2,%3"
+ : "=r"(res), "=r"(overflow)
+ : "r"(nmemb),
+ "r"(size),
+ "0"(offset),
+ "1"(0));
+
+ if (UNEXPECTED(overflow)) {
+ zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
+ return 0;
+ }
+ return res;
+}
+
+#elif defined(__GNUC__) && defined(__aarch64__)
+
+static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
+{
+ size_t res;
+ unsigned long overflow;
+
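+ /* MUL yields the low 64 bits of nmemb * size, UMULH the high 64; ADDS adds
+ * offset and sets the carry, which ADC folds into overflow, so overflow is
+ * nonzero iff the true result does not fit in 64 bits. */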
+ __asm__ ("mul %0,%2,%3\n\tumulh %1,%2,%3\n\tadds %0,%0,%4\n\tadc %1,%1,%1"
+ : "=&r"(res), "=&r"(overflow)
+ : "r"(nmemb),
+ "r"(size),
+ "r"(offset));
+ if (UNEXPECTED(overflow)) {
+ zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);