From: Raymond Hettinger
Date: Mon, 6 Jan 2003 10:33:56 +0000 (+0000)
Subject: Optimize string_repeat.
X-Git-Tag: v2.3c1~2577
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=698258a1991db03207353b8403bfff1287ba92cc;p=python

Optimize string_repeat.

Christian Tismer pointed out the high cost of the loop overhead and
function call overhead for 'c' * n where n is large.  Accordingly,
the new code makes only about lg2(n) passes through the copy loop.

Interestingly, 'c' * 1000 * 1000 ran a bit faster with the old code.
At some point, the loop and function call overhead became cheaper
than invalidating the cache with lengthy memcpys.  But for more
typical sizes of n, the new code runs much faster, and for larger
values of n it runs only a bit slower.
---

diff --git a/Objects/stringobject.c b/Objects/stringobject.c
index 748592ea50..acfce8b104 100644
--- a/Objects/stringobject.c
+++ b/Objects/stringobject.c
@@ -933,6 +933,7 @@ static PyObject *
 string_repeat(register PyStringObject *a, register int n)
 {
 	register int i;
+	register int j;
 	register int size;
 	register PyStringObject *op;
 	size_t nbytes;
@@ -965,8 +966,16 @@ string_repeat(register PyStringObject *a, register int n)
 	PyObject_INIT_VAR(op, &PyString_Type, size);
 	op->ob_shash = -1;
 	op->ob_sstate = SSTATE_NOT_INTERNED;
-	for (i = 0; i < size; i += a->ob_size)
-		memcpy(op->ob_sval+i, a->ob_sval, (int) a->ob_size);
+	i = 0;
+	if (i < size) {
+		memcpy(op->ob_sval, a->ob_sval, (int) a->ob_size);
+		i = (int) a->ob_size;
+	}
+	while (i < size) {
+		j = (i <= size-i) ? i : size-i;
+		memcpy(op->ob_sval+i, op->ob_sval, j);
+		i += j;
+	}
 	op->ob_sval[size] = '\0';
 	return (PyObject *) op;
 }
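
The following is a minimal standalone sketch (not CPython code) of the
doubling-copy technique the patch uses: write one copy of the seed, then
repeatedly memcpy the already-filled prefix of the destination onto its tail,
doubling the filled region each pass, so roughly 1 + lg2(n) memcpy calls are
made instead of n.  The helper name repeat_fill and the surrounding program
are hypothetical, for illustration only.

/* sketch of the doubling-copy fill used by the patched string_repeat */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
repeat_fill(char *dst, const char *seed, size_t seedlen, size_t total)
{
    size_t i = 0;
    size_t j;

    if (i < total) {                  /* first copy comes from the seed */
        memcpy(dst, seed, seedlen);
        i = seedlen;
    }
    while (i < total) {               /* then keep doubling the filled prefix */
        j = (i <= total - i) ? i : total - i;
        memcpy(dst + i, dst, j);
        i += j;
    }
}

int
main(void)
{
    const char *seed = "ab";
    size_t n = 10;                    /* repeat count, as in 'ab' * 10 */
    size_t total = strlen(seed) * n;
    char *buf = malloc(total + 1);

    if (buf == NULL)
        return 1;
    repeat_fill(buf, seed, strlen(seed), total);
    buf[total] = '\0';
    printf("%s\n", buf);              /* prints abababababababababab */
    free(buf);
    return 0;
}

Copying from the destination's own filled prefix rather than from the seed is
what keeps each memcpy as long as possible and the call count logarithmic.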