use as arguments to functionals:
\samp{map(mydict.__getitem__, keylist)}.
+\item Added a new opcode, \code{LIST_APPEND}, that simplifies
+ the generated bytecode for list comprehensions and speeds them up
+ by about a third.
+
\end{itemize}
The net result of the 2.4 optimizations is that Python 2.4 runs the
#define UNARY_INVERT 15
+#define LIST_APPEND 18
#define BINARY_POWER 19
#define BINARY_MULTIPLY 20
def_op('UNARY_INVERT', 15)
+def_op('LIST_APPEND', 18)
def_op('BINARY_POWER', 19)
def_op('BINARY_MULTIPLY', 20)
Core and builtins
-----------------
+- Implemented a new opcode, LIST_APPEND, that simplifies
+ the generated bytecode for list comprehensions and further
+ improves their performance (about 35%).
+
- Implemented rich comparisons for floats, which seems to make
comparisons involving NaNs somewhat less surprising when the
underlying C compiler actually implements C99 semantics.
if (x != NULL) continue;
break;
+ case LIST_APPEND:
+ w = POP();
+ v = POP();
+ err = PyList_Append(v, w);
+ Py_DECREF(v);
+ Py_DECREF(w);
+ if (err == 0) continue;
+ break;
+
case INPLACE_POWER:
w = POP();
v = TOP();
com_addop_varname(c, VAR_LOAD, t);
com_push(c, 1);
com_node(c, e);
- com_addoparg(c, CALL_FUNCTION, 1);
- com_addbyte(c, POP_TOP);
+ com_addbyte(c, LIST_APPEND);
com_pop(c, 2);
}
}
com_addoparg(c, BUILD_LIST, 0);
com_addbyte(c, DUP_TOP); /* leave the result on the stack */
com_push(c, 2);
- com_addop_name(c, LOAD_ATTR, "append");
com_addop_varname(c, VAR_STORE, tmpname);
com_pop(c, 1);
com_list_for(c, CHILD(n, 1), CHILD(n, 0), tmpname);