REPEATING_CODES = _REPEATING_CODES
SUCCESS_CODES = _SUCCESS_CODES
ASSERT_CODES = _ASSERT_CODES
+ iscased = None
tolower = None
fixes = None
if flags & SRE_FLAG_IGNORECASE and not flags & SRE_FLAG_LOCALE:
if flags & SRE_FLAG_UNICODE and not flags & SRE_FLAG_ASCII:
+ iscased = _sre.unicode_iscased
tolower = _sre.unicode_tolower
fixes = _ignorecase_fixes
else:
+ iscased = _sre.ascii_iscased
tolower = _sre.ascii_tolower
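For orientation, a rough sketch of what the three helpers provide under full Unicode matching (illustrative only; _ignorecase_fixes is the equivalence table defined near the top of this module, and the asserted values assume the usual Unicode case data):

    import _sre
    import sre_compile

    assert _sre.unicode_tolower(ord('Ж')) == ord('ж')   # simple lowercase mapping
    assert _sre.unicode_iscased(ord('ж')) is True       # takes part in case mapping
    # _ignorecase_fixes maps a lowered code point to the few other code points
    # that are equal to it under full case folding, e.g. final vs. non-final
    # Greek sigma.
    assert 0x3c2 in sre_compile._ignorecase_fixes[0x3c3]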
for op, av in pattern:
if op in LITERAL_CODES:
elif flags & SRE_FLAG_LOCALE:
emit(OP_LOC_IGNORE[op])
emit(av)
+ elif not iscased(av):
+ emit(op)
+ emit(av)
else:
lo = tolower(av)
if fixes and lo in fixes:
emit(OP_IGNORE[op])
emit(lo)
elif op is IN:
- if not flags & SRE_FLAG_IGNORECASE:
- emit(op)
- elif flags & SRE_FLAG_LOCALE:
+ charset, hascased = _optimize_charset(av, iscased, tolower, fixes)
+ if flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE:
emit(IN_LOC_IGNORE)
- else:
+ elif hascased:
emit(IN_IGNORE)
+ else:
+ emit(IN)
skip = _len(code); emit(0)
- _compile_charset(av, flags, code, tolower, fixes)
+ _compile_charset(charset, flags, code)
code[skip] = _len(code) - skip
elif op is ANY:
if flags & SRE_FLAG_DOTALL:
else:
raise error("internal: unsupported operand type %r" % (op,))
-def _compile_charset(charset, flags, code, fixup=None, fixes=None):
+def _compile_charset(charset, flags, code):
# compile charset subprogram
emit = code.append
- for op, av in _optimize_charset(charset, fixup, fixes):
+ for op, av in charset:
emit(op)
if op is NEGATE:
pass
raise error("internal: unsupported set operator %r" % (op,))
emit(FAILURE)
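With the optimization hoisted into its callers, _compile_charset() now only serializes an already-optimized set. A minimal sketch of what it emits (private API as of this patch; the opcode constants compare equal to small integers):

    import sre_compile
    from sre_constants import LITERAL, RANGE, FAILURE

    code = []
    sre_compile._compile_charset([(LITERAL, 97), (RANGE, (99, 102))], 0, code)
    assert code == [LITERAL, 97, RANGE, 99, 102, FAILURE]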
-def _optimize_charset(charset, fixup, fixes):
+def _optimize_charset(charset, iscased=None, fixup=None, fixes=None):
# internal: optimize character set
out = []
tail = []
charmap = bytearray(256)
+ hascased = False
for op, av in charset:
while True:
try:
if fixes and lo in fixes:
for k in fixes[lo]:
charmap[k] = 1
+ if not hascased and iscased(av):
+ hascased = True
else:
charmap[av] = 1
elif op is RANGE:
r = range(av[0], av[1]+1)
if fixup:
- r = map(fixup, r)
- if fixup and fixes:
- for i in r:
- charmap[i] = 1
- if i in fixes:
- for k in fixes[i]:
- charmap[k] = 1
+ if fixes:
+ for i in map(fixup, r):
+ charmap[i] = 1
+ if i in fixes:
+ for k in fixes[i]:
+ charmap[k] = 1
+ else:
+ for i in map(fixup, r):
+ charmap[i] = 1
+ if not hascased:
+ hascased = any(map(iscased, r))
else:
for i in r:
charmap[i] = 1
charmap += b'\0' * 0xff00
continue
# Character set contains non-BMP character codes.
- # There are only two ranges of cased non-BMP characters:
- # 10400-1044F (Deseret) and 118A0-118DF (Warang Citi),
- # and for both ranges RANGE_IGNORE works.
- if fixup and op is RANGE:
- op = RANGE_IGNORE
+ if fixup:
+ hascased = True
+ # There are only two ranges of cased non-BMP characters:
+ # 10400-1044F (Deseret) and 118A0-118DF (Warang Citi),
+ # and for both ranges RANGE_IGNORE works.
+ if op is RANGE:
+ op = RANGE_IGNORE
tail.append((op, av))
break
out.append((RANGE, (p, q - 1)))
out += tail
# if the case was changed or new representation is more compact
- if fixup or len(out) < len(charset):
- return out
+ if hascased or len(out) < len(charset):
+ return out, hascased
# else original character set is good enough
- return charset
+ return charset, hascased
# use bitmap
if len(charmap) == 256:
data = _mk_bitmap(charmap)
out.append((CHARSET, data))
out += tail
- return out
+ return out, hascased
# To represent a big charset, first a bitmap of all characters in the
# set is constructed. Then, this bitmap is sliced into chunks of 256
# characters; duplicate chunks are eliminated, and the remaining distinct
# blocks are referenced through a 256-entry mapping from the high byte of
# each character code to a block number.
data[0:0] = [block] + _bytes_to_codes(mapping)
out.append((BIGCHARSET, data))
out += tail
- return out
+ return out, hascased
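The second return value is what lets _compile() above pick IN versus IN_IGNORE. Roughly, under full Unicode case folding (a sketch against the private helpers; the asserted outcomes assume the usual Unicode case data):

    import _sre
    import sre_compile
    from sre_constants import RANGE

    args = (_sre.unicode_iscased, _sre.unicode_tolower, sre_compile._ignorecase_fixes)
    _, hascased = sre_compile._optimize_charset([(RANGE, (0x30, 0x39))], *args)
    assert hascased is False    # [0-9] contains nothing cased: plain IN suffices
    _, hascased = sre_compile._optimize_charset([(RANGE, (0x61, 0x7a))], *args)
    assert hascased is True     # [a-z] still needs IN_IGNORE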
_CODEBITS = _sre.CODESIZE * 8
MAXCODE = (1 << _CODEBITS) - 1
table[i] = idx + 1
return table
-def _get_literal_prefix(pattern):
+def _get_iscased(flags):
+ if not flags & SRE_FLAG_IGNORECASE:
+ return None
+ elif flags & SRE_FLAG_UNICODE and not flags & SRE_FLAG_ASCII:
+ return _sre.unicode_iscased
+ else:
+ return _sre.ascii_iscased
+
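The dispatch mirrors the one at the top of _compile(); a quick sanity sketch (private helper, names as in this patch):

    import _sre
    import sre_compile
    from sre_constants import SRE_FLAG_IGNORECASE, SRE_FLAG_UNICODE, SRE_FLAG_ASCII

    assert sre_compile._get_iscased(0) is None
    assert sre_compile._get_iscased(SRE_FLAG_IGNORECASE | SRE_FLAG_UNICODE) is _sre.unicode_iscased
    assert sre_compile._get_iscased(SRE_FLAG_IGNORECASE | SRE_FLAG_ASCII) is _sre.ascii_iscased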
+def _get_literal_prefix(pattern, flags):
# look for literal prefix
prefix = []
prefixappend = prefix.append
prefix_skip = None
+ iscased = _get_iscased(flags)
for op, av in pattern.data:
if op is LITERAL:
+ if iscased and iscased(av):
+ break
prefixappend(av)
elif op is SUBPATTERN:
group, add_flags, del_flags, p = av
- if add_flags & SRE_FLAG_IGNORECASE:
+ flags1 = (flags | add_flags) & ~del_flags
+ if flags1 & SRE_FLAG_IGNORECASE and flags1 & SRE_FLAG_LOCALE:
break
- prefix1, prefix_skip1, got_all = _get_literal_prefix(p)
+ prefix1, prefix_skip1, got_all = _get_literal_prefix(p, flags1)
if prefix_skip is None:
if group is not None:
prefix_skip = len(prefix)
return prefix, prefix_skip, True
return prefix, prefix_skip, False
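Effect on the literal-prefix scan, sketched with the parser's private API (tuple shapes as produced by sre_parse): a cased literal now stops extraction as soon as IGNORECASE is in effect, since a verbatim prefix could no longer be used to skip ahead.

    import sre_compile, sre_parse
    from sre_constants import SRE_FLAG_IGNORECASE

    p = sre_parse.parse(r'foo\d+')
    prefix, _, _ = sre_compile._get_literal_prefix(p, 0)
    assert prefix == [ord('f'), ord('o'), ord('o')]    # usable literal prefix
    prefix, _, _ = sre_compile._get_literal_prefix(p, SRE_FLAG_IGNORECASE)
    assert prefix == []                                # 'f' is cased: no prefix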
-def _get_charset_prefix(pattern):
- charset = [] # not used
- charsetappend = charset.append
- if pattern.data:
+def _get_charset_prefix(pattern, flags):
+ while True:
+ if not pattern.data:
+ return None
op, av = pattern.data[0]
- if op is SUBPATTERN:
- group, add_flags, del_flags, p = av
- if p and not (add_flags & SRE_FLAG_IGNORECASE):
- op, av = p[0]
- if op is LITERAL:
- charsetappend((op, av))
- elif op is BRANCH:
- c = []
- cappend = c.append
- for p in av[1]:
- if not p:
- break
- op, av = p[0]
- if op is LITERAL:
- cappend((op, av))
- else:
- break
- else:
- charset = c
- elif op is BRANCH:
- c = []
- cappend = c.append
- for p in av[1]:
- if not p:
- break
- op, av = p[0]
- if op is LITERAL:
- cappend((op, av))
- else:
- break
+ if op is not SUBPATTERN:
+ break
+ group, add_flags, del_flags, pattern = av
+ flags = (flags | add_flags) & ~del_flags
+ if flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE:
+ return None
+
+ iscased = _get_iscased(flags)
+ if op is LITERAL:
+ if iscased and iscased(av):
+ return None
+ return [(op, av)]
+ elif op is BRANCH:
+ charset = []
+ charsetappend = charset.append
+ for p in av[1]:
+ if not p:
+ return None
+ op, av = p[0]
+ if op is LITERAL and not (iscased and iscased(av)):
+ charsetappend((op, av))
else:
- charset = c
- elif op is IN:
- charset = av
- return charset
+ return None
+ return charset
+ elif op is IN:
+ charset = av
+ if iscased:
+ for op, av in charset:
+ if op is LITERAL:
+ if iscased(av):
+ return None
+ elif op is RANGE:
+ if av[1] > 0xffff:
+ return None
+ if any(map(iscased, range(av[0], av[1]+1))):
+ return None
+ return charset
+ return None
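And the charset-prefix scan, with the same caveats: the set of possible first characters of an alternation is still extracted, but only when none of them is cased under the active flags.

    import sre_compile, sre_parse
    from sre_constants import SRE_FLAG_IGNORECASE, LITERAL

    p = sre_parse.parse('ab|cd')
    assert sre_compile._get_charset_prefix(p, 0) == [(LITERAL, 97), (LITERAL, 99)]
    assert sre_compile._get_charset_prefix(p, SRE_FLAG_IGNORECASE) is None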
def _compile_info(code, pattern, flags):
# internal: compile an info block. in the current version, this only
# contains min/max pattern width and an optional literal or charset prefix
prefix = []
prefix_skip = 0
charset = [] # not used
- if not (flags & SRE_FLAG_IGNORECASE):
+ if not (flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE):
# look for literal prefix
- prefix, prefix_skip, got_all = _get_literal_prefix(pattern)
+ prefix, prefix_skip, got_all = _get_literal_prefix(pattern, flags)
# if no prefix, look for charset prefix
if not prefix:
- charset = _get_charset_prefix(pattern)
+ charset = _get_charset_prefix(pattern, flags)
## if prefix:
## print("*** PREFIX", prefix, prefix_skip)
## if charset:
# generate overlap table
code.extend(_generate_overlap_table(prefix))
elif charset:
+ charset, hascased = _optimize_charset(charset)
+ assert not hascased
_compile_charset(charset, flags, code)
code[skip] = len(code) - skip
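Observable matching behaviour does not change; the payoff is that the caseless parts of an IGNORECASE pattern now compile to the ordinary opcodes and keep the prefix optimizations. A trivial smoke test:

    import re

    # Same matches with and without IGNORECASE for a pattern containing
    # nothing the case predicates report as cased.
    assert re.fullmatch(r'[0-9]+-[0-9]+', '12-34', re.IGNORECASE)
    assert re.fullmatch(r'[0-9]+-[0-9]+', '12-34')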
return return_value;
}
+PyDoc_STRVAR(_sre_ascii_iscased__doc__,
+"ascii_iscased($module, character, /)\n"
+"--\n"
+"\n");
+
+#define _SRE_ASCII_ISCASED_METHODDEF \
+ {"ascii_iscased", (PyCFunction)_sre_ascii_iscased, METH_O, _sre_ascii_iscased__doc__},
+
+static int
+_sre_ascii_iscased_impl(PyObject *module, int character);
+
+static PyObject *
+_sre_ascii_iscased(PyObject *module, PyObject *arg)
+{
+ PyObject *return_value = NULL;
+ int character;
+ int _return_value;
+
+ if (!PyArg_Parse(arg, "i:ascii_iscased", &character)) {
+ goto exit;
+ }
+ _return_value = _sre_ascii_iscased_impl(module, character);
+ if ((_return_value == -1) && PyErr_Occurred()) {
+ goto exit;
+ }
+ return_value = PyBool_FromLong((long)_return_value);
+
+exit:
+ return return_value;
+}
+
+PyDoc_STRVAR(_sre_unicode_iscased__doc__,
+"unicode_iscased($module, character, /)\n"
+"--\n"
+"\n");
+
+#define _SRE_UNICODE_ISCASED_METHODDEF \
+ {"unicode_iscased", (PyCFunction)_sre_unicode_iscased, METH_O, _sre_unicode_iscased__doc__},
+
+static int
+_sre_unicode_iscased_impl(PyObject *module, int character);
+
+static PyObject *
+_sre_unicode_iscased(PyObject *module, PyObject *arg)
+{
+ PyObject *return_value = NULL;
+ int character;
+ int _return_value;
+
+ if (!PyArg_Parse(arg, "i:unicode_iscased", &character)) {
+ goto exit;
+ }
+ _return_value = _sre_unicode_iscased_impl(module, character);
+ if ((_return_value == -1) && PyErr_Occurred()) {
+ goto exit;
+ }
+ return_value = PyBool_FromLong((long)_return_value);
+
+exit:
+ return return_value;
+}
+
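From Python, the two generated wrappers above surface as _sre.ascii_iscased() and _sre.unicode_iscased(); the C implementations are not part of this generated header. Assuming they report whether a code point takes part in case mapping, the intended behaviour is roughly:

    import _sre

    assert _sre.ascii_iscased(ord('a')) is True
    assert _sre.ascii_iscased(ord('7')) is False
    assert _sre.unicode_iscased(ord('Ж')) is True
    assert _sre.unicode_iscased(ord('-')) is False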
PyDoc_STRVAR(_sre_ascii_tolower__doc__,
"ascii_tolower($module, character, /)\n"
"--\n"
{
return _sre_SRE_Scanner_search_impl(self);
}
-/*[clinic end generated code: output=811e67d7f8f5052e input=a9049054013a1b77]*/
+/*[clinic end generated code: output=5fe47c49e475cccb input=a9049054013a1b77]*/