__all__ = ["StringIO"]
class StringIO:
- """class StringIO([buffer])
-
+ """class StringIO([buffer])
+
When a StringIO object is created, it can be initialized to an existing
string by passing the string to the constructor. If no string is given,
- the StringIO will start empty.
+ the StringIO will start empty.
The StringIO object can accept either Unicode or 8-bit strings, but
mixing the two may take some care. If both are used, 8-bit strings that
cannot be interpreted as 7-bit ASCII (that use the 8th bit) will cause
- a UnicodeError to be raised when getvalue() is called.
+ a UnicodeError to be raised when getvalue() is called.
"""
def __init__(self, buf = ''):
# Force self.buf to be a string or unicode
return iter(self.readline, '')
def close(self):
- """Free the memory buffer.
+ """Free the memory buffer.
"""
if not self.closed:
self.closed = 1
but mixing the two may take some care. If both are used, 8-bit
strings that cannot be interpreted as 7-bit ASCII (that use the
8th bit) will cause a UnicodeError to be raised when getvalue()
- is called.
+ is called.
"""
if self.buflist:
self.buf += ''.join(self.buflist)
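# A minimal usage sketch of the mixing rule described above (assuming this
# module is importable as StringIO):
#
#     >>> from StringIO import StringIO
#     >>> s = StringIO()
#     >>> s.write('hello ')        # 8-bit string, pure 7-bit ASCII
#     >>> s.write(u'world')        # Unicode string
#     >>> s.getvalue()             # ASCII-only mixing joins cleanly
#     u'hello world'
#     >>> s.write('\xff')          # 8-bit data outside 7-bit ASCII
#     >>> s.getvalue()             # now raises UnicodeError, as documented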
BASE64 = 2 # Base64
# In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7
-MISC_LEN = 7
+MISC_LEN = 7
DEFAULT_CHARSET = 'us-ascii'
# Defaults
CHARSETS = {
# input header enc body enc output conv
- 'iso-8859-1': (QP, QP, None),
+ 'iso-8859-1': (QP, QP, None),
'iso-8859-2': (QP, QP, None),
'us-ascii': (None, None, None),
'big5': (BASE64, BASE64, None),
- 'gb2312': (BASE64, BASE64, None),
+ 'gb2312': (BASE64, BASE64, None),
'euc-jp': (BASE64, None, 'iso-2022-jp'),
'shift_jis': (BASE64, None, 'iso-2022-jp'),
'iso-2022-jp': (BASE64, None, None),
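    # Reading the table above (a sketch using the constants defined earlier
    # in this module):
    #
    #   CHARSETS['euc-jp']   == (BASE64, None, 'iso-2022-jp')
    #       headers are Base64-encoded, the body needs no transfer encoding,
    #       and the text is converted to iso-2022-jp on output.
    #   CHARSETS['us-ascii'] == (None, None, None)
    #       pure ASCII needs no header or body encoding and no conversion.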
converting between character sets, given the availability of the
applicable codecs. Given a character set, it will do its best to provide
information on how to use that character set in an email.
-
+
Certain character sets must be encoded with quoted-printable or base64
when used in email headers or bodies. Certain character sets must be
converted outright, and are not allowed in email. Instances of this
if not ecre.search(line):
decoded.append((line, None))
continue
-
+
parts = ecre.split(line)
while parts:
unenc = parts.pop(0).strip()
if charset is None:
charset = self._charset
self._chunks.append((s, charset))
-
+
def _split(self, s, charset):
# Split up a header safely for use with encode_chunks. BAW: this
# appears to be a private convenience method.
splittable = charset.to_splittable(s)
encoded = charset.from_splittable(splittable)
elen = charset.encoded_header_len(encoded)
-
+
if elen <= self._maxlinelen:
return [(encoded, charset)]
# BAW: should we use encoded?
Base64 or quoted-printable) header strings. In addition, there is a
75-character length limit on any given encoded header field, so
line-wrapping must be performed, even with double-byte character sets.
-
+
This method will do its best to convert the string to the correct
character set used in email, and encode and line wrap it safely with
the appropriate scheme for that character set.
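    # A minimal sketch of the behavior described above (assuming the email
    # package's Header class, email.Header.Header):
    #
    #     >>> from email.Header import Header
    #     >>> Header('hello').encode()        # us-ascii: no encoding needed
    #     'hello'
    #     >>> Header('hello', 'iso-8859-1').encode()
    #     '=?iso-8859-1?q?hello?='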
def get_charset(self):
"""Return the Charset object associated with the message's payload."""
return self._charset
-
+
#
# MAPPING INTERFACE (partial)
#
"""The inverse of parseaddr(), this takes a 2-tuple of the form
(realname, email_address) and returns the string value suitable
for an RFC 2822 From:, To: or Cc:.
-
+
    If the first element of the pair is false, then the second element is
returned unmodified.
"""
# Helpers
def base64_len(s):
"""Return the length of s when it is encoded with base64."""
- groups_of_3, leftover = divmod(len(s), 3)
- # 4 bytes out for each 3 bytes (or nonzero fraction thereof) in.
+ groups_of_3, leftover = divmod(len(s), 3)
+ # 4 bytes out for each 3 bytes (or nonzero fraction thereof) in.
# Thanks, Tim!
- n = groups_of_3 * 4
- if leftover:
- n += 4
- return n
+ n = groups_of_3 * 4
+ if leftover:
+ n += 4
+ return n
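# A quick sanity sketch of the arithmetic above (the last case matches the
# encodestring result exercised in the base64 tests further down):
assert base64_len('abc') == 4            # one full group of 3 -> 4 chars
assert base64_len('abcd') == 8           # 3 bytes + 1 leftover -> 8 chars
assert base64_len('www.python.org') == len('d3d3LnB5dGhvbi5vcmc=')   # 20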
\f
def header_encode(header, charset='iso-8859-1', keep_eols=0, maxlinelen=76,
eol=NL):
"""Encode a single header line with Base64 encoding in a given charset.
-
+
Defined in RFC 2045, this Base64 encoding is identical to normal Base64
encoding, except that each line must be intelligently wrapped (respecting
the Base64 encoding), and subsequent lines must start with a space.
"=?charset?b?WW/5ciBtYXp66XLrIHf8eiBhIGhhbXBzdGHuciBBIFlv+XIgbWF6euly?=\\n
=?charset?b?6yB3/HogYSBoYW1wc3Rh7nIgQkMgWW/5ciBtYXp66XLrIHf8eiBhIGhh?="
-
+
with each line wrapped at, at most, maxlinelen characters (defaults to 76
characters).
"""
if not keep_eols:
header = fix_eols(header)
-
+
# Base64 encode each line, in encoded chunks no greater than maxlinelen in
# length, after the RFC chrome is added in.
base64ed = []
# BAW: Ben's original code used a step of max_unencoded, but I think it
# ought to be max_encoded. Otherwise, where's max_encoded used? I'm
- # still not sure what the
+ # still not sure what the
for i in range(0, len(header), max_unencoded):
base64ed.append(b2a_base64(header[i:i+max_unencoded]))
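    # A minimal usage sketch (assuming this module's header_encode and its
    # default charset of 'iso-8859-1'):
    #
    #     >>> header_encode('hello')
    #     '=?iso-8859-1?b?aGVsbG8=?='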
"""
if not s:
return s
-
+
if not binary:
s = fix_eols(s)
-
+
encvec = []
max_unencoded = maxlinelen * 3 / 4
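    # Worked example of the length arithmetic (a sketch): with the default
    # maxlinelen of 76, max_unencoded is 76 * 3 / 4 == 57, and a 57-byte
    # chunk encodes to exactly 76 Base64 characters (19 groups of 3 bytes
    # -> 19 groups of 4 characters).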
for i in range(0, len(s), max_unencoded):
"""
if not s:
return s
-
+
dec = a2b_base64(s)
if convert_eols:
return dec.replace(CRLF, convert_eols)
This module does not do the line wrapping or end-of-line character
conversion necessary for proper internationalized headers; it only
does dumb encoding and decoding. To deal with the various line
-wrapping issues, use the email.Header module.
+wrapping issues, use the email.Header module.
"""
import re
"""Return true if the character should be escaped with body quopri."""
return bqre.match(c) and 1
-
+
def header_quopri_len(s):
"""Return the length of str when it is encoded with header quopri."""
count = 0
    # length, after the RFC chrome is added in.
quoted = []
max_encoded = maxlinelen - len(charset) - MISC_LEN
-
+
for c in header:
# Space may be represented as _ instead of =20 for readability
if c == ' ':
line = line[:-2]
elif line[-1] in CRLF:
line = line[:-1]
-
+
lineno += 1
encoded_line = ''
prev = None
Create an instance of the FileInput class. The instance will be used
as global state for the functions of this module, and is also returned
    for use during iteration. The parameters to this function will be passed
- along to the constructor of the FileInput class.
+ along to the constructor of the FileInput class.
"""
global _state
if _state and _state._file:
changed until after the first line of the next file has been read.
Before the first line has been read, this function has no effect;
it cannot be used to skip the first file. After the last line of the
- last file has been read, this function has no effect.
+ last file has been read, this function has no effect.
"""
if not _state:
raise RuntimeError, "no active input()"
def filename():
"""
Return the name of the file currently being read.
- Before the first line has been read, returns None.
+ Before the first line has been read, returns None.
"""
if not _state:
raise RuntimeError, "no active input()"
"""
Return the cumulative line number of the line that has just been read.
Before the first line has been read, returns 0. After the last line
- of the last file has been read, returns the line number of that line.
+ of the last file has been read, returns the line number of that line.
"""
if not _state:
raise RuntimeError, "no active input()"
"""
Return the line number in the current file. Before the first line
has been read, returns 0. After the last line of the last file has
- been read, returns the line number of that line within the file.
+ been read, returns the line number of that line within the file.
"""
if not _state:
raise RuntimeError, "no active input()"
def isfirstline():
"""
    Returns true if the line just read is the first line of its file,
- otherwise returns false.
+ otherwise returns false.
"""
if not _state:
raise RuntimeError, "no active input()"
def isstdin():
"""
Returns true if the last line was read from sys.stdin,
- otherwise returns false.
+ otherwise returns false.
"""
if not _state:
raise RuntimeError, "no active input()"
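# A minimal usage sketch of the module-level interface documented above
# (the file names are hypothetical):
#
#     import fileinput
#     for line in fileinput.input(['spam.txt', 'eggs.txt']):
#         if fileinput.isfirstline():
#             print '===', fileinput.filename(), '==='
#         print fileinput.filelineno(), line,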
class FileInput:
"""class FileInput([files[, inplace[, backup]]])
-
+
Class FileInput is the implementation of the module; its methods
    filename(), lineno(), filelineno(), isfirstline(), isstdin(), nextfile()
and close() correspond to the functions of the same name in the module.
In addition it has a readline() method which returns the next
input line, and a __getitem__() method which implements the
sequence behavior. The sequence must be accessed in strictly
- sequential order; random access and readline() cannot be mixed.
+ sequential order; random access and readline() cannot be mixed.
"""
def __init__(self, files=None, inplace=0, backup="", bufsize=0):
# realpath is a no-op on systems without islink support
def realpath(path):
- path = abspath(path)
- try:
- import macfs
- except ImportError:
- return path
- if not path:
- return path
- components = path.split(':')
- path = components[0] + ':'
- for c in components[1:]:
- path = join(path, c)
- path = macfs.ResolveAliasFile(path)[0].as_pathname()
- return path
+ path = abspath(path)
+ try:
+ import macfs
+ except ImportError:
+ return path
+ if not path:
+ return path
+ components = path.split(':')
+ path = components[0] + ':'
+ for c in components[1:]:
+ path = join(path, c)
+ path = macfs.ResolveAliasFile(path)[0].as_pathname()
+ return path
import warnings
warnings.warn("The None return argument form of __reduce__ is "
"deprecated. Return a tuple of arguments instead.",
- DeprecationWarning)
+ DeprecationWarning)
value = callable.__basicnew__()
else:
value = apply(callable, arg_tup)
## -------------------- gamma distribution --------------------
def gammavariate(self, alpha, beta):
-
+
# alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
-
+
# Warning: a few older sources define the gamma distribution in terms
# of alpha > -1.0
if alpha <= 0.0 or beta <= 0.0:
raise ValueError, 'gammavariate: alpha and beta must be > 0.0'
-
+
random = self.random
if alpha > 1.0:
ainv = _sqrt(2.0 * alpha - 1.0)
bbb = alpha - LOG4
ccc = alpha + ainv
-
+
while 1:
u1 = random()
u2 = random()
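            # A quick sanity sketch (not part of this module): with this
            # parameterization the mean is alpha*beta, so for example
            #
            #     import random
            #     xs = [random.gammavariate(2.0, 3.0) for i in xrange(100000)]
            #     sum(xs) / len(xs)      # converges on 2.0 * 3.0 == 6.0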
_test_generator(N, 'vonmisesvariate(0.0, 1.0)')
_test_generator(N, 'gammavariate(0.01, 1.0)')
_test_generator(N, 'gammavariate(0.1, 1.0)')
- _test_generator(N, 'gammavariate(0.1, 2.0)')
+ _test_generator(N, 'gammavariate(0.1, 2.0)')
_test_generator(N, 'gammavariate(0.5, 1.0)')
_test_generator(N, 'gammavariate(0.9, 1.0)')
_test_generator(N, 'gammavariate(1.0, 1.0)')
"""The Tab Nanny despises ambiguous indentation. She knows no mercy.
-tabnanny -- Detection of ambiguous indentation
+tabnanny -- Detection of ambiguous indentation
For the time being this module is intended to be called as a script.
However it is possible to import it into an IDE and use the function
-check() described below.
+check() described below.
Warning: The API provided by this module is likely to change in future
-releases; such changes may not be backward compatible.
+releases; such changes may not be backward compatible.
"""
# Released to the public domain, by Tim Peters, 15 April 1998.
class NannyNag(Exception):
"""
Raised by tokeneater() if detecting an ambiguous indent.
- Captured and handled in check().
+ Captured and handled in check().
"""
def __init__(self, lineno, msg, line):
self.lineno, self.msg, self.line = lineno, msg, line
def check(file):
"""check(file_or_dir)
-
+
If file_or_dir is a directory and not a symbolic link, then recursively
descend the directory tree named by file_or_dir, checking all .py files
along the way. If file_or_dir is an ordinary Python source file, it is
    checked for whitespace-related problems. The diagnostic messages are
- written to standard output using the print statement.
+ written to standard output using the print statement.
"""
-
+
if os.path.isdir(file) and not os.path.islink(file):
if verbose:
print "%s: listing directory" % `file`
# strip/lstrip/rstrip with unicode arg
if have_unicode:
- test('strip', 'xyzzyhelloxyzzy',
+ test('strip', 'xyzzyhelloxyzzy',
unicode('hello', 'ascii'), unicode('xyz', 'ascii'))
- test('lstrip', 'xyzzyhelloxyzzy',
+ test('lstrip', 'xyzzyhelloxyzzy',
unicode('helloxyzzy', 'ascii'), unicode('xyz', 'ascii'))
test('rstrip', 'xyzzyhelloxyzzy',
unicode('xyzzyhello', 'ascii'), unicode('xyz', 'ascii'))
- test('strip', 'hello',
+ test('strip', 'hello',
unicode('hello', 'ascii'), unicode('xyz', 'ascii'))
test('swapcase', 'HeLLo cOmpUteRs', 'hEllO CoMPuTErS')
class Base64TestCase(unittest.TestCase):
def test_encode_string(self):
"""Testing encode string"""
- test_support.verify(base64.encodestring("www.python.org") ==
- "d3d3LnB5dGhvbi5vcmc=\n",
+ test_support.verify(base64.encodestring("www.python.org") ==
+ "d3d3LnB5dGhvbi5vcmc=\n",
reason="www.python.org encodestring failed")
- test_support.verify(base64.encodestring("a") ==
- "YQ==\n",
+ test_support.verify(base64.encodestring("a") ==
+ "YQ==\n",
reason="a encodestring failed")
- test_support.verify(base64.encodestring("ab") ==
- "YWI=\n",
+ test_support.verify(base64.encodestring("ab") ==
+ "YWI=\n",
reason="ab encodestring failed")
- test_support.verify(base64.encodestring("abc") ==
- "YWJj\n",
+ test_support.verify(base64.encodestring("abc") ==
+ "YWJj\n",
reason="abc encodestring failed")
- test_support.verify(base64.encodestring("") ==
- "",
+ test_support.verify(base64.encodestring("") ==
+ "",
reason="null encodestring failed")
test_support.verify(base64.encodestring(
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#0^&*();:<>,. []{}") ==
def test_decode_string(self):
"""Testing decode string"""
test_support.verify(base64.decodestring("d3d3LnB5dGhvbi5vcmc=\n") ==
- "www.python.org",
+ "www.python.org",
reason="www.python.org decodestring failed")
test_support.verify(base64.decodestring("YQ==\n") ==
- "a",
+ "a",
reason="a decodestring failed")
test_support.verify(base64.decodestring("YWI=\n") ==
- "ab",
+ "ab",
reason="ab decodestring failed")
test_support.verify(base64.decodestring("YWJj\n") ==
- "abc",
+ "abc",
reason="abc decodestring failed")
test_support.verify(base64.decodestring(
"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNTY3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n") ==
pass
else:
self.fail("expected a binascii.Error on null decode request")
-
+
def test_main():
test_support.run_unittest(Base64TestCase)
if __name__ == "__main__":
test_main()
-
# Verify the treatment of Unicode strings
if have_unicode:
- verify(binascii.hexlify(unicode('a', 'ascii')) == '61',
+ verify(binascii.hexlify(unicode('a', 'ascii')) == '61',
"hexlify failed for Unicode")
# A test for SF bug 534347 (segfaults without the proper fix)
__bases__ = property(getbases)
class S(C): pass
-
+
self.assertRaises(TypeError, issubclass, C(), S())
# Like above, but test the second branch, where the __bases__ of the
# combinations.
def test_isinstance_normal(self):
- # normal instances
+ # normal instances
self.assertEqual(True, isinstance(Super(), Super))
self.assertEqual(False, isinstance(Super(), Child))
self.assertEqual(False, isinstance(Super(), AbstractSuper))
self.assertEqual(False, isinstance(Child(), AbstractSuper))
def test_isinstance_abstract(self):
- # abstract instances
+ # abstract instances
self.assertEqual(True, isinstance(AbstractSuper(), AbstractSuper))
self.assertEqual(False, isinstance(AbstractSuper(), AbstractChild))
self.assertEqual(False, isinstance(AbstractSuper(), Super))
self.assertEqual(True, isinstance(AbstractChild(), AbstractSuper))
self.assertEqual(False, isinstance(AbstractChild(), Super))
self.assertEqual(False, isinstance(AbstractChild(), Child))
-
+
def test_subclass_normal(self):
# normal classes
self.assertEqual(True, issubclass(Super, Super))
self.assertEqual(True, issubclass(AbstractChild, AbstractSuper))
self.assertEqual(False, issubclass(AbstractChild, Super))
self.assertEqual(False, issubclass(AbstractChild, Child))
-
+
\f
print 'radians'
testit('radians(180)', math.radians(180), math.pi)
testit('radians(90)', math.radians(90), math.pi/2)
-testit('radians(-45)', math.radians(-45), -math.pi/4)
+testit('radians(-45)', math.radians(-45), -math.pi/4)
print 'sin'
testit('sin(0)', math.sin(0), 0)
value = apply(f, (input,) + args)
if value is input:
if verbose:
- print 'no'
+ print 'no'
print '*',f, `input`, `output`, `value`
return
if value != output:
exc = sys.exc_info()[:2]
if value is input:
if verbose:
- print 'no'
+ print 'no'
print '*',f, `input`, `output`, `value`
return
if value != output or type(value) is not type(output):
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
-
+
The first parameter, readline, must be a callable object which provides
the same interface as the readline() method of built-in file objects.
- Each call to the function should return one line of input as a string.
+ Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is
called once for each token, with five arguments, corresponding to the
- tuples generated by generate_tokens().
+ tuples generated by generate_tokens().
"""
try:
tokenize_loop(readline, tokeneater)
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string.
-
+
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
- logical line; continuation lines are included.
+ logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
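    # A minimal usage sketch of the generator interface described above
    # (assuming this module is importable as tokenize):
    #
    #     import tokenize, StringIO
    #     src = StringIO.StringIO('x = 1\n')
    #     for toktype, tokstring, start, end, line in \
    #             tokenize.generate_tokens(src.readline):
    #         print tokenize.tok_name[toktype], repr(tokstring), start, end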