Mass check-in after untabifying all files that need it.
author    Guido van Rossum <guido@python.org>  Thu, 26 Mar 1998 21:13:24 +0000 (21:13 +0000)
committer Guido van Rossum <guido@python.org>  Thu, 26 Mar 1998 21:13:24 +0000 (21:13 +0000)
50 files changed:
Lib/BaseHTTPServer.py
Lib/Bastion.py
Lib/CGIHTTPServer.py
Lib/ConfigParser.py
Lib/MimeWriter.py
Lib/Queue.py
Lib/SimpleHTTPServer.py
Lib/SocketServer.py
Lib/UserDict.py
Lib/cgi.py
Lib/code.py
Lib/commands.py
Lib/compileall.py
Lib/exceptions.py
Lib/fileinput.py
Lib/formatter.py
Lib/getopt.py
Lib/gzip.py
Lib/htmllib.py
Lib/ihooks.py
Lib/keyword.py
Lib/knee.py
Lib/locale.py
Lib/macurl2path.py
Lib/mailcap.py
Lib/mhlib.py
Lib/mimetypes.py
Lib/pickle.py
Lib/popen2.py
Lib/posixfile.py
Lib/pprint.py
Lib/py_compile.py
Lib/re.py
Lib/reconvert.py
Lib/rfc822.py
Lib/rlcompleter.py
Lib/sgmllib.py
Lib/shutil.py
Lib/site.py
Lib/smtplib.py
Lib/symbol.py
Lib/telnetlib.py
Lib/tempfile.py
Lib/token.py
Lib/types.py
Lib/user.py
Lib/uu.py
Lib/whichdb.py
Lib/xdrlib.py
Lib/xmllib.py

Lib/BaseHTTPServer.py
index 7dadd71a555d0845fa418db3016bb75b9945aff6..7c8975d3a421858e213d0e122bded5ea50b114d4 100644 (file)
@@ -89,19 +89,19 @@ DEFAULT_ERROR_MESSAGE = """\
 class HTTPServer(SocketServer.TCPServer):
 
     def server_bind(self):
-       """Override server_bind to store the server name."""
-       SocketServer.TCPServer.server_bind(self)
-       host, port = self.socket.getsockname()
-       if not host or host == '0.0.0.0':
-           host = socket.gethostname()
-       hostname, hostnames, hostaddrs = socket.gethostbyaddr(host)
-       if '.' not in hostname:
-           for host in hostnames:
-               if '.' in host:
-                   hostname = host
-                   break
-       self.server_name = hostname
-       self.server_port = port
+        """Override server_bind to store the server name."""
+        SocketServer.TCPServer.server_bind(self)
+        host, port = self.socket.getsockname()
+        if not host or host == '0.0.0.0':
+            host = socket.gethostname()
+        hostname, hostnames, hostaddrs = socket.gethostbyaddr(host)
+        if '.' not in hostname:
+            for host in hostnames:
+                if '.' in host:
+                    hostname = host
+                    break
+        self.server_name = hostname
+        self.server_port = port
 
 
 class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
@@ -217,196 +217,196 @@ class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
     server_version = "BaseHTTP/" + __version__
 
     def handle(self):
-       """Handle a single HTTP request.
-
-       You normally don't need to override this method; see the class
-       __doc__ string for information on how to handle specific HTTP
-       commands such as GET and POST.
-
-       """
-
-       self.raw_requestline = self.rfile.readline()
-       self.request_version = version = "HTTP/0.9" # Default
-       requestline = self.raw_requestline
-       if requestline[-2:] == '\r\n':
-           requestline = requestline[:-2]
-       elif requestline[-1:] == '\n':
-           requestline = requestline[:-1]
-       self.requestline = requestline
-       words = string.split(requestline)
-       if len(words) == 3:
-           [command, path, version] = words
-           if version[:5] != 'HTTP/':
-               self.send_error(400, "Bad request version (%s)" % `version`)
-               return
-       elif len(words) == 2:
-           [command, path] = words
-           if command != 'GET':
-               self.send_error(400,
-                               "Bad HTTP/0.9 request type (%s)" % `command`)
-               return
-       else:
-           self.send_error(400, "Bad request syntax (%s)" % `requestline`)
-           return
-       self.command, self.path, self.request_version = command, path, version
-       self.headers = self.MessageClass(self.rfile, 0)
-       mname = 'do_' + command
-       if not hasattr(self, mname):
-           self.send_error(501, "Unsupported method (%s)" % `mname`)
-           return
-       method = getattr(self, mname)
-       method()
+        """Handle a single HTTP request.
+
+        You normally don't need to override this method; see the class
+        __doc__ string for information on how to handle specific HTTP
+        commands such as GET and POST.
+
+        """
+
+        self.raw_requestline = self.rfile.readline()
+        self.request_version = version = "HTTP/0.9" # Default
+        requestline = self.raw_requestline
+        if requestline[-2:] == '\r\n':
+            requestline = requestline[:-2]
+        elif requestline[-1:] == '\n':
+            requestline = requestline[:-1]
+        self.requestline = requestline
+        words = string.split(requestline)
+        if len(words) == 3:
+            [command, path, version] = words
+            if version[:5] != 'HTTP/':
+                self.send_error(400, "Bad request version (%s)" % `version`)
+                return
+        elif len(words) == 2:
+            [command, path] = words
+            if command != 'GET':
+                self.send_error(400,
+                                "Bad HTTP/0.9 request type (%s)" % `command`)
+                return
+        else:
+            self.send_error(400, "Bad request syntax (%s)" % `requestline`)
+            return
+        self.command, self.path, self.request_version = command, path, version
+        self.headers = self.MessageClass(self.rfile, 0)
+        mname = 'do_' + command
+        if not hasattr(self, mname):
+            self.send_error(501, "Unsupported method (%s)" % `mname`)
+            return
+        method = getattr(self, mname)
+        method()
 
     def send_error(self, code, message=None):
-       """Send and log an error reply.
-
-       Arguments are the error code, and a detailed message.
-       The detailed message defaults to the short entry matching the
-       response code.
-
-       This sends an error response (so it must be called before any
-       output has been generated), logs the error, and finally sends
-       a piece of HTML explaining the error to the user.
-
-       """
-
-       try:
-           short, long = self.responses[code]
-       except KeyError:
-           short, long = '???', '???'
-       if not message:
-           message = short
-       explain = long
-       self.log_error("code %d, message %s", code, message)
-       self.send_response(code, message)
-       self.end_headers()
-       self.wfile.write(self.error_message_format %
-                        {'code': code,
-                         'message': message,
-                         'explain': explain})
+        """Send and log an error reply.
+
+        Arguments are the error code, and a detailed message.
+        The detailed message defaults to the short entry matching the
+        response code.
+
+        This sends an error response (so it must be called before any
+        output has been generated), logs the error, and finally sends
+        a piece of HTML explaining the error to the user.
+
+        """
+
+        try:
+            short, long = self.responses[code]
+        except KeyError:
+            short, long = '???', '???'
+        if not message:
+            message = short
+        explain = long
+        self.log_error("code %d, message %s", code, message)
+        self.send_response(code, message)
+        self.end_headers()
+        self.wfile.write(self.error_message_format %
+                         {'code': code,
+                          'message': message,
+                          'explain': explain})
 
     error_message_format = DEFAULT_ERROR_MESSAGE
 
     def send_response(self, code, message=None):
-       """Send the response header and log the response code.
-
-       Also send two standard headers with the server software
-       version and the current date.
-
-       """
-       self.log_request(code)
-       if message is None:
-           if self.responses.has_key(code):
-               message = self.responses[code][0]
-           else:
-               message = ''
-       if self.request_version != 'HTTP/0.9':
-           self.wfile.write("%s %s %s\r\n" %
-                            (self.protocol_version, str(code), message))
-       self.send_header('Server', self.version_string())
-       self.send_header('Date', self.date_time_string())
+        """Send the response header and log the response code.
+
+        Also send two standard headers with the server software
+        version and the current date.
+
+        """
+        self.log_request(code)
+        if message is None:
+            if self.responses.has_key(code):
+                message = self.responses[code][0]
+            else:
+                message = ''
+        if self.request_version != 'HTTP/0.9':
+            self.wfile.write("%s %s %s\r\n" %
+                             (self.protocol_version, str(code), message))
+        self.send_header('Server', self.version_string())
+        self.send_header('Date', self.date_time_string())
 
     def send_header(self, keyword, value):
-       """Send a MIME header."""
-       if self.request_version != 'HTTP/0.9':
-           self.wfile.write("%s: %s\r\n" % (keyword, value))
+        """Send a MIME header."""
+        if self.request_version != 'HTTP/0.9':
+            self.wfile.write("%s: %s\r\n" % (keyword, value))
 
     def end_headers(self):
-       """Send the blank line ending the MIME headers."""
-       if self.request_version != 'HTTP/0.9':
-           self.wfile.write("\r\n")
+        """Send the blank line ending the MIME headers."""
+        if self.request_version != 'HTTP/0.9':
+            self.wfile.write("\r\n")
 
     def log_request(self, code='-', size='-'):
-       """Log an accepted request.
+        """Log an accepted request.
 
-       This is called by send_response().
+        This is called by send_response().
 
-       """
+        """
 
-       self.log_message('"%s" %s %s',
-                        self.requestline, str(code), str(size))
+        self.log_message('"%s" %s %s',
+                         self.requestline, str(code), str(size))
 
     def log_error(self, *args):
-       """Log an error.
+        """Log an error.
 
-       This is called when a request cannot be fulfilled.  By
-       default it passes the message on to log_message().
+        This is called when a request cannot be fulfilled.  By
+        default it passes the message on to log_message().
 
-       Arguments are the same as for log_message().
+        Arguments are the same as for log_message().
 
-       XXX This should go to the separate error log.
+        XXX This should go to the separate error log.
 
-       """
+        """
 
-       apply(self.log_message, args)
+        apply(self.log_message, args)
 
     def log_message(self, format, *args):
-       """Log an arbitrary message.
+        """Log an arbitrary message.
 
-       This is used by all other logging functions.  Override
-       it if you have specific logging wishes.
+        This is used by all other logging functions.  Override
+        it if you have specific logging wishes.
 
-       The first argument, FORMAT, is a format string for the
-       message to be logged.  If the format string contains
-       any % escapes requiring parameters, they should be
-       specified as subsequent arguments (it's just like
-       printf!).
+        The first argument, FORMAT, is a format string for the
+        message to be logged.  If the format string contains
+        any % escapes requiring parameters, they should be
+        specified as subsequent arguments (it's just like
+        printf!).
 
-       The client host and current date/time are prefixed to
-       every message.
+        The client host and current date/time are prefixed to
+        every message.
 
-       """
+        """
 
-       sys.stderr.write("%s - - [%s] %s\n" %
-                        (self.address_string(),
-                         self.log_date_time_string(),
-                         format%args))
+        sys.stderr.write("%s - - [%s] %s\n" %
+                         (self.address_string(),
+                          self.log_date_time_string(),
+                          format%args))
 
     def version_string(self):
-       """Return the server software version string."""
-       return self.server_version + ' ' + self.sys_version
+        """Return the server software version string."""
+        return self.server_version + ' ' + self.sys_version
 
     def date_time_string(self):
-       """Return the current date and time formatted for a message header."""
-       now = time.time()
-       year, month, day, hh, mm, ss, wd, y, z = time.gmtime(now)
-       s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
-               self.weekdayname[wd],
-               day, self.monthname[month], year,
-               hh, mm, ss)
-       return s
+        """Return the current date and time formatted for a message header."""
+        now = time.time()
+        year, month, day, hh, mm, ss, wd, y, z = time.gmtime(now)
+        s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
+                self.weekdayname[wd],
+                day, self.monthname[month], year,
+                hh, mm, ss)
+        return s
 
     def log_date_time_string(self):
-       """Return the current time formatted for logging."""
-       now = time.time()
-       year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
-       s = "%02d/%3s/%04d %02d:%02d:%02d" % (
-               day, self.monthname[month], year, hh, mm, ss)
-       return s
+        """Return the current time formatted for logging."""
+        now = time.time()
+        year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
+        s = "%02d/%3s/%04d %02d:%02d:%02d" % (
+                day, self.monthname[month], year, hh, mm, ss)
+        return s
 
     weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
 
     monthname = [None,
-                'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
-                'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+                 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+                 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
 
     def address_string(self):
-       """Return the client address formatted for logging.
+        """Return the client address formatted for logging.
 
-       This version looks up the full hostname using gethostbyaddr(),
-       and tries to find a name that contains at least one dot.
+        This version looks up the full hostname using gethostbyaddr(),
+        and tries to find a name that contains at least one dot.
 
-       """
+        """
 
-       (host, port) = self.client_address
-       try:
-           name, names, addresses = socket.gethostbyaddr(host)
-       except socket.error, msg:
-           return host
-       names.insert(0, name)
-       for name in names:
-           if '.' in name: return name
-       return names[0]
+        (host, port) = self.client_address
+        try:
+            name, names, addresses = socket.gethostbyaddr(host)
+        except socket.error, msg:
+            return host
+        names.insert(0, name)
+        for name in names:
+            if '.' in name: return name
+        return names[0]
 
 
     # Essentially static class variables
@@ -423,42 +423,42 @@ class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
     # form {code: (shortmessage, longmessage)}.
     # See http://www.w3.org/hypertext/WWW/Protocols/HTTP/HTRESP.html
     responses = {
-       200: ('OK', 'Request fulfilled, document follows'),
-       201: ('Created', 'Document created, URL follows'),
-       202: ('Accepted',
-             'Request accepted, processing continues off-line'),
-       203: ('Partial information', 'Request fulfilled from cache'),
-       204: ('No response', 'Request fulfilled, nothing follows'),
-       
-       301: ('Moved', 'Object moved permanently -- see URI list'),
-       302: ('Found', 'Object moved temporarily -- see URI list'),
-       303: ('Method', 'Object moved -- see Method and URL list'),
-       304: ('Not modified',
-             'Document has not changed since given time'),
-       
-       400: ('Bad request',
-             'Bad request syntax or unsupported method'),
-       401: ('Unauthorized',
-             'No permission -- see authorization schemes'),
-       402: ('Payment required',
-             'No payment -- see charging schemes'),
-       403: ('Forbidden',
-             'Request forbidden -- authorization will not help'),
-       404: ('Not found', 'Nothing matches the given URI'),
-       
-       500: ('Internal error', 'Server got itself in trouble'),
-       501: ('Not implemented',
-             'Server does not support this operation'),
-       502: ('Service temporarily overloaded',
-             'The server cannot process the request due to a high load'),
-       503: ('Gateway timeout',
-             'The gateway server did not receive a timely response'),
-       
-       }
+        200: ('OK', 'Request fulfilled, document follows'),
+        201: ('Created', 'Document created, URL follows'),
+        202: ('Accepted',
+              'Request accepted, processing continues off-line'),
+        203: ('Partial information', 'Request fulfilled from cache'),
+        204: ('No response', 'Request fulfilled, nothing follows'),
+        
+        301: ('Moved', 'Object moved permanently -- see URI list'),
+        302: ('Found', 'Object moved temporarily -- see URI list'),
+        303: ('Method', 'Object moved -- see Method and URL list'),
+        304: ('Not modified',
+              'Document has not changed since given time'),
+        
+        400: ('Bad request',
+              'Bad request syntax or unsupported method'),
+        401: ('Unauthorized',
+              'No permission -- see authorization schemes'),
+        402: ('Payment required',
+              'No payment -- see charging schemes'),
+        403: ('Forbidden',
+              'Request forbidden -- authorization will not help'),
+        404: ('Not found', 'Nothing matches the given URI'),
+        
+        500: ('Internal error', 'Server got itself in trouble'),
+        501: ('Not implemented',
+              'Server does not support this operation'),
+        502: ('Service temporarily overloaded',
+              'The server cannot process the request due to a high load'),
+        503: ('Gateway timeout',
+              'The gateway server did not receive a timely response'),
+        
+        }
 
 
 def test(HandlerClass = BaseHTTPRequestHandler,
-        ServerClass = HTTPServer):
+         ServerClass = HTTPServer):
     """Test the HTTP request handler class.
 
     This runs an HTTP server on port 8000 (or the first command line
@@ -467,9 +467,9 @@ def test(HandlerClass = BaseHTTPRequestHandler,
     """
 
     if sys.argv[1:]:
-       port = string.atoi(sys.argv[1])
+        port = string.atoi(sys.argv[1])
     else:
-       port = 8000
+        port = 8000
     server_address = ('', port)
 
     httpd = ServerClass(server_address, HandlerClass)
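
For orientation, a minimal usage sketch of the BaseHTTPServer API reindented above (the hunks themselves only change whitespace); the handler name, reply text, and port 8000 are illustrative assumptions, not part of the diff:

    # Sketch, assuming the 1998-era BaseHTTPServer module shown above.
    import BaseHTTPServer

    class HelloHandler(BaseHTTPServer.BaseHTTPRequestHandler):
        def do_GET(self):
            # handle() dispatches the request here via the 'do_' + command lookup
            self.send_response(200)
            self.send_header('Content-Type', 'text/plain')
            self.end_headers()
            self.wfile.write("You asked for %s\r\n" % self.path)

    server = BaseHTTPServer.HTTPServer(('', 8000), HelloHandler)
    server.handle_request()        # serve a single request, then return
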
Lib/Bastion.py
index 9411ff998db44be64435979352ffe8e2d1592ec6..a6e716bf308619ffa5c38fde4a0e844d920a9d12 100644 (file)
@@ -41,47 +41,47 @@ class BastionClass:
     """
 
     def __init__(self, get, name):
-       """Constructor.
+        """Constructor.
 
-       Arguments:
+        Arguments:
 
-       get - a function that gets the attribute value (by name)
-       name - a human-readable name for the original object
-              (suggestion: use repr(object))
+        get - a function that gets the attribute value (by name)
+        name - a human-readable name for the original object
+               (suggestion: use repr(object))
 
-       """
-       self._get_ = get
-       self._name_ = name
+        """
+        self._get_ = get
+        self._name_ = name
 
     def __repr__(self):
-       """Return a representation string.
+        """Return a representation string.
 
-       This includes the name passed in to the constructor, so that
-       if you print the bastion during debugging, at least you have
-       some idea of what it is.
+        This includes the name passed in to the constructor, so that
+        if you print the bastion during debugging, at least you have
+        some idea of what it is.
 
-       """
-       return "<Bastion for %s>" % self._name_
+        """
+        return "<Bastion for %s>" % self._name_
 
     def __getattr__(self, name):
-       """Get an as-yet undefined attribute value.
+        """Get an as-yet undefined attribute value.
 
-       This calls the get() function that was passed to the
-       constructor.  The result is stored as an instance variable so
-       that the next time the same attribute is requested,
-       __getattr__() won't be invoked.
+        This calls the get() function that was passed to the
+        constructor.  The result is stored as an instance variable so
+        that the next time the same attribute is requested,
+        __getattr__() won't be invoked.
 
-       If the get() function raises an exception, this is simply
-       passed on -- exceptions are not cached.
+        If the get() function raises an exception, this is simply
+        passed on -- exceptions are not cached.
 
-       """
-       attribute = self._get_(name)
-       self.__dict__[name] = attribute
-       return attribute
+        """
+        attribute = self._get_(name)
+        self.__dict__[name] = attribute
+        return attribute
 
 
 def Bastion(object, filter = lambda name: name[:1] != '_',
-           name=None, bastionclass=BastionClass):
+            name=None, bastionclass=BastionClass):
     """Create a bastion for an object, using an optional filter.
 
     See the Bastion module's documentation for background.
@@ -109,33 +109,33 @@ def Bastion(object, filter = lambda name: name[:1] != '_',
     # the user has full access to all instance variables!
 
     def get1(name, object=object, filter=filter):
-       """Internal function for Bastion().  See source comments."""
-       if filter(name):
-           attribute = getattr(object, name)
-           if type(attribute) == MethodType:
-               return attribute
-       raise AttributeError, name
+        """Internal function for Bastion().  See source comments."""
+        if filter(name):
+            attribute = getattr(object, name)
+            if type(attribute) == MethodType:
+                return attribute
+        raise AttributeError, name
 
     def get2(name, get1=get1):
-       """Internal function for Bastion().  See source comments."""
-       return get1(name)
+        """Internal function for Bastion().  See source comments."""
+        return get1(name)
 
     if name is None:
-       name = `object`
+        name = `object`
     return bastionclass(get2, name)
 
 
 def _test():
     """Test the Bastion() function."""
     class Original:
-       def __init__(self):
-           self.sum = 0
-       def add(self, n):
-           self._add(n)
-       def _add(self, n):
-           self.sum = self.sum + n
-       def total(self):
-           return self.sum
+        def __init__(self):
+            self.sum = 0
+        def add(self, n):
+            self._add(n)
+        def _add(self, n):
+            self.sum = self.sum + n
+        def total(self):
+            return self.sum
     o = Original()
     b = Bastion(o)
     testcode = """if 1:
@@ -143,23 +143,23 @@ def _test():
     b.add(18)
     print "b.total() =", b.total()
     try:
-       print "b.sum =", b.sum,
+        print "b.sum =", b.sum,
     except:
-       print "inaccessible"
+        print "inaccessible"
     else:
-       print "accessible"
+        print "accessible"
     try:
-       print "b._add =", b._add,
+        print "b._add =", b._add,
     except:
-       print "inaccessible"
+        print "inaccessible"
     else:
-       print "accessible"
+        print "accessible"
     try:
-       print "b._get_.func_defaults =", b._get_.func_defaults,
+        print "b._get_.func_defaults =", b._get_.func_defaults,
     except:
-       print "inaccessible"
+        print "inaccessible"
     else:
-       print "accessible"
+        print "accessible"
     \n"""
     exec testcode
     print '='*20, "Using rexec:", '='*20
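
A minimal sketch of the Bastion() function reindented above, mirroring the _test() code; the Account class and its names are illustrative assumptions:

    # Sketch, assuming the 1998-era Bastion module shown above.
    from Bastion import Bastion

    class Account:
        def __init__(self):
            self._balance = 0
        def deposit(self, n):
            self._balance = self._balance + n
        def balance(self):
            return self._balance

    safe = Bastion(Account())
    safe.deposit(10)
    print safe.balance()            # public methods pass the default filter
    try:
        print safe._balance         # names starting with '_' are filtered out
    except AttributeError:
        print "inaccessible"
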
Lib/CGIHTTPServer.py
index b40edbced47e3343e9946cdf6856af3e31f0c3b5..bb8cb2d239c1d48410fd4aa594d1d873c644c1dc 100644 (file)
@@ -30,138 +30,138 @@ class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
     """
 
     def do_POST(self):
-       """Serve a POST request.
+        """Serve a POST request.
 
-       This is only implemented for CGI scripts.
+        This is only implemented for CGI scripts.
 
-       """
+        """
 
-       if self.is_cgi():
-           self.run_cgi()
-       else:
-           self.send_error(501, "Can only POST to CGI scripts")
+        if self.is_cgi():
+            self.run_cgi()
+        else:
+            self.send_error(501, "Can only POST to CGI scripts")
 
     def send_head(self):
-       """Version of send_head that supports CGI scripts"""
-       if self.is_cgi():
-           return self.run_cgi()
-       else:
-           return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
+        """Version of send_head that supports CGI scripts"""
+        if self.is_cgi():
+            return self.run_cgi()
+        else:
+            return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
 
     def is_cgi(self):
-       """test whether PATH corresponds to a CGI script.
+        """test whether PATH corresponds to a CGI script.
 
-       Return a tuple (dir, rest) if PATH requires running a
-       CGI script, None if not.  Note that rest begins with a
-       slash if it is not empty.
+        Return a tuple (dir, rest) if PATH requires running a
+        CGI script, None if not.  Note that rest begins with a
+        slash if it is not empty.
 
-       The default implementation tests whether the path
-       begins with one of the strings in the list
-       self.cgi_directories (and the next character is a '/'
-       or the end of the string).
+        The default implementation tests whether the path
+        begins with one of the strings in the list
+        self.cgi_directories (and the next character is a '/'
+        or the end of the string).
 
-       """
+        """
 
-       path = self.path
+        path = self.path
 
-       for x in self.cgi_directories:
-           i = len(x)
-           if path[:i] == x and (not path[i:] or path[i] == '/'):
-               self.cgi_info = path[:i], path[i+1:]
-               return 1
-       return 0
+        for x in self.cgi_directories:
+            i = len(x)
+            if path[:i] == x and (not path[i:] or path[i] == '/'):
+                self.cgi_info = path[:i], path[i+1:]
+                return 1
+        return 0
 
     cgi_directories = ['/cgi-bin', '/htbin']
 
     def run_cgi(self):
-       """Execute a CGI script."""
-       dir, rest = self.cgi_info
-       i = string.rfind(rest, '?')
-       if i >= 0:
-           rest, query = rest[:i], rest[i+1:]
-       else:
-           query = ''
-       i = string.find(rest, '/')
-       if i >= 0:
-           script, rest = rest[:i], rest[i:]
-       else:
-           script, rest = rest, ''
-       scriptname = dir + '/' + script
-       scriptfile = self.translate_path(scriptname)
-       if not os.path.exists(scriptfile):
-           self.send_error(404, "No such CGI script (%s)" % `scriptname`)
-           return
-       if not os.path.isfile(scriptfile):
-           self.send_error(403, "CGI script is not a plain file (%s)" %
-                           `scriptname`)
-           return
-       if not executable(scriptfile):
-           self.send_error(403, "CGI script is not executable (%s)" %
-                           `scriptname`)
-           return
-       nobody = nobody_uid()
-       self.send_response(200, "Script output follows")
-       self.wfile.flush() # Always flush before forking
-       pid = os.fork()
-       if pid != 0:
-           # Parent
-           pid, sts = os.waitpid(pid, 0)
-           if sts:
-               self.log_error("CGI script exit status x%x" % sts)
-           return
-       # Child
-       try:
-           # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
-           # XXX Much of the following could be prepared ahead of time!
-           env = {}
-           env['SERVER_SOFTWARE'] = self.version_string()
-           env['SERVER_NAME'] = self.server.server_name
-           env['GATEWAY_INTERFACE'] = 'CGI/1.1'
-           env['SERVER_PROTOCOL'] = self.protocol_version
-           env['SERVER_PORT'] = str(self.server.server_port)
-           env['REQUEST_METHOD'] = self.command
-           uqrest = urllib.unquote(rest)
-           env['PATH_INFO'] = uqrest
-           env['PATH_TRANSLATED'] = self.translate_path(uqrest)
-           env['SCRIPT_NAME'] = scriptname
-           if query:
-               env['QUERY_STRING'] = query
-           host = self.address_string()
-           if host != self.client_address[0]:
-               env['REMOTE_HOST'] = host
-           env['REMOTE_ADDR'] = self.client_address[0]
-           # AUTH_TYPE
-           # REMOTE_USER
-           # REMOTE_IDENT
-           env['CONTENT_TYPE'] = self.headers.type
-           length = self.headers.getheader('content-length')
-           if length:
-               env['CONTENT_LENGTH'] = length
-           accept = []
-           for line in self.headers.getallmatchingheaders('accept'):
-               if line[:1] in string.whitespace:
-                   accept.append(string.strip(line))
-               else:
-                   accept = accept + string.split(line[7:])
-           env['HTTP_ACCEPT'] = string.joinfields(accept, ',')
-           ua = self.headers.getheader('user-agent')
-           if ua:
-               env['HTTP_USER_AGENT'] = ua
-           # XXX Other HTTP_* headers
-           decoded_query = string.replace(query, '+', ' ')
-           try:
-               os.setuid(nobody)
-           except os.error:
-               pass
-           os.dup2(self.rfile.fileno(), 0)
-           os.dup2(self.wfile.fileno(), 1)
-           print scriptfile, script, decoded_query
-           os.execve(scriptfile,
-                     [script, decoded_query],
-                     env)
-       except:
-           self.server.handle_error(self.request, self.client_address)
-           os._exit(127)
+        """Execute a CGI script."""
+        dir, rest = self.cgi_info
+        i = string.rfind(rest, '?')
+        if i >= 0:
+            rest, query = rest[:i], rest[i+1:]
+        else:
+            query = ''
+        i = string.find(rest, '/')
+        if i >= 0:
+            script, rest = rest[:i], rest[i:]
+        else:
+            script, rest = rest, ''
+        scriptname = dir + '/' + script
+        scriptfile = self.translate_path(scriptname)
+        if not os.path.exists(scriptfile):
+            self.send_error(404, "No such CGI script (%s)" % `scriptname`)
+            return
+        if not os.path.isfile(scriptfile):
+            self.send_error(403, "CGI script is not a plain file (%s)" %
+                            `scriptname`)
+            return
+        if not executable(scriptfile):
+            self.send_error(403, "CGI script is not executable (%s)" %
+                            `scriptname`)
+            return
+        nobody = nobody_uid()
+        self.send_response(200, "Script output follows")
+        self.wfile.flush() # Always flush before forking
+        pid = os.fork()
+        if pid != 0:
+            # Parent
+            pid, sts = os.waitpid(pid, 0)
+            if sts:
+                self.log_error("CGI script exit status x%x" % sts)
+            return
+        # Child
+        try:
+            # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
+            # XXX Much of the following could be prepared ahead of time!
+            env = {}
+            env['SERVER_SOFTWARE'] = self.version_string()
+            env['SERVER_NAME'] = self.server.server_name
+            env['GATEWAY_INTERFACE'] = 'CGI/1.1'
+            env['SERVER_PROTOCOL'] = self.protocol_version
+            env['SERVER_PORT'] = str(self.server.server_port)
+            env['REQUEST_METHOD'] = self.command
+            uqrest = urllib.unquote(rest)
+            env['PATH_INFO'] = uqrest
+            env['PATH_TRANSLATED'] = self.translate_path(uqrest)
+            env['SCRIPT_NAME'] = scriptname
+            if query:
+                env['QUERY_STRING'] = query
+            host = self.address_string()
+            if host != self.client_address[0]:
+                env['REMOTE_HOST'] = host
+            env['REMOTE_ADDR'] = self.client_address[0]
+            # AUTH_TYPE
+            # REMOTE_USER
+            # REMOTE_IDENT
+            env['CONTENT_TYPE'] = self.headers.type
+            length = self.headers.getheader('content-length')
+            if length:
+                env['CONTENT_LENGTH'] = length
+            accept = []
+            for line in self.headers.getallmatchingheaders('accept'):
+                if line[:1] in string.whitespace:
+                    accept.append(string.strip(line))
+                else:
+                    accept = accept + string.split(line[7:])
+            env['HTTP_ACCEPT'] = string.joinfields(accept, ',')
+            ua = self.headers.getheader('user-agent')
+            if ua:
+                env['HTTP_USER_AGENT'] = ua
+            # XXX Other HTTP_* headers
+            decoded_query = string.replace(query, '+', ' ')
+            try:
+                os.setuid(nobody)
+            except os.error:
+                pass
+            os.dup2(self.rfile.fileno(), 0)
+            os.dup2(self.wfile.fileno(), 1)
+            print scriptfile, script, decoded_query
+            os.execve(scriptfile,
+                      [script, decoded_query],
+                      env)
+        except:
+            self.server.handle_error(self.request, self.client_address)
+            os._exit(127)
 
 
 nobody = None
@@ -170,26 +170,26 @@ def nobody_uid():
     """Internal routine to get nobody's uid"""
     global nobody
     if nobody:
-       return nobody
+        return nobody
     import pwd
     try:
-       nobody = pwd.getpwnam('nobody')[2]
+        nobody = pwd.getpwnam('nobody')[2]
     except pwd.error:
-       nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
+        nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
     return nobody
 
 
 def executable(path):
     """Test for executable file."""
     try:
-       st = os.stat(path)
+        st = os.stat(path)
     except os.error:
-       return 0
+        return 0
     return st[0] & 0111 != 0
 
 
 def test(HandlerClass = CGIHTTPRequestHandler,
-        ServerClass = BaseHTTPServer.HTTPServer):
+         ServerClass = BaseHTTPServer.HTTPServer):
     SimpleHTTPServer.test(HandlerClass, ServerClass)
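
A minimal sketch of serving CGI scripts with the handler reindented above; the port is an illustrative assumption, and a Unix-like system is needed because run_cgi() uses os.fork():

    # Sketch, assuming the 1998-era CGIHTTPServer and BaseHTTPServer modules.
    import BaseHTTPServer, CGIHTTPServer

    # Scripts are looked up under the paths in cgi_directories
    # ('/cgi-bin' and '/htbin' by default, as set above).
    httpd = BaseHTTPServer.HTTPServer(('', 8000),
                                      CGIHTTPServer.CGIHTTPRequestHandler)
    httpd.serve_forever()
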
 
 
Lib/ConfigParser.py
index 3d44bb52f1c469be71b3e41006fd2f270a4e890f..957222c27f77657dcd38ffb10cdc182e0f08c34b 100644 (file)
@@ -34,23 +34,23 @@ ConfigParser -- responsible for parsing a list of
     sections() -- return all the configuration section names, sans DEFAULT
 
     options(section) -- return list of configuration options for the named
-                       section
+                        section
 
     read(*filenames) -- read and parse the list of named configuration files
 
     get(section, option, raw=0) -- return a string value for the named
-                                  option.  All % interpolations are
-                                  expanded in the return values, based on
-                                  the defaults passed into the constructor
-                                  and the DEFAULT section.
+                                   option.  All % interpolations are
+                                   expanded in the return values, based on
+                                   the defaults passed into the constructor
+                                   and the DEFAULT section.
 
     getint(section, options) -- like get(), but convert value to an integer
 
     getfloat(section, options) -- like get(), but convert value to a float
 
     getboolean(section, options) -- like get(), but convert value to
-                                   a boolean (currently defined as 0
-                                   or 1, only)
+                                    a boolean (currently defined as 0
+                                    or 1, only)
 """
 
 import sys
@@ -71,186 +71,186 @@ DEFAULTSECT = "DEFAULT"
 # exception classes
 class Error:
     def __init__(self, msg=''):
-       self.__msg = msg
+        self.__msg = msg
     def __repr__(self):
-       return self.__msg
+        return self.__msg
 
 class NoSectionError(Error):
     def __init__(self, section):
-       Error.__init__(self, 'No section: %s' % section)
-       self.section = section
+        Error.__init__(self, 'No section: %s' % section)
+        self.section = section
 
 class DuplicateSectionError(Error):
     def __init__(self, section):
-       Error.__init__(self, "Section %s already exists" % section)
-       self.section = section
+        Error.__init__(self, "Section %s already exists" % section)
+        self.section = section
 
 class NoOptionError(Error):
     def __init__(self, option, section):
-       Error.__init__(self, "No option `%s' in section: %s" %
-                      (option, section))
-       self.option = option
-       self.section = section
+        Error.__init__(self, "No option `%s' in section: %s" %
+                       (option, section))
+        self.option = option
+        self.section = section
 
 class InterpolationError(Error):
     def __init__(self, reference, option, section):
-       Error.__init__(self,
-                      "Bad value substitution: sect `%s', opt `%s', ref `%s'"
-                      % (section, option, reference))
-       self.reference = reference
-       self.option = option
-       self.section = section
+        Error.__init__(self,
+                       "Bad value substitution: sect `%s', opt `%s', ref `%s'"
+                       % (section, option, reference))
+        self.reference = reference
+        self.option = option
+        self.section = section
 
 
 \f
 class ConfigParser:
     def __init__(self, defaults=None):
-       self.__sections = {}
-       if defaults is None:
-           self.__defaults = {}
-       else:
-           self.__defaults = defaults
+        self.__sections = {}
+        if defaults is None:
+            self.__defaults = {}
+        else:
+            self.__defaults = defaults
 
     def defaults(self):
-       return self.__defaults
+        return self.__defaults
 
     def sections(self):
-       """Return a list of section names, excluding [DEFAULT]"""
-       # self.__sections will never have [DEFAULT] in it
-       return self.__sections.keys()
+        """Return a list of section names, excluding [DEFAULT]"""
+        # self.__sections will never have [DEFAULT] in it
+        return self.__sections.keys()
 
     def add_section(self, section):
-       """Create a new section in the configuration.
+        """Create a new section in the configuration.
 
-       Raise DuplicateSectionError if a section by the specified name
-       already exists.
-       """
-       if self.__sections.has_key(section):
-           raise DuplicateSectionError(section)
-       self.__sections[section] = {}
+        Raise DuplicateSectionError if a section by the specified name
+        already exists.
+        """
+        if self.__sections.has_key(section):
+            raise DuplicateSectionError(section)
+        self.__sections[section] = {}
 
     def has_section(self, section):
-       """Indicate whether the named section is present in the configuration.
+        """Indicate whether the named section is present in the configuration.
 
-       The DEFAULT section is not acknowledged.
-       """
-       return self.__sections.has_key(section)
+        The DEFAULT section is not acknowledged.
+        """
+        return self.__sections.has_key(section)
 
     def options(self, section):
-       try:
-           opts = self.__sections[section].copy()
-       except KeyError:
-           raise NoSectionError(section)
-       opts.update(self.__defaults)
-       return opts.keys()
+        try:
+            opts = self.__sections[section].copy()
+        except KeyError:
+            raise NoSectionError(section)
+        opts.update(self.__defaults)
+        return opts.keys()
 
     def read(self, filenames):
-       """Read and parse a list of filenames."""
-       if type(filenames) is type(''):
-           filenames = [filenames]
-       for file in filenames:
-           try:
-               fp = open(file, 'r')
-               self.__read(fp)
-           except IOError:
-               pass
+        """Read and parse a list of filenames."""
+        if type(filenames) is type(''):
+            filenames = [filenames]
+        for file in filenames:
+            try:
+                fp = open(file, 'r')
+                self.__read(fp)
+            except IOError:
+                pass
 
     def get(self, section, option, raw=0):
-       """Get an option value for a given section.
-
-       All % interpolations are expanded in the return values, based
-       on the defaults passed into the constructor.
-
-       The section DEFAULT is special.
-       """
-       try:
-           sectdict = self.__sections[section].copy()
-       except KeyError:
-           if section == DEFAULTSECT:
-               sectdict = {}
-           else:
-               raise NoSectionError(section)
-       d = self.__defaults.copy()
-       d.update(sectdict)
-       option = string.lower(option)
-       try:
-           rawval = d[option]
-       except KeyError:
-           raise NoOptionError(option, section)
-       # do the string interpolation
-       if raw:
-           return rawval
-       try:
-           return rawval % d
-       except KeyError, key:
-           raise InterpolationError(key, option, section)
+        """Get an option value for a given section.
+
+        All % interpolations are expanded in the return values, based
+        on the defaults passed into the constructor.
+
+        The section DEFAULT is special.
+        """
+        try:
+            sectdict = self.__sections[section].copy()
+        except KeyError:
+            if section == DEFAULTSECT:
+                sectdict = {}
+            else:
+                raise NoSectionError(section)
+        d = self.__defaults.copy()
+        d.update(sectdict)
+        option = string.lower(option)
+        try:
+            rawval = d[option]
+        except KeyError:
+            raise NoOptionError(option, section)
+        # do the string interpolation
+        if raw:
+            return rawval
+        try:
+            return rawval % d
+        except KeyError, key:
+            raise InterpolationError(key, option, section)
 
     def __get(self, section, conv, option):
-       return conv(self.get(section, option))
+        return conv(self.get(section, option))
 
     def getint(self, section, option):
-       return self.__get(section, string.atoi, option)
+        return self.__get(section, string.atoi, option)
 
     def getfloat(self, section, option):
-       return self.__get(section, string.atof, option)
+        return self.__get(section, string.atof, option)
 
     def getboolean(self, section, option):
-       v = self.get(section, option)
-       val = string.atoi(v)
-       if val not in (0, 1):
-           raise ValueError, 'Not a boolean: %s' % v
-       return val
+        v = self.get(section, option)
+        val = string.atoi(v)
+        if val not in (0, 1):
+            raise ValueError, 'Not a boolean: %s' % v
+        return val
 
     def __read(self, fp):
-       """Parse a sectioned setup file.
-
-       The sections in the setup file contain a title line at the top,
-       indicated by a name in square brackets (`[]'), plus key/value
-       option lines, indicated by `name: value' format lines.
-       Continuations are represented by an embedded newline then
-       leading whitespace.  Blank lines, lines beginning with a '#',
-       and just about everything else are ignored.
-       """
-       cursect = None                  # None, or a dictionary
-       optname = None
-       lineno = 0
-       while 1:
-           line = fp.readline()
-           if not line:
-               break
-           lineno = lineno + 1
-           # comment or blank line?
-           if string.strip(line) == '' or line[0] in '#;':
-               continue
-           if string.lower(string.split(line)[0]) == 'rem' \
-              and line[0] == "r":      # no leading whitespace
-               continue
-           # continuation line?
-           if line[0] in ' \t' and cursect <> None and optname:
-               value = string.strip(line)
-               if value:
-                   cursect = cursect[optname] + '\n ' + value
-           # a section header?
-           elif secthead_cre.match(line) >= 0:
-               sectname = secthead_cre.group(1)
-               if self.__sections.has_key(sectname):
-                   cursect = self.__sections[sectname]
-               elif sectname == DEFAULTSECT:
-                   cursect = self.__defaults
-               else:
-                   cursect = {'name': sectname}
-                   self.__sections[sectname] = cursect
-               # So sections can't start with a continuation line.
-               optname = None
-           # an option line?
-           elif option_cre.match(line) >= 0:
-               optname, optval = option_cre.group(1, 3)
-               optname = string.lower(optname)
-               optval = string.strip(optval)
-               # allow empty values
-               if optval == '""':
-                   optval = ''
-               cursect[optname] = optval
-           # an error
-           else:
-               print 'Error in %s at %d: %s', (fp.name, lineno, `line`)
+        """Parse a sectioned setup file.
+
+        The sections in the setup file contain a title line at the top,
+        indicated by a name in square brackets (`[]'), plus key/value
+        option lines, indicated by `name: value' format lines.
+        Continuations are represented by an embedded newline then
+        leading whitespace.  Blank lines, lines beginning with a '#',
+        and just about everything else are ignored.
+        """
+        cursect = None                  # None, or a dictionary
+        optname = None
+        lineno = 0
+        while 1:
+            line = fp.readline()
+            if not line:
+                break
+            lineno = lineno + 1
+            # comment or blank line?
+            if string.strip(line) == '' or line[0] in '#;':
+                continue
+            if string.lower(string.split(line)[0]) == 'rem' \
+               and line[0] == "r":      # no leading whitespace
+                continue
+            # continuation line?
+            if line[0] in ' \t' and cursect <> None and optname:
+                value = string.strip(line)
+                if value:
+                    cursect = cursect[optname] + '\n ' + value
+            # a section header?
+            elif secthead_cre.match(line) >= 0:
+                sectname = secthead_cre.group(1)
+                if self.__sections.has_key(sectname):
+                    cursect = self.__sections[sectname]
+                elif sectname == DEFAULTSECT:
+                    cursect = self.__defaults
+                else:
+                    cursect = {'name': sectname}
+                    self.__sections[sectname] = cursect
+                # So sections can't start with a continuation line.
+                optname = None
+            # an option line?
+            elif option_cre.match(line) >= 0:
+                optname, optval = option_cre.group(1, 3)
+                optname = string.lower(optname)
+                optval = string.strip(optval)
+                # allow empty values
+                if optval == '""':
+                    optval = ''
+                cursect[optname] = optval
+            # an error
+            else:
+                print 'Error in %s at %d: %s', (fp.name, lineno, `line`)
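
A minimal sketch of the ConfigParser API reindented above; the file name app.ini and its contents are illustrative assumptions:

    # Sketch, assuming the 1998-era ConfigParser module shown above and an
    # app.ini containing:
    #
    #   [server]
    #   host: www.example.com
    #   port: 8000
    #   url: http://%(host)s:%(port)s/
    #
    import ConfigParser

    cp = ConfigParser.ConfigParser()
    cp.read('app.ini')
    print cp.sections()                 # ['server']
    print cp.getint('server', 'port')   # 8000
    # get() expands % interpolations against the section plus the defaults
    print cp.get('server', 'url')       # http://www.example.com:8000/
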
Lib/MimeWriter.py
index 29a9933b676f7ae0becfe8d432d316fc5ee46d91..0f8b990dfcdfd01d0eceb73d85e721912be1fa03 100644 (file)
@@ -47,7 +47,7 @@ class MimeWriter:
     w.startmultipartbody(subtype)
     for each part:
         subwriter = w.nextpart()
-       ...use the subwriter's methods to create the subpart...
+        ...use the subwriter's methods to create the subpart...
     w.lastpart()
 
     The subwriter is another MimeWriter instance, and should be
@@ -82,46 +82,46 @@ class MimeWriter:
     """
 
     def __init__(self, fp):
-       self._fp = fp
-       self._headers = []
+        self._fp = fp
+        self._headers = []
 
     def addheader(self, key, value, prefix=0):
-       lines = string.splitfields(value, "\n")
-       while lines and not lines[-1]: del lines[-1]
-       while lines and not lines[0]: del lines[0]
-       for i in range(1, len(lines)):
-           lines[i] = "    " + string.strip(lines[i])
-       value = string.joinfields(lines, "\n") + "\n"
-       line = key + ": " + value
-       if prefix:
-           self._headers.insert(0, line)
-       else:
-           self._headers.append(line)
+        lines = string.splitfields(value, "\n")
+        while lines and not lines[-1]: del lines[-1]
+        while lines and not lines[0]: del lines[0]
+        for i in range(1, len(lines)):
+            lines[i] = "    " + string.strip(lines[i])
+        value = string.joinfields(lines, "\n") + "\n"
+        line = key + ": " + value
+        if prefix:
+            self._headers.insert(0, line)
+        else:
+            self._headers.append(line)
 
     def flushheaders(self):
-       self._fp.writelines(self._headers)
-       self._headers = []
+        self._fp.writelines(self._headers)
+        self._headers = []
 
     def startbody(self, ctype, plist=[], prefix=1):
-       for name, value in plist:
-           ctype = ctype + ';\n %s=\"%s\"' % (name, value)
-       self.addheader("Content-Type", ctype, prefix=prefix)
-       self.flushheaders()
-       self._fp.write("\n")
-       return self._fp
+        for name, value in plist:
+            ctype = ctype + ';\n %s=\"%s\"' % (name, value)
+        self.addheader("Content-Type", ctype, prefix=prefix)
+        self.flushheaders()
+        self._fp.write("\n")
+        return self._fp
 
     def startmultipartbody(self, subtype, boundary=None, plist=[], prefix=1):
-       self._boundary = boundary or mimetools.choose_boundary()
-       return self.startbody("multipart/" + subtype,
-                             [("boundary", self._boundary)] + plist,
-                             prefix=prefix)
+        self._boundary = boundary or mimetools.choose_boundary()
+        return self.startbody("multipart/" + subtype,
+                              [("boundary", self._boundary)] + plist,
+                              prefix=prefix)
 
     def nextpart(self):
-       self._fp.write("\n--" + self._boundary + "\n")
-       return self.__class__(self._fp)
+        self._fp.write("\n--" + self._boundary + "\n")
+        return self.__class__(self._fp)
 
     def lastpart(self):
-       self._fp.write("\n--" + self._boundary + "--\n")
+        self._fp.write("\n--" + self._boundary + "--\n")
 
 
 if __name__ == '__main__':
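
A minimal sketch of the MimeWriter class reindented above, following the startmultipartbody()/nextpart()/lastpart() pattern from its docstring; writing to sys.stdout and the message text are illustrative assumptions:

    # Sketch, assuming the 1998-era MimeWriter module shown above.
    import sys
    from MimeWriter import MimeWriter

    w = MimeWriter(sys.stdout)
    w.addheader("MIME-Version", "1.0")
    w.addheader("Subject", "Two-part example")
    w.startmultipartbody("mixed")
    part = w.nextpart()
    body = part.startbody("text/plain")
    body.write("First part.\n")
    part = w.nextpart()
    body = part.startbody("text/plain", [("charset", "us-ascii")])
    body.write("Second part.\n")
    w.lastpart()
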
Lib/Queue.py
index 1cec4e30af21f609beb7f9fab3c591c8b9b418fe..6710153d2003c633cd70717145213004eadc89f4 100644 (file)
 # exceptions, but also when -X option is used.
 try:
     class Empty(Exception):
-       pass
+        pass
 except TypeError:
     # string based exceptions
-    Empty = 'Queue.Empty'              # Exception raised by get_nowait()
+    Empty = 'Queue.Empty'               # Exception raised by get_nowait()
 
 class Queue:
     def __init__(self, maxsize):
-       """Initialize a queue object with a given maximum size.
+        """Initialize a queue object with a given maximum size.
 
-       If maxsize is <= 0, the queue size is infinite.
-       """
-       import thread
-       self._init(maxsize)
-       self.mutex = thread.allocate_lock()
-       self.esema = thread.allocate_lock()
-       self.esema.acquire_lock()
-       self.fsema = thread.allocate_lock()
+        If maxsize is <= 0, the queue size is infinite.
+        """
+        import thread
+        self._init(maxsize)
+        self.mutex = thread.allocate_lock()
+        self.esema = thread.allocate_lock()
+        self.esema.acquire_lock()
+        self.fsema = thread.allocate_lock()
 
     def qsize(self):
-       """Returns the approximate size of the queue (not reliable!)."""
-       self.mutex.acquire_lock()
-       n = self._qsize()
-       self.mutex.release_lock()
-       return n
+        """Returns the approximate size of the queue (not reliable!)."""
+        self.mutex.acquire_lock()
+        n = self._qsize()
+        self.mutex.release_lock()
+        return n
 
     def empty(self):
-       """Returns 1 if the queue is empty, 0 otherwise (not reliable!)."""
-       self.mutex.acquire_lock()
-       n = self._empty()
-       self.mutex.release_lock()
-       return n
+        """Returns 1 if the queue is empty, 0 otherwise (not reliable!)."""
+        self.mutex.acquire_lock()
+        n = self._empty()
+        self.mutex.release_lock()
+        return n
 
     def full(self):
-       """Returns 1 if the queue is full, 0 otherwise (not reliable!)."""
-       self.mutex.acquire_lock()
-       n = self._full()
-       self.mutex.release_lock()
-       return n
+        """Returns 1 if the queue is full, 0 otherwise (not reliable!)."""
+        self.mutex.acquire_lock()
+        n = self._full()
+        self.mutex.release_lock()
+        return n
 
     def put(self, item):
-       """Put an item into the queue."""
-       self.fsema.acquire_lock()
-       self.mutex.acquire_lock()
-       was_empty = self._empty()
-       self._put(item)
-       if was_empty:
-           self.esema.release_lock()
-       if not self._full():
-           self.fsema.release_lock()
-       self.mutex.release_lock()
+        """Put an item into the queue."""
+        self.fsema.acquire_lock()
+        self.mutex.acquire_lock()
+        was_empty = self._empty()
+        self._put(item)
+        if was_empty:
+            self.esema.release_lock()
+        if not self._full():
+            self.fsema.release_lock()
+        self.mutex.release_lock()
 
     def get(self):
-       """Gets and returns an item from the queue.
-       This method blocks if necessary until an item is available.
-       """
-       self.esema.acquire_lock()
-       self.mutex.acquire_lock()
-       was_full = self._full()
-       item = self._get()
-       if was_full:
-           self.fsema.release_lock()
-       if not self._empty():
-           self.esema.release_lock()
-       self.mutex.release_lock()
-       return item
+        """Gets and returns an item from the queue.
+        This method blocks if necessary until an item is available.
+        """
+        self.esema.acquire_lock()
+        self.mutex.acquire_lock()
+        was_full = self._full()
+        item = self._get()
+        if was_full:
+            self.fsema.release_lock()
+        if not self._empty():
+            self.esema.release_lock()
+        self.mutex.release_lock()
+        return item
 
     # Get an item from the queue if one is immediately available,
     # raise Empty if the queue is empty or temporarily unavailable
     def get_nowait(self):
-       """Gets and returns an item from the queue.
-       Only gets an item if one is immediately available.  Otherwise
-       this raises the Empty exception if the queue is empty or
-       temporarily unavailable.
-       """
-       locked = self.esema.acquire_lock(0)
-       self.mutex.acquire_lock()
-       if self._empty():
-           # The queue is empty -- we can't have esema
-           self.mutex.release_lock()
-           raise Empty
-       if not locked:
-           locked = self.esema.acquire_lock(0)
-           if not locked:
-               # Somebody else has esema
-               # but we have mutex --
-               # go out of their way
-               self.mutex.release_lock()
-               raise Empty
-       was_full = self._full()
-       item = self._get()
-       if was_full:
-           self.fsema.release_lock()
-       if not self._empty():
-           self.esema.release_lock()
-       self.mutex.release_lock()
-       return item
+        """Gets and returns an item from the queue.
+        Only gets an item if one is immediately available.  Otherwise
+        this raises the Empty exception if the queue is empty or
+        temporarily unavailable.
+        """
+        locked = self.esema.acquire_lock(0)
+        self.mutex.acquire_lock()
+        if self._empty():
+            # The queue is empty -- we can't have esema
+            self.mutex.release_lock()
+            raise Empty
+        if not locked:
+            locked = self.esema.acquire_lock(0)
+            if not locked:
+                # Somebody else has esema
+                # but we have mutex --
+                # go out of their way
+                self.mutex.release_lock()
+                raise Empty
+        was_full = self._full()
+        item = self._get()
+        if was_full:
+            self.fsema.release_lock()
+        if not self._empty():
+            self.esema.release_lock()
+        self.mutex.release_lock()
+        return item
 
     # XXX Need to define put_nowait() as well.
 
@@ -110,26 +110,26 @@ class Queue:
 
     # Initialize the queue representation
     def _init(self, maxsize):
-       self.maxsize = maxsize
-       self.queue = []
+        self.maxsize = maxsize
+        self.queue = []
 
     def _qsize(self):
-       return len(self.queue)
+        return len(self.queue)
 
     # Check whether the queue is empty
     def _empty(self):
-       return not self.queue
+        return not self.queue
 
     # Check whether the queue is full
     def _full(self):
-       return self.maxsize > 0 and len(self.queue) == self.maxsize
+        return self.maxsize > 0 and len(self.queue) == self.maxsize
 
     # Put a new item in the queue
     def _put(self, item):
-       self.queue.append(item)
+        self.queue.append(item)
 
     # Get an item from the queue
     def _get(self):
-       item = self.queue[0]
-       del self.queue[0]
-       return item
+        item = self.queue[0]
+        del self.queue[0]
+        return item
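The two-semaphore protocol above is easiest to see with a producer and a
consumer thread.  The following is only a sketch (the thread layout, maxsize
and item values are made up, and it targets the 1.5-era Queue and thread
modules shown in this change):

        import thread
        from Queue import Queue

        q = Queue(4)                     # bounded: put() blocks on fsema when full

        def producer():
            for i in range(20):
                q.put(i)                 # may block until the consumer makes room

        def consumer():
            for i in range(20):
                print 'got', q.get()     # blocks on esema until an item arrives

        thread.start_new_thread(producer, ())
        consumer()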
index ac3e38411593bac8cda93329448150e3fef1df0c..71268558f703bbc684f5c59b88577b0f8ba905e3 100644 (file)
@@ -36,119 +36,119 @@ class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
     server_version = "SimpleHTTP/" + __version__
 
     def do_GET(self):
-       """Serve a GET request."""
-       f = self.send_head()
-       if f:
-           self.copyfile(f, self.wfile)
-           f.close()
+        """Serve a GET request."""
+        f = self.send_head()
+        if f:
+            self.copyfile(f, self.wfile)
+            f.close()
 
     def do_HEAD(self):
-       """Serve a HEAD request."""
-       f = self.send_head()
-       if f:
-           f.close()
+        """Serve a HEAD request."""
+        f = self.send_head()
+        if f:
+            f.close()
 
     def send_head(self):
-       """Common code for GET and HEAD commands.
-
-       This sends the response code and MIME headers.
-
-       Return value is either a file object (which has to be copied
-       to the outputfile by the caller unless the command was HEAD,
-       and must be closed by the caller under all circumstances), or
-       None, in which case the caller has nothing further to do.
-
-       """
-       path = self.translate_path(self.path)
-       if os.path.isdir(path):
-           self.send_error(403, "Directory listing not supported")
-           return None
-       try:
-           f = open(path)
-       except IOError:
-           self.send_error(404, "File not found")
-           return None
-       self.send_response(200)
-       self.send_header("Content-type", self.guess_type(path))
-       self.end_headers()
-       return f
+        """Common code for GET and HEAD commands.
+
+        This sends the response code and MIME headers.
+
+        Return value is either a file object (which has to be copied
+        to the outputfile by the caller unless the command was HEAD,
+        and must be closed by the caller under all circumstances), or
+        None, in which case the caller has nothing further to do.
+
+        """
+        path = self.translate_path(self.path)
+        if os.path.isdir(path):
+            self.send_error(403, "Directory listing not supported")
+            return None
+        try:
+            f = open(path)
+        except IOError:
+            self.send_error(404, "File not found")
+            return None
+        self.send_response(200)
+        self.send_header("Content-type", self.guess_type(path))
+        self.end_headers()
+        return f
 
     def translate_path(self, path):
-       """Translate a /-separated PATH to the local filename syntax.
-
-       Components that mean special things to the local file system
-       (e.g. drive or directory names) are ignored.  (XXX They should
-       probably be diagnosed.)
-
-       """
-       path = posixpath.normpath(path)
-       words = string.splitfields(path, '/')
-       words = filter(None, words)
-       path = os.getcwd()
-       for word in words:
-           drive, word = os.path.splitdrive(word)
-           head, word = os.path.split(word)
-           if word in (os.curdir, os.pardir): continue
-           path = os.path.join(path, word)
-       return path
+        """Translate a /-separated PATH to the local filename syntax.
+
+        Components that mean special things to the local file system
+        (e.g. drive or directory names) are ignored.  (XXX They should
+        probably be diagnosed.)
+
+        """
+        path = posixpath.normpath(path)
+        words = string.splitfields(path, '/')
+        words = filter(None, words)
+        path = os.getcwd()
+        for word in words:
+            drive, word = os.path.splitdrive(word)
+            head, word = os.path.split(word)
+            if word in (os.curdir, os.pardir): continue
+            path = os.path.join(path, word)
+        return path
 
     def copyfile(self, source, outputfile):
-       """Copy all data between two file objects.
+        """Copy all data between two file objects.
 
-       The SOURCE argument is a file object open for reading
-       (or anything with a read() method) and the DESTINATION
-       argument is a file object open for writing (or
-       anything with a write() method).
+        The SOURCE argument is a file object open for reading
+        (or anything with a read() method) and the DESTINATION
+        argument is a file object open for writing (or
+        anything with a write() method).
 
-       The only reason for overriding this would be to change
-       the block size or perhaps to replace newlines by CRLF
-       -- note however that this the default server uses this
-       to copy binary data as well.
+        The only reason for overriding this would be to change
+        the block size or perhaps to replace newlines by CRLF
+        -- note however that the default server uses this
+        to copy binary data as well.
 
-       """
+        """
 
-       BLOCKSIZE = 8192
-       while 1:
-           data = source.read(BLOCKSIZE)
-           if not data: break
-           outputfile.write(data)
+        BLOCKSIZE = 8192
+        while 1:
+            data = source.read(BLOCKSIZE)
+            if not data: break
+            outputfile.write(data)
 
     def guess_type(self, path):
-       """Guess the type of a file.
+        """Guess the type of a file.
 
-       Argument is a PATH (a filename).
+        Argument is a PATH (a filename).
 
-       Return value is a string of the form type/subtype,
-       usable for a MIME Content-type header.
+        Return value is a string of the form type/subtype,
+        usable for a MIME Content-type header.
 
-       The default implementation looks the file's extension
-       up in the table self.extensions_map, using text/plain
-       as a default; however it would be permissible (if
-       slow) to look inside the data to make a better guess.
+        The default implementation looks the file's extension
+        up in the table self.extensions_map, using text/plain
+        as a default; however it would be permissible (if
+        slow) to look inside the data to make a better guess.
 
-       """
+        """
 
-       base, ext = posixpath.splitext(path)
-       if self.extensions_map.has_key(ext):
-           return self.extensions_map[ext]
-       ext = string.lower(ext)
-       if self.extensions_map.has_key(ext):
-           return self.extensions_map[ext]
-       else:
-           return self.extensions_map['']
+        base, ext = posixpath.splitext(path)
+        if self.extensions_map.has_key(ext):
+            return self.extensions_map[ext]
+        ext = string.lower(ext)
+        if self.extensions_map.has_key(ext):
+            return self.extensions_map[ext]
+        else:
+            return self.extensions_map['']
 
     extensions_map = {
-           '': 'text/plain',   # Default, *must* be present
-           '.html': 'text/html',
-           '.htm': 'text/html',
-           '.gif': 'image/gif',
-           '.jpg': 'image/jpeg',
-           '.jpeg': 'image/jpeg',
-           }
+            '': 'text/plain',   # Default, *must* be present
+            '.html': 'text/html',
+            '.htm': 'text/html',
+            '.gif': 'image/gif',
+            '.jpg': 'image/jpeg',
+            '.jpeg': 'image/jpeg',
+            }
 
 
 def test(HandlerClass = SimpleHTTPRequestHandler,
-        ServerClass = SocketServer.TCPServer):
+         ServerClass = SocketServer.TCPServer):
     BaseHTTPServer.test(HandlerClass, ServerClass)
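For orientation, the handler class above is normally paired with a TCP
server; a minimal sketch follows (the port number is made up, 1.5-era
modules assumed):

        import SocketServer, SimpleHTTPServer

        httpd = SocketServer.TCPServer(('', 8000),
                                       SimpleHTTPServer.SimpleHTTPRequestHandler)
        httpd.serve_forever()            # answers GET/HEAD for files under os.getcwd()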
 
 
index 049a4b6cc775b1e33fa35afdee9a7c6945f0b1df..64a86d11c1905d017d7a7701bb3ed5e98293074d 100644 (file)
@@ -3,19 +3,19 @@
 This module tries to capture the various aspects of defining a server:
 
 - address family:
-       - AF_INET: IP (Internet Protocol) sockets (default)
-       - AF_UNIX: Unix domain sockets
-       - others, e.g. AF_DECNET are conceivable (see <socket.h>
+        - AF_INET: IP (Internet Protocol) sockets (default)
+        - AF_UNIX: Unix domain sockets
+        - others, e.g. AF_DECNET are conceivable (see <socket.h>)
 - socket type:
-       - SOCK_STREAM (reliable stream, e.g. TCP)
-       - SOCK_DGRAM (datagrams, e.g. UDP)
+        - SOCK_STREAM (reliable stream, e.g. TCP)
+        - SOCK_DGRAM (datagrams, e.g. UDP)
 - client address verification before further looking at the request
-       (This is actually a hook for any processing that needs to look
-        at the request before anything else, e.g. logging)
+        (This is actually a hook for any processing that needs to look
+         at the request before anything else, e.g. logging)
 - how to handle multiple requests:
-       - synchronous (one request is handled at a time)
-       - forking (each request is handled by a new process)
-       - threading (each request is handled by a new thread)
+        - synchronous (one request is handled at a time)
+        - forking (each request is handled by a new process)
+        - threading (each request is handled by a new thread)
 
 The classes in this module favor the server type that is simplest to
 write: a synchronous TCP/IP server.  This is bad class design, but
@@ -25,14 +25,14 @@ slows down method lookups.)
 There are four classes in an inheritance diagram that represent
 synchronous servers of four types:
 
-       +-----------+        +------------------+
-       | TCPServer |------->| UnixStreamServer |
-       +-----------+        +------------------+
-             |
-             v
-       +-----------+        +--------------------+
-       | UDPServer |------->| UnixDatagramServer |
-       +-----------+        +--------------------+
+        +-----------+        +------------------+
+        | TCPServer |------->| UnixStreamServer |
+        +-----------+        +------------------+
+              |
+              v
+        +-----------+        +--------------------+
+        | UDPServer |------->| UnixDatagramServer |
+        +-----------+        +--------------------+
 
 Note that UnixDatagramServer derives from UDPServer, not from
 UnixStreamServer -- the only difference between an IP and a Unix
@@ -43,7 +43,7 @@ Forking and threading versions of each type of server can be created
 using the ForkingServer and ThreadingServer mix-in classes.  For
 instance, a threading UDP server class is created as follows:
 
-       class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
+        class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
 
 The Mix-in class must come first, since it overrides a method defined
 in UDPServer!
@@ -119,8 +119,8 @@ class TCPServer:
 
     - __init__(server_address, RequestHandlerClass)
     - serve_forever()
-    - handle_request() # if you don't use serve_forever()
-    - fileno() -> int  # for select()
+    - handle_request()  # if you don't use serve_forever()
+    - fileno() -> int   # for select()
 
     Methods that may be overridden:
 
@@ -157,42 +157,42 @@ class TCPServer:
     request_queue_size = 5
 
     def __init__(self, server_address, RequestHandlerClass):
-       """Constructor.  May be extended, do not override."""
-       self.server_address = server_address
-       self.RequestHandlerClass = RequestHandlerClass
-       self.socket = socket.socket(self.address_family,
-                                   self.socket_type)
-       self.server_bind()
-       self.server_activate()
+        """Constructor.  May be extended, do not override."""
+        self.server_address = server_address
+        self.RequestHandlerClass = RequestHandlerClass
+        self.socket = socket.socket(self.address_family,
+                                    self.socket_type)
+        self.server_bind()
+        self.server_activate()
 
     def server_bind(self):
-       """Called by constructor to bind the socket.
+        """Called by constructor to bind the socket.
 
-       May be overridden.
+        May be overridden.
 
-       """
-       self.socket.bind(self.server_address)
+        """
+        self.socket.bind(self.server_address)
 
     def server_activate(self):
-       """Called by constructor to activate the server.
+        """Called by constructor to activate the server.
 
-       May be overridden.
+        May be overridden.
 
-       """
-       self.socket.listen(self.request_queue_size)
+        """
+        self.socket.listen(self.request_queue_size)
 
     def fileno(self):
-       """Return socket file number.
+        """Return socket file number.
 
-       Interface required by select().
+        Interface required by select().
 
-       """
-       return self.socket.fileno()
+        """
+        return self.socket.fileno()
 
     def serve_forever(self):
-       """Handle one request at a time until doomsday."""
-       while 1:
-           self.handle_request()
+        """Handle one request at a time until doomsday."""
+        while 1:
+            self.handle_request()
 
     # The distinction between handling, getting, processing and
     # finishing a request is fairly arbitrary.  Remember:
@@ -206,54 +206,54 @@ class TCPServer:
     #   this constructor will handle the request all by itself
 
     def handle_request(self):
-       """Handle one request, possibly blocking."""
-       request, client_address = self.get_request()
-       if self.verify_request(request, client_address):
-           try:
-               self.process_request(request, client_address)
-           except:
-               self.handle_error(request, client_address)
+        """Handle one request, possibly blocking."""
+        request, client_address = self.get_request()
+        if self.verify_request(request, client_address):
+            try:
+                self.process_request(request, client_address)
+            except:
+                self.handle_error(request, client_address)
 
     def get_request(self):
-       """Get the request and client address from the socket.
+        """Get the request and client address from the socket.
 
-       May be overridden.
+        May be overridden.
 
-       """
-       return self.socket.accept()
+        """
+        return self.socket.accept()
 
     def verify_request(self, request, client_address):
-       """Verify the request.  May be overridden.
+        """Verify the request.  May be overridden.
 
-       Return true if we should proceed with this request.
+        Return true if we should proceed with this request.
 
-       """
-       return 1
+        """
+        return 1
 
     def process_request(self, request, client_address):
-       """Call finish_request.
+        """Call finish_request.
 
-       Overridden by ForkingMixIn and ThreadingMixIn.
+        Overridden by ForkingMixIn and ThreadingMixIn.
 
-       """
-       self.finish_request(request, client_address)
+        """
+        self.finish_request(request, client_address)
 
     def finish_request(self, request, client_address):
-       """Finish one request by instantiating RequestHandlerClass."""
-       self.RequestHandlerClass(request, client_address, self)
+        """Finish one request by instantiating RequestHandlerClass."""
+        self.RequestHandlerClass(request, client_address, self)
 
     def handle_error(self, request, client_address):
-       """Handle an error gracefully.  May be overridden.
+        """Handle an error gracefully.  May be overridden.
 
-       The default is to print a traceback and continue.
+        The default is to print a traceback and continue.
 
-       """
-       print '-'*40
-       print 'Exception happened during processing of request from',
-       print client_address
-       import traceback
-       traceback.print_exc()
-       print '-'*40
+        """
+        print '-'*40
+        print 'Exception happened during processing of request from',
+        print client_address
+        import traceback
+        traceback.print_exc()
+        print '-'*40
 
 
 class UDPServer(TCPServer):
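The get/verify/process/finish hooks in the hunk above are the intended
override points.  As a sketch (class name and policy invented), a server
limited to loopback clients only needs verify_request():

        from SocketServer import TCPServer

        class LocalOnlyTCPServer(TCPServer):
            def verify_request(self, request, client_address):
                # accept connections from the loopback interface only
                return client_address[0] == '127.0.0.1'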
@@ -265,19 +265,19 @@ class UDPServer(TCPServer):
     max_packet_size = 8192
 
     def get_request(self):
-       return self.socket.recvfrom(self.max_packet_size)
+        return self.socket.recvfrom(self.max_packet_size)
 
 
 if hasattr(socket, 'AF_UNIX'):
 
     class UnixStreamServer(TCPServer):
 
-       address_family = socket.AF_UNIX
+        address_family = socket.AF_UNIX
 
 
     class UnixDatagramServer(UDPServer):
 
-       address_family = socket.AF_UNIX
+        address_family = socket.AF_UNIX
 
 
 class ForkingMixIn:
@@ -287,34 +287,34 @@ class ForkingMixIn:
     active_children = None
 
     def collect_children(self):
-       """Internal routine to wait for died children."""
-       while self.active_children:
-           pid, status = os.waitpid(0, os.WNOHANG)
-           if not pid: break
-           self.active_children.remove(pid)
+        """Internal routine to wait for died children."""
+        while self.active_children:
+            pid, status = os.waitpid(0, os.WNOHANG)
+            if not pid: break
+            self.active_children.remove(pid)
 
     def process_request(self, request, client_address):
-       """Fork a new subprocess to process the request."""
-       self.collect_children()
-       pid = os.fork()
-       if pid:
-           # Parent process
-           if self.active_children is None:
-               self.active_children = []
-           self.active_children.append(pid)
-           return
-       else:
-           # Child process.
-           # This must never return, hence os._exit()!
-           try:
-               self.finish_request(request, client_address)
-               os._exit(0)
-           except:
-               try:
-                   self.handle_error(request,
-                                     client_address)
-               finally:
-                   os._exit(1)
+        """Fork a new subprocess to process the request."""
+        self.collect_children()
+        pid = os.fork()
+        if pid:
+            # Parent process
+            if self.active_children is None:
+                self.active_children = []
+            self.active_children.append(pid)
+            return
+        else:
+            # Child process.
+            # This must never return, hence os._exit()!
+            try:
+                self.finish_request(request, client_address)
+                os._exit(0)
+            except:
+                try:
+                    self.handle_error(request,
+                                      client_address)
+                finally:
+                    os._exit(1)
 
 
 class ThreadingMixIn:
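The forking mix-in above composes exactly like the threading one; a sketch,
reusing the hypothetical EchoHandler from the earlier example:

        from SocketServer import ForkingMixIn, TCPServer

        class ForkingTCPServer(ForkingMixIn, TCPServer): pass

        # each accepted connection is handled in a forked child; the parent
        # reaps finished children via collect_children() on the next request
        ForkingTCPServer(('', 7008), EchoHandler).serve_forever()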
@@ -322,10 +322,10 @@ class ThreadingMixIn:
     """Mix-in class to handle each request in a new thread."""
 
     def process_request(self, request, client_address):
-       """Start a new thread to process the request."""
-       import thread
-       thread.start_new_thread(self.finish_request,
-                               (request, client_address))
+        """Start a new thread to process the request."""
+        import thread
+        thread.start_new_thread(self.finish_request,
+                                (request, client_address))
 
 
 class ForkingUDPServer(ForkingMixIn, UDPServer): pass
@@ -354,27 +354,27 @@ class BaseRequestHandler:
     """
 
     def __init__(self, request, client_address, server):
-       self.request = request
-       self.client_address = client_address
-       self.server = server
-       try:
-           self.setup()
-           self.handle()
-           self.finish()
-       finally:
-           sys.exc_traceback = None    # Help garbage collection
+        self.request = request
+        self.client_address = client_address
+        self.server = server
+        try:
+            self.setup()
+            self.handle()
+            self.finish()
+        finally:
+            sys.exc_traceback = None    # Help garbage collection
 
     def setup(self):
-       pass
+        pass
 
     def __del__(self):
-       pass
+        pass
 
     def handle(self):
-       pass
+        pass
 
     def finish(self):
-       pass
+        pass
 
 
 # The following two classes make it possible to use the same service
@@ -390,12 +390,12 @@ class StreamRequestHandler(BaseRequestHandler):
     """Define self.rfile and self.wfile for stream sockets."""
 
     def setup(self):
-       self.connection = self.request
-       self.rfile = self.connection.makefile('rb', 0)
-       self.wfile = self.connection.makefile('wb', 0)
+        self.connection = self.request
+        self.rfile = self.connection.makefile('rb', 0)
+        self.wfile = self.connection.makefile('wb', 0)
 
     def finish(self):
-       self.wfile.flush()
+        self.wfile.flush()
 
 
 class DatagramRequestHandler(BaseRequestHandler):
@@ -403,10 +403,10 @@ class DatagramRequestHandler(BaseRequestHandler):
     """Define self.rfile and self.wfile for datagram sockets."""
 
     def setup(self):
-       import StringIO
-       self.packet, self.socket = self.request
-       self.rfile = StringIO.StringIO(self.packet)
-       self.wfile = StringIO.StringIO(self.packet)
+        import StringIO
+        self.packet, self.socket = self.request
+        self.rfile = StringIO.StringIO(self.packet)
+        self.wfile = StringIO.StringIO(self.packet)
 
     def finish(self):
-       self.socket.send(self.wfile.getvalue())
+        self.socket.send(self.wfile.getvalue())
index 3b9b157938f9b1b1a6534a7a90790acc95815a42..08f3161ca9a344c78e76dfdd263ecc604fc7e191 100644 (file)
@@ -4,30 +4,30 @@ class UserDict:
     def __init__(self): self.data = {}
     def __repr__(self): return repr(self.data)
     def __cmp__(self, dict):
-       if type(dict) == type(self.data):
-           return cmp(self.data, dict)
-       else:
-           return cmp(self.data, dict.data)
+        if type(dict) == type(self.data):
+            return cmp(self.data, dict)
+        else:
+            return cmp(self.data, dict.data)
     def __len__(self): return len(self.data)
     def __getitem__(self, key): return self.data[key]
     def __setitem__(self, key, item): self.data[key] = item
     def __delitem__(self, key): del self.data[key]
     def clear(self): return self.data.clear()
     def copy(self):
-       import copy
-       return copy.copy(self)
+        import copy
+        return copy.copy(self)
     def keys(self): return self.data.keys()
     def items(self): return self.data.items()
     def values(self): return self.data.values()
     def has_key(self, key): return self.data.has_key(key)
     def update(self, other):
-       if type(other) is type(self.data):
-           self.data.update(other)
-       else:
-           for k, v in other.items():
-               self.data[k] = v
+        if type(other) is type(self.data):
+            self.data.update(other)
+        else:
+            for k, v in other.items():
+                self.data[k] = v
     def get(self, key, failobj=None):
-       if self.data.has_key(key):
-           return self.data[key]
-       else:
-           return failobj
+        if self.data.has_key(key):
+            return self.data[key]
+        else:
+            return failobj
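UserDict exists mainly to be subclassed.  A small sketch (the subclass and
its key policy are invented, using the 1.5-era string module):

        import string
        from UserDict import UserDict

        class LowerCaseDict(UserDict):
            # store and look up every key in lower case
            def __setitem__(self, key, item):
                self.data[string.lower(key)] = item
            def __getitem__(self, key):
                return self.data[string.lower(key)]

        d = LowerCaseDict()
        d['Spam'] = 1
        print d['SPAM']                  # prints 1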
index e4cb217a05ae10859a83612927b1283c2d79e921..e3842e6aafd27f594031f7c9c28c6f170afe28fe 100755 (executable)
@@ -32,16 +32,16 @@ by a blank line.  The first section contains a number of headers,
 telling the client what kind of data is following.  Python code to
 generate a minimal header section looks like this:
 
-       print "Content-type: text/html" # HTML is following
-       print                           # blank line, end of headers
+        print "Content-type: text/html" # HTML is following
+        print                           # blank line, end of headers
 
 The second section is usually HTML, which allows the client software
 to display nicely formatted text with header, in-line images, etc.
 Here's Python code that prints a simple piece of HTML:
 
-       print "<TITLE>CGI script output</TITLE>"
-       print "<H1>This is my first CGI script</H1>"
-       print "Hello, world!"
+        print "<TITLE>CGI script output</TITLE>"
+        print "<H1>This is my first CGI script</H1>"
+        print "Hello, world!"
 
 It may not be fully legal HTML according to the letter of the
 standard, but any browser will understand it.
@@ -66,16 +66,16 @@ dictionary.  For instance, the following code (which assumes that the
 Content-type header and blank line have already been printed) checks that 
 the fields "name" and "addr" are both set to a non-empty string:
 
-       form = cgi.FieldStorage()
-       form_ok = 0
-       if form.has_key("name") and form.has_key("addr"):
-               if form["name"].value != "" and form["addr"].value != "":
-                       form_ok = 1
-       if not form_ok:
-               print "<H1>Error</H1>"
-               print "Please fill in the name and addr fields."
-               return
-       ...further form processing here...
+        form = cgi.FieldStorage()
+        form_ok = 0
+        if form.has_key("name") and form.has_key("addr"):
+                if form["name"].value != "" and form["addr"].value != "":
+                        form_ok = 1
+        if not form_ok:
+                print "<H1>Error</H1>"
+                print "Please fill in the name and addr fields."
+                return
+        ...further form processing here...
 
 Here the fields, accessed through form[key], are themselves instances
 of FieldStorage (or MiniFieldStorage, depending on the form encoding).
@@ -88,20 +88,20 @@ the same name), use the type() function to determine whether you have
 a single instance or a list of instances.  For example, here's code
 that concatenates any number of username fields, separated by commas:
 
-       username = form["username"]
-       if type(username) is type([]):
-               # Multiple username fields specified
-               usernames = ""
-               for item in username:
-                       if usernames:
-                               # Next item -- insert comma
-                               usernames = usernames + "," + item.value
-                       else:
-                               # First item -- don't insert comma
-                               usernames = item.value
-       else:
-               # Single username field specified
-               usernames = username.value
+        username = form["username"]
+        if type(username) is type([]):
+                # Multiple username fields specified
+                usernames = ""
+                for item in username:
+                        if usernames:
+                                # Next item -- insert comma
+                                usernames = usernames + "," + item.value
+                        else:
+                                # First item -- don't insert comma
+                                usernames = item.value
+        else:
+                # Single username field specified
+                usernames = username.value
 
 If a field represents an uploaded file, the value attribute reads the 
 entire file in memory as a string.  This may not be what you want.  You can 
@@ -109,14 +109,14 @@ test for an uploaded file by testing either the filename attribute or the
 file attribute.  You can then read the data at leisure from the file 
 attribute:
 
-       fileitem = form["userfile"]
-       if fileitem.file:
-               # It's an uploaded file; count lines
-               linecount = 0
-               while 1:
-                       line = fileitem.file.readline()
-                       if not line: break
-                       linecount = linecount + 1
+        fileitem = form["userfile"]
+        if fileitem.file:
+                # It's an uploaded file; count lines
+                linecount = 0
+                while 1:
+                        line = fileitem.file.readline()
+                        if not line: break
+                        linecount = linecount + 1
 
 The file upload draft standard entertains the possibility of uploading
 multiple files from one field (using a recursive multipart/*
@@ -216,7 +216,7 @@ Unix file mode should be 755 (use "chmod 755 filename").  Make sure
 that the first line of the script contains #! starting in column 1
 followed by the pathname of the Python interpreter, for instance:
 
-       #! /usr/local/bin/python
+        #! /usr/local/bin/python
 
 Make sure the Python interpreter exists and is executable by "others".
 
@@ -240,9 +240,9 @@ If you need to load modules from a directory which is not on Python's
 default module search path, you can change the path in your script,
 before importing other modules, e.g.:
 
-       import sys
-       sys.path.insert(0, "/usr/home/joe/lib/python")
-       sys.path.insert(0, "/usr/local/lib/python")
+        import sys
+        sys.path.insert(0, "/usr/home/joe/lib/python")
+        sys.path.insert(0, "/usr/local/lib/python")
 
 This way, the directory inserted last will be searched first!
 
@@ -278,7 +278,7 @@ Give it the right mode etc, and send it a request.  If it's installed
 in the standard cgi-bin directory, it should be possible to send it a
 request by entering a URL into your browser of the form:
 
-       http://yourhostname/cgi-bin/cgi.py?name=Joe+Blow&addr=At+Home
+        http://yourhostname/cgi-bin/cgi.py?name=Joe+Blow&addr=At+Home
 
 If this gives an error of type 404, the server cannot find the script
 -- perhaps you need to install it in a different directory.  If it
@@ -293,8 +293,8 @@ script, you should now be able to debug it.
 The next step could be to call the cgi module's test() function from
 your script: replace its main code with the single statement
 
-       cgi.test()
-       
+        cgi.test()
+        
 This should produce the same results as those gotten from installing
 the cgi.py file itself.
 
@@ -310,30 +310,30 @@ Fortunately, once you have managed to get your script to execute
 be printed.  The test() function below in this module is an example.
 Here are the rules:
 
-       1. Import the traceback module (before entering the
-          try-except!)
-       
-       2. Make sure you finish printing the headers and the blank
-          line early
-       
-       3. Assign sys.stderr to sys.stdout
-       
-       3. Wrap all remaining code in a try-except statement
-       
-       4. In the except clause, call traceback.print_exc()
+        1. Import the traceback module (before entering the
+           try-except!)
+        
+        2. Make sure you finish printing the headers and the blank
+           line early
+        
+        3. Assign sys.stderr to sys.stdout
+        
+        4. Wrap all remaining code in a try-except statement
+        
+        5. In the except clause, call traceback.print_exc()
 
 For example:
 
-       import sys
-       import traceback
-       print "Content-type: text/html"
-       print
-       sys.stderr = sys.stdout
-       try:
-               ...your code here...
-       except:
-               print "\n\n<PRE>"
-               traceback.print_exc()
+        import sys
+        import traceback
+        print "Content-type: text/html"
+        print
+        sys.stderr = sys.stdout
+        try:
+                ...your code here...
+        except:
+                print "\n\n<PRE>"
+                traceback.print_exc()
 
 Notes: The assignment to sys.stderr is needed because the traceback
 prints to sys.stderr.  The print "\n\n<PRE>" statement is necessary to
@@ -343,11 +343,11 @@ If you suspect that there may be a problem in importing the traceback
 module, you can use an even more robust approach (which only uses
 built-in modules):
 
-       import sys
-       sys.stderr = sys.stdout
-       print "Content-type: text/plain"
-       print
-       ...your code here...
+        import sys
+        sys.stderr = sys.stdout
+        print "Content-type: text/plain"
+        print
+        ...your code here...
 
 This relies on the Python interpreter to print the traceback.  The
 content type of the output is set to plain text, which disables all
@@ -428,8 +428,8 @@ from StringIO import StringIO
 # Logging support
 # ===============
 
-logfile = ""           # Filename to log to, if not empty
-logfp = None           # File object to log to, if not None
+logfile = ""            # Filename to log to, if not empty
+logfp = None            # File object to log to, if not None
 
 def initlog(*allargs):
     """Write a log message, if there is a log file.
@@ -456,14 +456,14 @@ def initlog(*allargs):
     """
     global logfp, log
     if logfile and not logfp:
-       try:
-           logfp = open(logfile, "a")
-       except IOError:
-           pass
+        try:
+            logfp = open(logfile, "a")
+        except IOError:
+            pass
     if not logfp:
-       log = nolog
+        log = nolog
     else:
-       log = dolog
+        log = dolog
     apply(log, allargs)
 
 def dolog(fmt, *args):
@@ -474,7 +474,7 @@ def nolog(*allargs):
     """Dummy function, assigned to log when logging is disabled."""
     pass
 
-log = initlog          # The current logging function
+log = initlog           # The current logging function
 
 
 # Parsing functions
@@ -491,49 +491,49 @@ def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
 
         fp              : file pointer; default: sys.stdin
 
-       environ         : environment dictionary; default: os.environ
+        environ         : environment dictionary; default: os.environ
 
         keep_blank_values: flag indicating whether blank values in
             URL encoded forms should be treated as blank strings.  
             A true value indicates that blanks should be retained as 
             blank strings.  The default false value indicates that
-           blank values are to be ignored and treated as if they were
-           not included.
+            blank values are to be ignored and treated as if they were
+            not included.
 
-       strict_parsing: flag indicating what to do with parsing errors.
-           If false (the default), errors are silently ignored.
-           If true, errors raise a ValueError exception.
+        strict_parsing: flag indicating what to do with parsing errors.
+            If false (the default), errors are silently ignored.
+            If true, errors raise a ValueError exception.
     """
     if not fp:
-       fp = sys.stdin
+        fp = sys.stdin
     if not environ.has_key('REQUEST_METHOD'):
-       environ['REQUEST_METHOD'] = 'GET'       # For testing stand-alone
+        environ['REQUEST_METHOD'] = 'GET'       # For testing stand-alone
     if environ['REQUEST_METHOD'] == 'POST':
-       ctype, pdict = parse_header(environ['CONTENT_TYPE'])
-       if ctype == 'multipart/form-data':
-           return parse_multipart(fp, pdict)
-       elif ctype == 'application/x-www-form-urlencoded':
-           clength = string.atoi(environ['CONTENT_LENGTH'])
-           if maxlen and clength > maxlen:
-               raise ValueError, 'Maximum content length exceeded'
-           qs = fp.read(clength)
-       else:
-           qs = ''                     # Unknown content-type
-       if environ.has_key('QUERY_STRING'): 
-           if qs: qs = qs + '&'
-           qs = qs + environ['QUERY_STRING']
-       elif sys.argv[1:]: 
-           if qs: qs = qs + '&'
-           qs = qs + sys.argv[1]
-       environ['QUERY_STRING'] = qs    # XXX Shouldn't, really
+        ctype, pdict = parse_header(environ['CONTENT_TYPE'])
+        if ctype == 'multipart/form-data':
+            return parse_multipart(fp, pdict)
+        elif ctype == 'application/x-www-form-urlencoded':
+            clength = string.atoi(environ['CONTENT_LENGTH'])
+            if maxlen and clength > maxlen:
+                raise ValueError, 'Maximum content length exceeded'
+            qs = fp.read(clength)
+        else:
+            qs = ''                     # Unknown content-type
+        if environ.has_key('QUERY_STRING'): 
+            if qs: qs = qs + '&'
+            qs = qs + environ['QUERY_STRING']
+        elif sys.argv[1:]: 
+            if qs: qs = qs + '&'
+            qs = qs + sys.argv[1]
+        environ['QUERY_STRING'] = qs    # XXX Shouldn't, really
     elif environ.has_key('QUERY_STRING'):
-       qs = environ['QUERY_STRING']
+        qs = environ['QUERY_STRING']
     else:
-       if sys.argv[1:]:
-           qs = sys.argv[1]
-       else:
-           qs = ""
-       environ['QUERY_STRING'] = qs    # XXX Shouldn't, really
+        if sys.argv[1:]:
+            qs = sys.argv[1]
+        else:
+            qs = ""
+        environ['QUERY_STRING'] = qs    # XXX Shouldn't, really
     return parse_qs(qs, keep_blank_values, strict_parsing)
 
 
@@ -542,34 +542,34 @@ def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
 
         Arguments:
 
-       qs: URL-encoded query string to be parsed
+        qs: URL-encoded query string to be parsed
 
         keep_blank_values: flag indicating whether blank values in
             URL encoded queries should be treated as blank strings.  
             A true value indicates that blanks should be retained as 
             blank strings.  The default false value indicates that
-           blank values are to be ignored and treated as if they were
-           not included.
+            blank values are to be ignored and treated as if they were
+            not included.
 
-       strict_parsing: flag indicating what to do with parsing errors.
-           If false (the default), errors are silently ignored.
-           If true, errors raise a ValueError exception.
+        strict_parsing: flag indicating what to do with parsing errors.
+            If false (the default), errors are silently ignored.
+            If true, errors raise a ValueError exception.
     """
     name_value_pairs = string.splitfields(qs, '&')
     dict = {}
     for name_value in name_value_pairs:
-       nv = string.splitfields(name_value, '=')
-       if len(nv) != 2:
-           if strict_parsing:
-               raise ValueError, "bad query field: %s" % `name_value`
-           continue
-       name = urllib.unquote(string.replace(nv[0], '+', ' '))
-       value = urllib.unquote(string.replace(nv[1], '+', ' '))
+        nv = string.splitfields(name_value, '=')
+        if len(nv) != 2:
+            if strict_parsing:
+                raise ValueError, "bad query field: %s" % `name_value`
+            continue
+        name = urllib.unquote(string.replace(nv[0], '+', ' '))
+        value = urllib.unquote(string.replace(nv[1], '+', ' '))
         if len(value) or keep_blank_values:
-           if dict.has_key (name):
-               dict[name].append(value)
-           else:
-               dict[name] = [value]
+            if dict.has_key (name):
+                dict[name].append(value)
+            else:
+                dict[name] = [value]
     return dict
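For instance (a sketch; the query string mirrors the installation example
earlier in this docstring, with a blank field added for illustration):

        parse_qs("name=Joe+Blow&addr=At+Home&age=")
        # -> {'name': ['Joe Blow'], 'addr': ['At Home']}
        # the blank 'age' value is dropped unless keep_blank_values is true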
 
 
@@ -595,71 +595,71 @@ def parse_multipart(fp, pdict):
 
     """
     if pdict.has_key('boundary'):
-       boundary = pdict['boundary']
+        boundary = pdict['boundary']
     else:
-       boundary = ""
+        boundary = ""
     nextpart = "--" + boundary
     lastpart = "--" + boundary + "--"
     partdict = {}
     terminator = ""
 
     while terminator != lastpart:
-       bytes = -1
-       data = None
-       if terminator:
-           # At start of next part.  Read headers first.
-           headers = mimetools.Message(fp)
-           clength = headers.getheader('content-length')
-           if clength:
-               try:
-                   bytes = string.atoi(clength)
-               except string.atoi_error:
-                   pass
-           if bytes > 0:
-               if maxlen and bytes > maxlen:
-                   raise ValueError, 'Maximum content length exceeded'
-               data = fp.read(bytes)
-           else:
-               data = ""
-       # Read lines until end of part.
-       lines = []
-       while 1:
-           line = fp.readline()
-           if not line:
-               terminator = lastpart # End outer loop
-               break
-           if line[:2] == "--":
-               terminator = string.strip(line)
-               if terminator in (nextpart, lastpart):
-                   break
-           lines.append(line)
-       # Done with part.
-       if data is None:
-           continue
-       if bytes < 0:
-           if lines:
-               # Strip final line terminator
-               line = lines[-1]
-               if line[-2:] == "\r\n":
-                   line = line[:-2]
-               elif line[-1:] == "\n":
-                   line = line[:-1]
-               lines[-1] = line
-               data = string.joinfields(lines, "")
-       line = headers['content-disposition']
-       if not line:
-           continue
-       key, params = parse_header(line)
-       if key != 'form-data':
-           continue
-       if params.has_key('name'):
-           name = params['name']
-       else:
-           continue
-       if partdict.has_key(name):
-           partdict[name].append(data)
-       else:
-           partdict[name] = [data]
+        bytes = -1
+        data = None
+        if terminator:
+            # At start of next part.  Read headers first.
+            headers = mimetools.Message(fp)
+            clength = headers.getheader('content-length')
+            if clength:
+                try:
+                    bytes = string.atoi(clength)
+                except string.atoi_error:
+                    pass
+            if bytes > 0:
+                if maxlen and bytes > maxlen:
+                    raise ValueError, 'Maximum content length exceeded'
+                data = fp.read(bytes)
+            else:
+                data = ""
+        # Read lines until end of part.
+        lines = []
+        while 1:
+            line = fp.readline()
+            if not line:
+                terminator = lastpart # End outer loop
+                break
+            if line[:2] == "--":
+                terminator = string.strip(line)
+                if terminator in (nextpart, lastpart):
+                    break
+            lines.append(line)
+        # Done with part.
+        if data is None:
+            continue
+        if bytes < 0:
+            if lines:
+                # Strip final line terminator
+                line = lines[-1]
+                if line[-2:] == "\r\n":
+                    line = line[:-2]
+                elif line[-1:] == "\n":
+                    line = line[:-1]
+                lines[-1] = line
+                data = string.joinfields(lines, "")
+        line = headers['content-disposition']
+        if not line:
+            continue
+        key, params = parse_header(line)
+        if key != 'form-data':
+            continue
+        if params.has_key('name'):
+            name = params['name']
+        else:
+            continue
+        if partdict.has_key(name):
+            partdict[name].append(data)
+        else:
+            partdict[name] = [data]
 
     return partdict
 
@@ -675,13 +675,13 @@ def parse_header(line):
     del plist[0]
     pdict = {}
     for p in plist:
-       i = string.find(p, '=')
-       if i >= 0:
-           name = string.lower(string.strip(p[:i]))
-           value = string.strip(p[i+1:])
-           if len(value) >= 2 and value[0] == value[-1] == '"':
-               value = value[1:-1]
-           pdict[name] = value
+        i = string.find(p, '=')
+        if i >= 0:
+            name = string.lower(string.strip(p[:i]))
+            value = string.strip(p[i+1:])
+            if len(value) >= 2 and value[0] == value[-1] == '"':
+                value = value[1:-1]
+            pdict[name] = value
     return key, pdict
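A quick illustration of the split performed above (the header value itself
is made up):

        key, pdict = parse_header('form-data; name="addr"; filename="home.txt"')
        # key   == 'form-data'
        # pdict == {'name': 'addr', 'filename': 'home.txt'}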
 
 
@@ -703,14 +703,14 @@ class MiniFieldStorage:
     headers = {}
 
     def __init__(self, name, value):
-       """Constructor from field name and value."""
-       self.name = name
-       self.value = value
+        """Constructor from field name and value."""
+        self.name = name
+        self.value = value
         # self.file = StringIO(value)
 
     def __repr__(self):
-       """Return printable representation."""
-       return "MiniFieldStorage(%s, %s)" % (`self.name`, `self.value`)
+        """Return printable representation."""
+        return "MiniFieldStorage(%s, %s)" % (`self.name`, `self.value`)
 
 
 class FieldStorage:
@@ -727,26 +727,26 @@ class FieldStorage:
     name: the field name, if specified; otherwise None
 
     filename: the filename, if specified; otherwise None; this is the
-       client side filename, *not* the file name on which it is
-       stored (that's a temporary file you don't deal with)
+        client side filename, *not* the file name on which it is
+        stored (that's a temporary file you don't deal with)
 
     value: the value as a *string*; for file uploads, this
-       transparently reads the file every time you request the value
+        transparently reads the file every time you request the value
 
     file: the file(-like) object from which you can read the data;
-       None if the data is stored a simple string
+        None if the data is stored as a simple string
 
     type: the content-type, or None if not specified
 
     type_options: dictionary of options specified on the content-type
-       line
+        line
 
     disposition: content-disposition, or None if not specified
 
     disposition_options: dictionary of corresponding options
 
     headers: a dictionary(-like) object (sometimes rfc822.Message or a
-       subclass thereof) containing *all* headers
+        subclass thereof) containing *all* headers
 
     The class is subclassable, mostly for the purpose of overriding
     the make_file() method, which is called internally to come up with
@@ -757,293 +757,293 @@ class FieldStorage:
     """
 
     def __init__(self, fp=None, headers=None, outerboundary="",
-                environ=os.environ, keep_blank_values=0, strict_parsing=0):
-       """Constructor.  Read multipart/* until last part.
+                 environ=os.environ, keep_blank_values=0, strict_parsing=0):
+        """Constructor.  Read multipart/* until last part.
 
-       Arguments, all optional:
+        Arguments, all optional:
 
-       fp              : file pointer; default: sys.stdin
+        fp              : file pointer; default: sys.stdin
 
-       headers         : header dictionary-like object; default:
-           taken from environ as per CGI spec
+        headers         : header dictionary-like object; default:
+            taken from environ as per CGI spec
 
         outerboundary   : terminating multipart boundary
-           (for internal use only)
+            (for internal use only)
 
-       environ         : environment dictionary; default: os.environ
+        environ         : environment dictionary; default: os.environ
 
         keep_blank_values: flag indicating whether blank values in
             URL encoded forms should be treated as blank strings.  
             A true value indicates that blanks should be retained as 
             blank strings.  The default false value indicates that
-           blank values are to be ignored and treated as if they were
-           not included.
-
-       strict_parsing: flag indicating what to do with parsing errors.
-           If false (the default), errors are silently ignored.
-           If true, errors raise a ValueError exception.
-
-       """
-       method = 'GET'
-       self.keep_blank_values = keep_blank_values
-       self.strict_parsing = strict_parsing
-       if environ.has_key('REQUEST_METHOD'):
-           method = string.upper(environ['REQUEST_METHOD'])
-       if not fp and method == 'GET':
-           if environ.has_key('QUERY_STRING'):
-               qs = environ['QUERY_STRING']
-           elif sys.argv[1:]:
-               qs = sys.argv[1]
-           else:
-               qs = ""
-           fp = StringIO(qs)
-           if headers is None:
-               headers = {'content-type':
-                          "application/x-www-form-urlencoded"}
-       if headers is None:
-           headers = {}
-           if environ.has_key('CONTENT_TYPE'):
-               headers['content-type'] = environ['CONTENT_TYPE']
-           if environ.has_key('CONTENT_LENGTH'):
-               headers['content-length'] = environ['CONTENT_LENGTH']
-       self.fp = fp or sys.stdin
-       self.headers = headers
-       self.outerboundary = outerboundary
-
-       # Process content-disposition header
-       cdisp, pdict = "", {}
-       if self.headers.has_key('content-disposition'):
-           cdisp, pdict = parse_header(self.headers['content-disposition'])
-       self.disposition = cdisp
-       self.disposition_options = pdict
-       self.name = None
-       if pdict.has_key('name'):
-           self.name = pdict['name']
-       self.filename = None
-       if pdict.has_key('filename'):
-           self.filename = pdict['filename']
-
-       # Process content-type header
-       ctype, pdict = "text/plain", {}
-       if self.headers.has_key('content-type'):
-           ctype, pdict = parse_header(self.headers['content-type'])
-       self.type = ctype
-       self.type_options = pdict
-       self.innerboundary = ""
-       if pdict.has_key('boundary'):
-           self.innerboundary = pdict['boundary']
-       clen = -1
-       if self.headers.has_key('content-length'):
-           try:
-               clen = string.atoi(self.headers['content-length'])
-           except:
-               pass
-           if maxlen and clen > maxlen:
-               raise ValueError, 'Maximum content length exceeded'
-       self.length = clen
-
-       self.list = self.file = None
-       self.done = 0
-       self.lines = []
-       if ctype == 'application/x-www-form-urlencoded':
-           self.read_urlencoded()
-       elif ctype[:10] == 'multipart/':
-           self.read_multi()
-       else:
-           self.read_single()
+            blank values are to be ignored and treated as if they were
+            not included.
+
+        strict_parsing: flag indicating what to do with parsing errors.
+            If false (the default), errors are silently ignored.
+            If true, errors raise a ValueError exception.
+
+        """
+        method = 'GET'
+        self.keep_blank_values = keep_blank_values
+        self.strict_parsing = strict_parsing
+        if environ.has_key('REQUEST_METHOD'):
+            method = string.upper(environ['REQUEST_METHOD'])
+        if not fp and method == 'GET':
+            if environ.has_key('QUERY_STRING'):
+                qs = environ['QUERY_STRING']
+            elif sys.argv[1:]:
+                qs = sys.argv[1]
+            else:
+                qs = ""
+            fp = StringIO(qs)
+            if headers is None:
+                headers = {'content-type':
+                           "application/x-www-form-urlencoded"}
+        if headers is None:
+            headers = {}
+            if environ.has_key('CONTENT_TYPE'):
+                headers['content-type'] = environ['CONTENT_TYPE']
+            if environ.has_key('CONTENT_LENGTH'):
+                headers['content-length'] = environ['CONTENT_LENGTH']
+        self.fp = fp or sys.stdin
+        self.headers = headers
+        self.outerboundary = outerboundary
+
+        # Process content-disposition header
+        cdisp, pdict = "", {}
+        if self.headers.has_key('content-disposition'):
+            cdisp, pdict = parse_header(self.headers['content-disposition'])
+        self.disposition = cdisp
+        self.disposition_options = pdict
+        self.name = None
+        if pdict.has_key('name'):
+            self.name = pdict['name']
+        self.filename = None
+        if pdict.has_key('filename'):
+            self.filename = pdict['filename']
+
+        # Process content-type header
+        ctype, pdict = "text/plain", {}
+        if self.headers.has_key('content-type'):
+            ctype, pdict = parse_header(self.headers['content-type'])
+        self.type = ctype
+        self.type_options = pdict
+        self.innerboundary = ""
+        if pdict.has_key('boundary'):
+            self.innerboundary = pdict['boundary']
+        clen = -1
+        if self.headers.has_key('content-length'):
+            try:
+                clen = string.atoi(self.headers['content-length'])
+            except:
+                pass
+            if maxlen and clen > maxlen:
+                raise ValueError, 'Maximum content length exceeded'
+        self.length = clen
+
+        self.list = self.file = None
+        self.done = 0
+        self.lines = []
+        if ctype == 'application/x-www-form-urlencoded':
+            self.read_urlencoded()
+        elif ctype[:10] == 'multipart/':
+            self.read_multi()
+        else:
+            self.read_single()
 
     def __repr__(self):
-       """Return a printable representation."""
-       return "FieldStorage(%s, %s, %s)" % (
-               `self.name`, `self.filename`, `self.value`)
+        """Return a printable representation."""
+        return "FieldStorage(%s, %s, %s)" % (
+                `self.name`, `self.filename`, `self.value`)
 
     def __getattr__(self, name):
-       if name != 'value':
-           raise AttributeError, name
-       if self.file:
-           self.file.seek(0)
-           value = self.file.read()
-           self.file.seek(0)
-       elif self.list is not None:
-           value = self.list
-       else:
-           value = None
-       return value
+        if name != 'value':
+            raise AttributeError, name
+        if self.file:
+            self.file.seek(0)
+            value = self.file.read()
+            self.file.seek(0)
+        elif self.list is not None:
+            value = self.list
+        else:
+            value = None
+        return value
 
     def __getitem__(self, key):
-       """Dictionary style indexing."""
-       if self.list is None:
-           raise TypeError, "not indexable"
-       found = []
-       for item in self.list:
-           if item.name == key: found.append(item)
-       if not found:
-           raise KeyError, key
-       if len(found) == 1:
-           return found[0]
-       else:
-           return found
+        """Dictionary style indexing."""
+        if self.list is None:
+            raise TypeError, "not indexable"
+        found = []
+        for item in self.list:
+            if item.name == key: found.append(item)
+        if not found:
+            raise KeyError, key
+        if len(found) == 1:
+            return found[0]
+        else:
+            return found
 
     def keys(self):
-       """Dictionary style keys() method."""
-       if self.list is None:
-           raise TypeError, "not indexable"
-       keys = []
-       for item in self.list:
-           if item.name not in keys: keys.append(item.name)
-       return keys
+        """Dictionary style keys() method."""
+        if self.list is None:
+            raise TypeError, "not indexable"
+        keys = []
+        for item in self.list:
+            if item.name not in keys: keys.append(item.name)
+        return keys
 
     def has_key(self, key):
-       """Dictionary style has_key() method."""
-       if self.list is None:
-           raise TypeError, "not indexable"
-       for item in self.list:
-           if item.name == key: return 1
-       return 0
+        """Dictionary style has_key() method."""
+        if self.list is None:
+            raise TypeError, "not indexable"
+        for item in self.list:
+            if item.name == key: return 1
+        return 0
 
     def __len__(self):
-       """Dictionary style len(x) support."""
-       return len(self.keys())
+        """Dictionary style len(x) support."""
+        return len(self.keys())
 
     def read_urlencoded(self):
-       """Internal: read data in query string format."""
-       qs = self.fp.read(self.length)
-       dict = parse_qs(qs, self.keep_blank_values, self.strict_parsing)
-       self.list = []
-       for key, valuelist in dict.items():
-           for value in valuelist:
-               self.list.append(MiniFieldStorage(key, value))
-       self.skip_lines()
+        """Internal: read data in query string format."""
+        qs = self.fp.read(self.length)
+        dict = parse_qs(qs, self.keep_blank_values, self.strict_parsing)
+        self.list = []
+        for key, valuelist in dict.items():
+            for value in valuelist:
+                self.list.append(MiniFieldStorage(key, value))
+        self.skip_lines()
 
     def read_multi(self):
-       """Internal: read a part that is itself multipart."""
-       self.list = []
-       part = self.__class__(self.fp, {}, self.innerboundary)
-       # Throw first part away
-       while not part.done:
-           headers = rfc822.Message(self.fp)
-           part = self.__class__(self.fp, headers, self.innerboundary)
-           self.list.append(part)
-       self.skip_lines()
+        """Internal: read a part that is itself multipart."""
+        self.list = []
+        part = self.__class__(self.fp, {}, self.innerboundary)
+        # Throw first part away
+        while not part.done:
+            headers = rfc822.Message(self.fp)
+            part = self.__class__(self.fp, headers, self.innerboundary)
+            self.list.append(part)
+        self.skip_lines()
 
     def read_single(self):
-       """Internal: read an atomic part."""
-       if self.length >= 0:
-           self.read_binary()
-           self.skip_lines()
-       else:
-           self.read_lines()
-       self.file.seek(0)
+        """Internal: read an atomic part."""
+        if self.length >= 0:
+            self.read_binary()
+            self.skip_lines()
+        else:
+            self.read_lines()
+        self.file.seek(0)
 
-    bufsize = 8*1024           # I/O buffering size for copy to file
+    bufsize = 8*1024            # I/O buffering size for copy to file
 
     def read_binary(self):
-       """Internal: read binary data."""
-       self.file = self.make_file('b')
-       todo = self.length
-       if todo >= 0:
-           while todo > 0:
-               data = self.fp.read(min(todo, self.bufsize))
-               if not data:
-                   self.done = -1
-                   break
-               self.file.write(data)
-               todo = todo - len(data)
+        """Internal: read binary data."""
+        self.file = self.make_file('b')
+        todo = self.length
+        if todo >= 0:
+            while todo > 0:
+                data = self.fp.read(min(todo, self.bufsize))
+                if not data:
+                    self.done = -1
+                    break
+                self.file.write(data)
+                todo = todo - len(data)
 
     def read_lines(self):
-       """Internal: read lines until EOF or outerboundary."""
-       self.file = self.make_file('')
-       if self.outerboundary:
-           self.read_lines_to_outerboundary()
-       else:
-           self.read_lines_to_eof()
+        """Internal: read lines until EOF or outerboundary."""
+        self.file = self.make_file('')
+        if self.outerboundary:
+            self.read_lines_to_outerboundary()
+        else:
+            self.read_lines_to_eof()
 
     def read_lines_to_eof(self):
-       """Internal: read lines until EOF."""
-       while 1:
-           line = self.fp.readline()
-           if not line:
-               self.done = -1
-               break
-           self.lines.append(line)
-           self.file.write(line)
+        """Internal: read lines until EOF."""
+        while 1:
+            line = self.fp.readline()
+            if not line:
+                self.done = -1
+                break
+            self.lines.append(line)
+            self.file.write(line)
 
     def read_lines_to_outerboundary(self):
-       """Internal: read lines until outerboundary."""
-       next = "--" + self.outerboundary
-       last = next + "--"
-       delim = ""
-       while 1:
-           line = self.fp.readline()
-           if not line:
-               self.done = -1
-               break
-           self.lines.append(line)
-           if line[:2] == "--":
-               strippedline = string.strip(line)
-               if strippedline == next:
-                   break
-               if strippedline == last:
-                   self.done = 1
-                   break
-           odelim = delim
-           if line[-2:] == "\r\n":
-               delim = "\r\n"
-               line = line[:-2]
-           elif line[-1] == "\n":
-               delim = "\n"
-               line = line[:-1]
-           else:
-               delim = ""
-           self.file.write(odelim + line)
+        """Internal: read lines until outerboundary."""
+        next = "--" + self.outerboundary
+        last = next + "--"
+        delim = ""
+        while 1:
+            line = self.fp.readline()
+            if not line:
+                self.done = -1
+                break
+            self.lines.append(line)
+            if line[:2] == "--":
+                strippedline = string.strip(line)
+                if strippedline == next:
+                    break
+                if strippedline == last:
+                    self.done = 1
+                    break
+            odelim = delim
+            if line[-2:] == "\r\n":
+                delim = "\r\n"
+                line = line[:-2]
+            elif line[-1] == "\n":
+                delim = "\n"
+                line = line[:-1]
+            else:
+                delim = ""
+            self.file.write(odelim + line)
 
     def skip_lines(self):
-       """Internal: skip lines until outer boundary if defined."""
-       if not self.outerboundary or self.done:
-           return
-       next = "--" + self.outerboundary
-       last = next + "--"
-       while 1:
-           line = self.fp.readline()
-           if not line:
-               self.done = -1
-               break
-           self.lines.append(line)
-           if line[:2] == "--":
-               strippedline = string.strip(line)
-               if strippedline == next:
-                   break
-               if strippedline == last:
-                   self.done = 1
-                   break
+        """Internal: skip lines until outer boundary if defined."""
+        if not self.outerboundary or self.done:
+            return
+        next = "--" + self.outerboundary
+        last = next + "--"
+        while 1:
+            line = self.fp.readline()
+            if not line:
+                self.done = -1
+                break
+            self.lines.append(line)
+            if line[:2] == "--":
+                strippedline = string.strip(line)
+                if strippedline == next:
+                    break
+                if strippedline == last:
+                    self.done = 1
+                    break
 
     def make_file(self, binary=None):
-       """Overridable: return a readable & writable file.
+        """Overridable: return a readable & writable file.
 
-       The file will be used as follows:
-       - data is written to it
-       - seek(0)
-       - data is read from it
+        The file will be used as follows:
+        - data is written to it
+        - seek(0)
+        - data is read from it
 
-       The 'binary' argument is unused -- the file is always opened
-       in binary mode.
+        The 'binary' argument is unused -- the file is always opened
+        in binary mode.
 
-       This version opens a temporary file for reading and writing,
-       and immediately deletes (unlinks) it.  The trick (on Unix!) is
-       that the file can still be used, but it can't be opened by
-       another process, and it will automatically be deleted when it
-       is closed or when the current process terminates.
+        This version opens a temporary file for reading and writing,
+        and immediately deletes (unlinks) it.  The trick (on Unix!) is
+        that the file can still be used, but it can't be opened by
+        another process, and it will automatically be deleted when it
+        is closed or when the current process terminates.
 
-       If you want a more permanent file, you derive a class which
-       overrides this method.  If you want a visible temporary file
-       that is nevertheless automatically deleted when the script
-       terminates, try defining a __del__ method in a derived class
-       which unlinks the temporary files you have created.
+        If you want a more permanent file, you derive a class which
+        overrides this method.  If you want a visible temporary file
+        that is nevertheless automatically deleted when the script
+        terminates, try defining a __del__ method in a derived class
+        which unlinks the temporary files you have created.
 
-       """
-       import tempfile
-       return tempfile.TemporaryFile("w+b")
-       
+        """
+        import tempfile
+        return tempfile.TemporaryFile("w+b")
+        
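
The docstring above spells out the intended extension point: derive from FieldStorage and override make_file() when the anonymous, immediately-unlinked temporary file is not what you want. A minimal sketch of such an override against the Python-2-era API shown here; the NamedFileStorage name and the use of tempfile.mktemp() are illustrative assumptions, not part of the module:

    import cgi, tempfile

    class NamedFileStorage(cgi.FieldStorage):
        # Keep uploads in a named temporary file instead of an unlinked one,
        # so the data stays reachable on disk after the request is handled.
        def make_file(self, binary=None):
            name = tempfile.mktemp()      # visible file; caller handles cleanup
            self.tmpname = name           # a __del__ in a subclass could unlink this
            return open(name, 'w+b')
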
 
 
 # Backwards Compatibility Classes
@@ -1064,19 +1064,19 @@ class FormContentDict:
     """
     def __init__(self, environ=os.environ):
         self.dict = parse(environ=environ)
-       self.query_string = environ['QUERY_STRING']
+        self.query_string = environ['QUERY_STRING']
     def __getitem__(self,key):
-       return self.dict[key]
+        return self.dict[key]
     def keys(self):
-       return self.dict.keys()
+        return self.dict.keys()
     def has_key(self, key):
-       return self.dict.has_key(key)
+        return self.dict.has_key(key)
     def values(self):
-       return self.dict.values()
+        return self.dict.values()
     def items(self):
-       return self.dict.items() 
+        return self.dict.items() 
     def __len__( self ):
-       return len(self.dict)
+        return len(self.dict)
 
 
 class SvFormContentDict(FormContentDict):
@@ -1092,76 +1092,76 @@ class SvFormContentDict(FormContentDict):
 
     """
     def __getitem__(self, key):
-       if len(self.dict[key]) > 1: 
-           raise IndexError, 'expecting a single value' 
-       return self.dict[key][0]
+        if len(self.dict[key]) > 1: 
+            raise IndexError, 'expecting a single value' 
+        return self.dict[key][0]
     def getlist(self, key):
-       return self.dict[key]
+        return self.dict[key]
     def values(self):
-       lis = []
-       for each in self.dict.values(): 
-           if len( each ) == 1 : 
-               lis.append(each[0])
-           else: lis.append(each)
-       return lis
+        lis = []
+        for each in self.dict.values(): 
+            if len( each ) == 1 : 
+                lis.append(each[0])
+            else: lis.append(each)
+        return lis
     def items(self):
-       lis = []
-       for key,value in self.dict.items():
-           if len(value) == 1 :
-               lis.append((key, value[0]))
-           else:       lis.append((key, value))
-       return lis
+        lis = []
+        for key,value in self.dict.items():
+            if len(value) == 1 :
+                lis.append((key, value[0]))
+            else:       lis.append((key, value))
+        return lis
 
 
 class InterpFormContentDict(SvFormContentDict):
     """This class is present for backwards compatibility only.""" 
     def __getitem__( self, key ):
-       v = SvFormContentDict.__getitem__( self, key )
-       if v[0] in string.digits+'+-.' : 
-           try:  return  string.atoi( v ) 
-           except ValueError:
-               try:    return string.atof( v )
-               except ValueError: pass
-       return string.strip(v)
+        v = SvFormContentDict.__getitem__( self, key )
+        if v[0] in string.digits+'+-.' : 
+            try:  return  string.atoi( v ) 
+            except ValueError:
+                try:    return string.atof( v )
+                except ValueError: pass
+        return string.strip(v)
     def values( self ):
-       lis = [] 
-       for key in self.keys():
-           try:
-               lis.append( self[key] )
-           except IndexError:
-               lis.append( self.dict[key] )
-       return lis
+        lis = [] 
+        for key in self.keys():
+            try:
+                lis.append( self[key] )
+            except IndexError:
+                lis.append( self.dict[key] )
+        return lis
     def items( self ):
-       lis = [] 
-       for key in self.keys():
-           try:
-               lis.append( (key, self[key]) )
-           except IndexError:
-               lis.append( (key, self.dict[key]) )
-       return lis
+        lis = [] 
+        for key in self.keys():
+            try:
+                lis.append( (key, self[key]) )
+            except IndexError:
+                lis.append( (key, self.dict[key]) )
+        return lis
 
 
 class FormContent(FormContentDict):
     """This class is present for backwards compatibility only.""" 
     def values(self, key):
-       if self.dict.has_key(key) :return self.dict[key]
-       else: return None
+        if self.dict.has_key(key) :return self.dict[key]
+        else: return None
     def indexed_value(self, key, location):
-       if self.dict.has_key(key):
-           if len (self.dict[key]) > location:
-               return self.dict[key][location]
-           else: return None
-       else: return None
+        if self.dict.has_key(key):
+            if len (self.dict[key]) > location:
+                return self.dict[key][location]
+            else: return None
+        else: return None
     def value(self, key):
-       if self.dict.has_key(key): return self.dict[key][0]
-       else: return None
+        if self.dict.has_key(key): return self.dict[key][0]
+        else: return None
     def length(self, key):
-       return len(self.dict[key])
+        return len(self.dict[key])
     def stripped(self, key):
-       if self.dict.has_key(key): return string.strip(self.dict[key][0])
-       else: return None
+        if self.dict.has_key(key): return string.strip(self.dict[key][0])
+        else: return None
     def pars(self):
-       return self.dict
+        return self.dict
 
 
 # Test/debug code
@@ -1179,46 +1179,46 @@ def test(environ=os.environ):
     print
     sys.stderr = sys.stdout
     try:
-       form = FieldStorage()   # Replace with other classes to test those
-       print_form(form)
+        form = FieldStorage()   # Replace with other classes to test those
+        print_form(form)
         print_environ(environ)
-       print_directory()
-       print_arguments()
-       print_environ_usage()
-       def f():
-           exec "testing print_exception() -- <I>italics?</I>"
-       def g(f=f):
-           f()
-       print "<H3>What follows is a test, not an actual exception:</H3>"
-       g()
+        print_directory()
+        print_arguments()
+        print_environ_usage()
+        def f():
+            exec "testing print_exception() -- <I>italics?</I>"
+        def g(f=f):
+            f()
+        print "<H3>What follows is a test, not an actual exception:</H3>"
+        g()
     except:
-       print_exception()
+        print_exception()
 
     # Second try with a small maxlen...
     global maxlen
     maxlen = 50
     try:
-       form = FieldStorage()   # Replace with other classes to test those
-       print_form(form)
-       print_environ(environ)
-       print_directory()
-       print_arguments()
-       print_environ_usage()
+        form = FieldStorage()   # Replace with other classes to test those
+        print_form(form)
+        print_environ(environ)
+        print_directory()
+        print_arguments()
+        print_environ_usage()
     except:
-       print_exception()
+        print_exception()
 
 def print_exception(type=None, value=None, tb=None, limit=None):
     if type is None:
-       type, value, tb = sys.exc_info()
+        type, value, tb = sys.exc_info()
     import traceback
     print
     print "<H3>Traceback (innermost last):</H3>"
     list = traceback.format_tb(tb, limit) + \
-          traceback.format_exception_only(type, value)
+           traceback.format_exception_only(type, value)
     print "<PRE>%s<B>%s</B></PRE>" % (
-       escape(string.join(list[:-1], "")),
-       escape(list[-1]),
-       )
+        escape(string.join(list[:-1], "")),
+        escape(list[-1]),
+        )
     del tb
 
 def print_environ(environ=os.environ):
@@ -1229,7 +1229,7 @@ def print_environ(environ=os.environ):
     print "<H3>Shell Environment:</H3>"
     print "<DL>"
     for key in keys:
-       print "<DT>", escape(key), "<DD>", escape(environ[key])
+        print "<DT>", escape(key), "<DD>", escape(environ[key])
     print "</DL>" 
     print
 
@@ -1241,10 +1241,10 @@ def print_form(form):
     print "<H3>Form Contents:</H3>"
     print "<DL>"
     for key in keys:
-       print "<DT>" + escape(key) + ":",
-       value = form[key]
-       print "<i>" + escape(`type(value)`) + "</i>"
-       print "<DD>" + escape(`value`)
+        print "<DT>" + escape(key) + ":",
+        value = form[key]
+        print "<i>" + escape(`type(value)`) + "</i>"
+        print "<DD>" + escape(`value`)
     print "</DL>"
     print
 
@@ -1253,11 +1253,11 @@ def print_directory():
     print
     print "<H3>Current Working Directory:</H3>"
     try:
-       pwd = os.getcwd()
+        pwd = os.getcwd()
     except os.error, msg:
-       print "os.error:", escape(str(msg))
+        print "os.error:", escape(str(msg))
     else:
-       print escape(pwd)
+        print escape(pwd)
     print
 
 def print_arguments():
@@ -1316,11 +1316,11 @@ environment as well.  Here are some common variable names:
 
 def escape(s, quote=None):
     """Replace special characters '&', '<' and '>' by SGML entities."""
-    s = string.replace(s, "&", "&amp;")        # Must be done first!
+    s = string.replace(s, "&", "&amp;") # Must be done first!
     s = string.replace(s, "<", "&lt;")
     s = string.replace(s, ">", "&gt;",)
     if quote:
-       s = string.replace(s, '"', "&quot;")
+        s = string.replace(s, '"', "&quot;")
     return s
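
Taken together, the pieces above form the usual request-handling path: FieldStorage parses the query string or POST body, dictionary-style access returns items whose .value attribute holds the data, and escape() makes the result safe to echo into HTML. A minimal CGI script sketch using that API (Python 2 syntax, matching the module); the field name "user" is only an illustrative assumption:

    import cgi

    print "Content-type: text/html"
    print
    form = cgi.FieldStorage()                 # parses os.environ / sys.stdin
    if form.has_key("user"):
        # assumes a single value; form["user"] returns a list when the field repeats
        value = form["user"].value
        print "<p>Hello, %s</p>" % cgi.escape(value)
    else:
        print "<p>No 'user' field was submitted.</p>"
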
 
 
index 9b40ed97b9641766dc6e3afdb9cf567c09a35a5b..d0ff4bf2bd16fa3dc98a63e198135c0d63a49cd6 100644 (file)
@@ -32,32 +32,32 @@ def compile_command(source, filename="<input>", symbol="single"):
     code = code1 = code2 = None
 
     try:
-       code = compile(source, filename, symbol)
+        code = compile(source, filename, symbol)
     except SyntaxError, err:
-       pass
+        pass
 
     try:
-       code1 = compile(source + "\n", filename, symbol)
+        code1 = compile(source + "\n", filename, symbol)
     except SyntaxError, err1:
-       pass
+        pass
 
     try:
-       code2 = compile(source + "\n\n", filename, symbol)
+        code2 = compile(source + "\n\n", filename, symbol)
     except SyntaxError, err2:
-       pass
+        pass
 
     if code:
-       return code
+        return code
     try:
-       e1 = err1.__dict__
+        e1 = err1.__dict__
     except AttributeError:
-       e1 = err1
+        e1 = err1
     try:
-       e2 = err2.__dict__
+        e2 = err2.__dict__
     except AttributeError:
-       e2 = err2
+        e2 = err2
     if not code1 and e1 == e2:
-       raise SyntaxError, err1
+        raise SyntaxError, err1
 
 
 def interact(banner=None, readfunc=raw_input, local=None):
@@ -70,41 +70,41 @@ def interact(banner=None, readfunc=raw_input, local=None):
     sys.ps1 = '>>> '
     sys.ps2 = '... '
     if banner:
-       print banner
+        print banner
     else:
-       print "Python Interactive Console", sys.version
-       print sys.copyright
+        print "Python Interactive Console", sys.version
+        print sys.copyright
     buf = []
     while 1:
-       if buf: prompt = sys.ps2
-       else: prompt = sys.ps1
-       try: line = readfunc(prompt)
-       except KeyboardInterrupt:
-           print "\nKeyboardInterrupt"
-           buf = []
-           continue
-       except EOFError: break
-       buf.append(line)
-       try: x = compile_command(string.join(buf, "\n"))
-       except SyntaxError:
-           traceback.print_exc(0)
-           buf = []
-           continue
-       if x == None: continue
-       else:
-           try: exec x in local
-           except:
-               exc_type, exc_value, exc_traceback = \
-                       sys.exc_type, sys.exc_value, \
-                       sys.exc_traceback
-               l = len(traceback.extract_tb(sys.exc_traceback))
-               try: 1/0
-               except:
-                   m = len(traceback.extract_tb(
-                           sys.exc_traceback))
-               traceback.print_exception(exc_type,
-                       exc_value, exc_traceback, l-m)
-           buf = []
-               
+        if buf: prompt = sys.ps2
+        else: prompt = sys.ps1
+        try: line = readfunc(prompt)
+        except KeyboardInterrupt:
+            print "\nKeyboardInterrupt"
+            buf = []
+            continue
+        except EOFError: break
+        buf.append(line)
+        try: x = compile_command(string.join(buf, "\n"))
+        except SyntaxError:
+            traceback.print_exc(0)
+            buf = []
+            continue
+        if x == None: continue
+        else:
+            try: exec x in local
+            except:
+                exc_type, exc_value, exc_traceback = \
+                        sys.exc_type, sys.exc_value, \
+                        sys.exc_traceback
+                l = len(traceback.extract_tb(sys.exc_traceback))
+                try: 1/0
+                except:
+                    m = len(traceback.extract_tb(
+                            sys.exc_traceback))
+                traceback.print_exception(exc_type,
+                        exc_value, exc_traceback, l-m)
+            buf = []
+                
 if __name__ == '__main__':
     interact()
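
The interact() loop above leans entirely on compile_command(): a code object means the buffered source is complete, None means more lines are needed, and a SyntaxError means the input can never become valid. A small sketch of driving it directly (Python 2 syntax; the sample statements are arbitrary):

    import code

    # A complete statement compiles straight to a code object.
    co = code.compile_command("print 'hello'")
    if co is not None:
        exec co                                   # prints: hello

    # An unfinished block typically comes back as None ("read more lines").
    if code.compile_command("if 1:") is None:
        print "need more input"
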
index 8ef854560adfcce169d96a051f03b1d1da1def69..12acc364087e0f3f35f991f4678c408e713cbcab 100644 (file)
@@ -72,11 +72,11 @@ def mk2arg(head, x):
 #
 def mkarg(x):
     if '\'' not in x:
-       return ' \'' + x + '\''
+        return ' \'' + x + '\''
     s = ' "'
     for c in x:
-       if c in '\\$"`':
-           s = s + '\\'
-       s = s + c
+        if c in '\\$"`':
+            s = s + '\\'
+        s = s + c
     s = s + '"'
     return s
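
mkarg() shell-quotes a single argument, and its choice of quoting is visible in the code: a single-quoted form when the string contains no apostrophe, otherwise a double-quoted form with backslashes inserted before backslash, dollar, double quote and backquote. A quick Unix-only sketch (the sample strings are arbitrary):

    import commands

    print commands.mkarg("plain")     # prints:  'plain'   (leading space, single quotes)
    print commands.mkarg("don't")     # prints:  "don't"   (double quotes, since it contains ')
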
index f40d0ca4efe46f4dd237e8a2f0bb79afb8ab1052..f695b3aa8415d2a1888ba511ea2cf92a64acd06b 100644 (file)
@@ -29,36 +29,36 @@ def compile_dir(dir, maxlevels=10, ddir=None):
     """
     print 'Listing', dir, '...'
     try:
-       names = os.listdir(dir)
+        names = os.listdir(dir)
     except os.error:
-       print "Can't list", dir
-       names = []
+        print "Can't list", dir
+        names = []
     names.sort()
     for name in names:
-       fullname = os.path.join(dir, name)
-       if ddir:
-           dfile = os.path.join(ddir, name)
-       else:
-           dfile = None
-       if os.path.isfile(fullname):
-           head, tail = name[:-3], name[-3:]
-           if tail == '.py':
-               print 'Compiling', fullname, '...'
-               try:
-                   py_compile.compile(fullname, None, dfile)
-               except KeyboardInterrupt:
-                   raise KeyboardInterrupt
-               except:
-                   if type(sys.exc_type) == type(''):
-                       exc_type_name = sys.exc_type
-                   else: exc_type_name = sys.exc_type.__name__
-                   print 'Sorry:', exc_type_name + ':',
-                   print sys.exc_value
-       elif maxlevels > 0 and \
-            name != os.curdir and name != os.pardir and \
-            os.path.isdir(fullname) and \
-            not os.path.islink(fullname):
-           compile_dir(fullname, maxlevels - 1, dfile)
+        fullname = os.path.join(dir, name)
+        if ddir:
+            dfile = os.path.join(ddir, name)
+        else:
+            dfile = None
+        if os.path.isfile(fullname):
+            head, tail = name[:-3], name[-3:]
+            if tail == '.py':
+                print 'Compiling', fullname, '...'
+                try:
+                    py_compile.compile(fullname, None, dfile)
+                except KeyboardInterrupt:
+                    raise KeyboardInterrupt
+                except:
+                    if type(sys.exc_type) == type(''):
+                        exc_type_name = sys.exc_type
+                    else: exc_type_name = sys.exc_type.__name__
+                    print 'Sorry:', exc_type_name + ':',
+                    print sys.exc_value
+        elif maxlevels > 0 and \
+             name != os.curdir and name != os.pardir and \
+             os.path.isdir(fullname) and \
+             not os.path.islink(fullname):
+            compile_dir(fullname, maxlevels - 1, dfile)
 
 def compile_path(skip_curdir=1, maxlevels=0):
     """Byte-compile all module on sys.path.
@@ -70,40 +70,40 @@ def compile_path(skip_curdir=1, maxlevels=0):
 
     """
     for dir in sys.path:
-       if (not dir or dir == os.curdir) and skip_curdir:
-           print 'Skipping current directory'
-       else:
-           compile_dir(dir, maxlevels)
+        if (not dir or dir == os.curdir) and skip_curdir:
+            print 'Skipping current directory'
+        else:
+            compile_dir(dir, maxlevels)
 
 def main():
     """Script main program."""
     import getopt
     try:
-       opts, args = getopt.getopt(sys.argv[1:], 'ld:')
+        opts, args = getopt.getopt(sys.argv[1:], 'ld:')
     except getopt.error, msg:
-       print msg
-       print "usage: compileall [-l] [-d destdir] [directory ...]"
-       print "-l: don't recurse down"
-       print "-d destdir: purported directory name for error messages"
-       print "if no arguments, -l sys.path is assumed"
-       sys.exit(2)
+        print msg
+        print "usage: compileall [-l] [-d destdir] [directory ...]"
+        print "-l: don't recurse down"
+        print "-d destdir: purported directory name for error messages"
+        print "if no arguments, -l sys.path is assumed"
+        sys.exit(2)
     maxlevels = 10
     ddir = None
     for o, a in opts:
-       if o == '-l': maxlevels = 0
-       if o == '-d': ddir = a
+        if o == '-l': maxlevels = 0
+        if o == '-d': ddir = a
     if ddir:
-       if len(args) != 1:
-           print "-d destdir require exactly one directory argument"
-           sys.exit(2)
+        if len(args) != 1:
+            print "-d destdir require exactly one directory argument"
+            sys.exit(2)
     try:
-       if args:
-           for dir in args:
-               compile_dir(dir, maxlevels, ddir)
-       else:
-           compile_path()
+        if args:
+            for dir in args:
+                compile_dir(dir, maxlevels, ddir)
+        else:
+            compile_path()
     except KeyboardInterrupt:
-       print "\n[interrupt]"
+        print "\n[interrupt]"
 
 if __name__ == '__main__':
     main()
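
Both entry points above are usable as plain functions, not just through the command-line driver. A short sketch; the directory name /tmp/mylib is only an illustrative assumption:

    import compileall

    # Recursively byte-compile one tree (up to 10 directory levels, as above).
    compileall.compile_dir('/tmp/mylib', maxlevels=10)

    # Byte-compile every directory on sys.path, skipping the current directory.
    compileall.compile_path(skip_curdir=1)
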
index c7bbbf491eaf5d2a340b433fa65b91a9a0aa89b6..ba63be6ecdfe26412f068f7aa5993e889087263d 100644 (file)
@@ -48,18 +48,18 @@ Exception(*)
 
 class Exception:
     def __init__(self, *args):
-       self.args = args
+        self.args = args
 
     def __str__(self):
         if not self.args:
             return ''
-       elif len(self.args) == 1:
-           return str(self.args[0])
-       else:
-           return str(self.args)
+        elif len(self.args) == 1:
+            return str(self.args[0])
+        else:
+            return str(self.args)
 
     def __getitem__(self, i):
-       return self.args[i]
+        return self.args[i]
 
 class StandardError(Exception):
     pass
@@ -68,21 +68,21 @@ class SyntaxError(StandardError):
     filename = lineno = offset = text = None
     msg = ""
     def __init__(self, *args):
-       self.args = args
-       if len(self.args) >= 1:
-           self.msg = self.args[0]
-       if len(self.args) == 2:
-           info = self.args[1]
-           try:
-               self.filename, self.lineno, self.offset, self.text = info
-           except:
-               pass
+        self.args = args
+        if len(self.args) >= 1:
+            self.msg = self.args[0]
+        if len(self.args) == 2:
+            info = self.args[1]
+            try:
+                self.filename, self.lineno, self.offset, self.text = info
+            except:
+                pass
     def __str__(self):
         return str(self.msg)
 
 class IOError(StandardError):
     def __init__(self, *args):
-       self.args = args
+        self.args = args
         self.errno = None
         self.strerror = None
         if len(args) == 2:
@@ -146,7 +146,7 @@ class MemoryError(StandardError):
 
 class SystemExit(Exception):
     def __init__(self, *args):
-       self.args = args
+        self.args = args
         if len(args) == 0:
             self.code = None
         elif len(args) == 1:
index b332202c1a42e60614de617ab412bc5ef84749c9..2f895e9dae892e351b1a300e19b46c81d195438e 100644 (file)
@@ -80,7 +80,7 @@ _state = None
 def input(files=(), inplace=0, backup=""):
     global _state
     if _state and _state._file:
-       raise RuntimeError, "input() already active"
+        raise RuntimeError, "input() already active"
     _state = FileInput(files, inplace, backup)
     return _state
 
@@ -89,151 +89,151 @@ def close():
     state = _state
     _state = None
     if state:
-       state.close()
+        state.close()
 
 def nextfile():
     if not _state:
-       raise RuntimeError, "no active input()"
+        raise RuntimeError, "no active input()"
     return _state.nextfile()
 
 def filename():
     if not _state:
-       raise RuntimeError, "no active input()"
+        raise RuntimeError, "no active input()"
     return _state.filename()
 
 def lineno():
     if not _state:
-       raise RuntimeError, "no active input()"
+        raise RuntimeError, "no active input()"
     return _state.lineno()
 
 def filelineno():
     if not _state:
-       raise RuntimeError, "no active input()"
+        raise RuntimeError, "no active input()"
     return _state.filelineno()
 
 def isfirstline():
     if not _state:
-       raise RuntimeError, "no active input()"
+        raise RuntimeError, "no active input()"
     return _state.isfirstline()
 
 def isstdin():
     if not _state:
-       raise RuntimeError, "no active input()"
+        raise RuntimeError, "no active input()"
     return _state.isstdin()
 
 class FileInput:
 
     def __init__(self, files=(), inplace=0, backup=""):
-       if type(files) == type(''):
-           files = (files,)
-       else:
-           files = tuple(files)
-           if not files:
-               files = tuple(sys.argv[1:])
-               if not files:
-                   files = ('-',)
-       self._files = files
-       self._inplace = inplace
-       self._backup = backup
-       self._savestdout = None
-       self._output = None
-       self._filename = None
-       self._lineno = 0
-       self._filelineno = 0
-       self._file = None
-       self._isstdin = 0
+        if type(files) == type(''):
+            files = (files,)
+        else:
+            files = tuple(files)
+            if not files:
+                files = tuple(sys.argv[1:])
+                if not files:
+                    files = ('-',)
+        self._files = files
+        self._inplace = inplace
+        self._backup = backup
+        self._savestdout = None
+        self._output = None
+        self._filename = None
+        self._lineno = 0
+        self._filelineno = 0
+        self._file = None
+        self._isstdin = 0
 
     def __del__(self):
-       self.close()
+        self.close()
 
     def close(self):
-       self.nextfile()
-       self._files = ()
+        self.nextfile()
+        self._files = ()
 
     def __getitem__(self, i):
-       if i != self._lineno:
-           raise RuntimeError, "accessing lines out of order"
-       line = self.readline()
-       if not line:
-           raise IndexError, "end of input reached"
-       return line
+        if i != self._lineno:
+            raise RuntimeError, "accessing lines out of order"
+        line = self.readline()
+        if not line:
+            raise IndexError, "end of input reached"
+        return line
 
     def nextfile(self):
-       savestdout = self._savestdout
-       self._savestdout = 0
-       if savestdout:
-           sys.stdout = savestdout
+        savestdout = self._savestdout
+        self._savestdout = 0
+        if savestdout:
+            sys.stdout = savestdout
 
-       output = self._output
-       self._output = 0
-       if output:
-           output.close()
+        output = self._output
+        self._output = 0
+        if output:
+            output.close()
 
-       file = self._file
-       self._file = 0
-       if file and not self._isstdin:
-           file.close()
+        file = self._file
+        self._file = 0
+        if file and not self._isstdin:
+            file.close()
 
-       backupfilename = self._backupfilename
-       self._backupfilename = 0
-       if backupfilename and not self._backup:
-           try: os.unlink(backupfilename)
-           except: pass
+        backupfilename = self._backupfilename
+        self._backupfilename = 0
+        if backupfilename and not self._backup:
+            try: os.unlink(backupfilename)
+            except: pass
 
-       self._isstdin = 0
+        self._isstdin = 0
 
     def readline(self):
-       if not self._file:
-           if not self._files:
-               return ""
-           self._filename = self._files[0]
-           self._files = self._files[1:]
-           self._filelineno = 0
-           self._file = None
-           self._isstdin = 0
-           self._backupfilename = 0
-           if self._filename == '-':
-               self._filename = '<stdin>'
-               self._file = sys.stdin
-               self._isstdin = 1
-           else:
-               if self._inplace:
-                   self._backupfilename = (
-                       self._filename + (self._backup or ".bak"))
-                   try: os.unlink(self._backupfilename)
-                   except os.error: pass
-                   # The next three lines may raise IOError
-                   os.rename(self._filename, self._backupfilename)
-                   self._file = open(self._backupfilename, "r")
-                   self._output = open(self._filename, "w")
-                   self._savestdout = sys.stdout
-                   sys.stdout = self._output
-               else:
-                   # This may raise IOError
-                   self._file = open(self._filename, "r")
-       line = self._file.readline()
-       if line:
-           self._lineno = self._lineno + 1
-           self._filelineno = self._filelineno + 1
-           return line
-       self.nextfile()
-       # Recursive call
-       return self.readline()
+        if not self._file:
+            if not self._files:
+                return ""
+            self._filename = self._files[0]
+            self._files = self._files[1:]
+            self._filelineno = 0
+            self._file = None
+            self._isstdin = 0
+            self._backupfilename = 0
+            if self._filename == '-':
+                self._filename = '<stdin>'
+                self._file = sys.stdin
+                self._isstdin = 1
+            else:
+                if self._inplace:
+                    self._backupfilename = (
+                        self._filename + (self._backup or ".bak"))
+                    try: os.unlink(self._backupfilename)
+                    except os.error: pass
+                    # The next three lines may raise IOError
+                    os.rename(self._filename, self._backupfilename)
+                    self._file = open(self._backupfilename, "r")
+                    self._output = open(self._filename, "w")
+                    self._savestdout = sys.stdout
+                    sys.stdout = self._output
+                else:
+                    # This may raise IOError
+                    self._file = open(self._filename, "r")
+        line = self._file.readline()
+        if line:
+            self._lineno = self._lineno + 1
+            self._filelineno = self._filelineno + 1
+            return line
+        self.nextfile()
+        # Recursive call
+        return self.readline()
 
     def filename(self):
-       return self._filename
+        return self._filename
 
     def lineno(self):
-       return self._lineno
+        return self._lineno
 
     def filelineno(self):
-       return self._filelineno
+        return self._filelineno
 
     def isfirstline(self):
-       return self._filelineno == 1
+        return self._filelineno == 1
 
     def isstdin(self):
-       return self._isstdin
+        return self._isstdin
 
 def _test():
     import getopt
@@ -241,13 +241,13 @@ def _test():
     backup = 0
     opts, args = getopt.getopt(sys.argv[1:], "ib:")
     for o, a in opts:
-       if o == '-i': inplace = 1
-       if o == '-b': backup = a
+        if o == '-i': inplace = 1
+        if o == '-b': backup = a
     for line in input(args, inplace=inplace, backup=backup):
-       if line[-1:] == '\n': line = line[:-1]
-       if line[-1:] == '\r': line = line[:-1]
-       print "%d: %s[%d]%s %s" % (lineno(), filename(), filelineno(),
-                                  isfirstline() and "*" or "", line)
+        if line[-1:] == '\n': line = line[:-1]
+        if line[-1:] == '\r': line = line[:-1]
+        print "%d: %s[%d]%s %s" % (lineno(), filename(), filelineno(),
+                                   isfirstline() and "*" or "", line)
     print "%d: %s[%d]" % (lineno(), filename(), filelineno())
 
 if __name__ == '__main__':
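
The _test() function above is close to the idiomatic use of this module: iterate over fileinput.input() and query the module-level helpers for bookkeeping. A minimal sketch that numbers the lines of the files named on the command line (or stdin when none are given):

    import fileinput

    for line in fileinput.input():
        if fileinput.isfirstline():
            print "== %s ==" % fileinput.filename()
        print "%d: %s" % (fileinput.lineno(), line[:-1])
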
index 504807da6f66760ac9c8466937a3c913e0e7a5f3..a3e82a440a7093501358baabf450e874a5419d0d 100644 (file)
@@ -9,9 +9,9 @@ AS_IS = None
 class NullFormatter:
 
     def __init__(self, writer=None):
-       if not writer:
-           writer = NullWriter()
-       self.writer = writer
+        if not writer:
+            writer = NullWriter()
+        self.writer = writer
     def end_paragraph(self, blankline): pass
     def add_line_break(self): pass
     def add_hor_rule(self, *args, **kw): pass
@@ -39,88 +39,88 @@ class AbstractFormatter:
     #  in all circumstances.
 
     def __init__(self, writer):
-       self.writer = writer            # Output device
-       self.align = None               # Current alignment
-       self.align_stack = []           # Alignment stack
-       self.font_stack = []            # Font state
-       self.margin_stack = []          # Margin state
-       self.spacing = None             # Vertical spacing state
-       self.style_stack = []           # Other state, e.g. color
-       self.nospace = 1                # Should leading space be suppressed
-       self.softspace = 0              # Should a space be inserted
-       self.para_end = 1               # Just ended a paragraph
-       self.parskip = 0                # Skipped space between paragraphs?
-       self.hard_break = 1             # Have a hard break
-       self.have_label = 0
+        self.writer = writer            # Output device
+        self.align = None               # Current alignment
+        self.align_stack = []           # Alignment stack
+        self.font_stack = []            # Font state
+        self.margin_stack = []          # Margin state
+        self.spacing = None             # Vertical spacing state
+        self.style_stack = []           # Other state, e.g. color
+        self.nospace = 1                # Should leading space be suppressed
+        self.softspace = 0              # Should a space be inserted
+        self.para_end = 1               # Just ended a paragraph
+        self.parskip = 0                # Skipped space between paragraphs?
+        self.hard_break = 1             # Have a hard break
+        self.have_label = 0
 
     def end_paragraph(self, blankline):
-       if not self.hard_break:
-           self.writer.send_line_break()
-           self.have_label = 0
-       if self.parskip < blankline and not self.have_label:
-           self.writer.send_paragraph(blankline - self.parskip)
-           self.parskip = blankline
-           self.have_label = 0
-       self.hard_break = self.nospace = self.para_end = 1
-       self.softspace = 0
+        if not self.hard_break:
+            self.writer.send_line_break()
+            self.have_label = 0
+        if self.parskip < blankline and not self.have_label:
+            self.writer.send_paragraph(blankline - self.parskip)
+            self.parskip = blankline
+            self.have_label = 0
+        self.hard_break = self.nospace = self.para_end = 1
+        self.softspace = 0
 
     def add_line_break(self):
-       if not (self.hard_break or self.para_end):
-           self.writer.send_line_break()
-           self.have_label = self.parskip = 0
-       self.hard_break = self.nospace = 1
-       self.softspace = 0
+        if not (self.hard_break or self.para_end):
+            self.writer.send_line_break()
+            self.have_label = self.parskip = 0
+        self.hard_break = self.nospace = 1
+        self.softspace = 0
 
     def add_hor_rule(self, *args, **kw):
-       if not self.hard_break:
-           self.writer.send_line_break()
-       apply(self.writer.send_hor_rule, args, kw)
-       self.hard_break = self.nospace = 1
-       self.have_label = self.para_end = self.softspace = self.parskip = 0
+        if not self.hard_break:
+            self.writer.send_line_break()
+        apply(self.writer.send_hor_rule, args, kw)
+        self.hard_break = self.nospace = 1
+        self.have_label = self.para_end = self.softspace = self.parskip = 0
 
     def add_label_data(self, format, counter, blankline = None):
-       if self.have_label or not self.hard_break:
-           self.writer.send_line_break()
-       if not self.para_end:
-           self.writer.send_paragraph((blankline and 1) or 0)
-       if type(format) is StringType:
-           self.writer.send_label_data(self.format_counter(format, counter))
-       else:
-           self.writer.send_label_data(format)
-       self.nospace = self.have_label = self.hard_break = self.para_end = 1
-       self.softspace = self.parskip = 0
+        if self.have_label or not self.hard_break:
+            self.writer.send_line_break()
+        if not self.para_end:
+            self.writer.send_paragraph((blankline and 1) or 0)
+        if type(format) is StringType:
+            self.writer.send_label_data(self.format_counter(format, counter))
+        else:
+            self.writer.send_label_data(format)
+        self.nospace = self.have_label = self.hard_break = self.para_end = 1
+        self.softspace = self.parskip = 0
 
     def format_counter(self, format, counter):
         label = ''
         for c in format:
             try:
                 if c == '1':
-                   label = label + ('%d' % counter)
+                    label = label + ('%d' % counter)
                 elif c in 'aA':
-                   if counter > 0:
-                       label = label + self.format_letter(c, counter)
+                    if counter > 0:
+                        label = label + self.format_letter(c, counter)
                 elif c in 'iI':
-                   if counter > 0:
-                       label = label + self.format_roman(c, counter)
-               else:
-                   label = label + c
+                    if counter > 0:
+                        label = label + self.format_roman(c, counter)
+                else:
+                    label = label + c
             except:
                 label = label + c
         return label
 
     def format_letter(self, case, counter):
-       label = ''
-       while counter > 0:
-           counter, x = divmod(counter-1, 26)
-           s = chr(ord(case) + x)
-           label = s + label
-       return label
+        label = ''
+        while counter > 0:
+            counter, x = divmod(counter-1, 26)
+            s = chr(ord(case) + x)
+            label = s + label
+        return label
 
     def format_roman(self, case, counter):
         ones = ['i', 'x', 'c', 'm']
         fives = ['v', 'l', 'd']
         label, index = '', 0
-       # This will die of IndexError when counter is too big
+        # This will die of IndexError when counter is too big
         while counter > 0:
             counter, x = divmod(counter, 10)
             if x == 9:
@@ -134,132 +134,132 @@ class AbstractFormatter:
                 else:
                     s = ''
                 s = s + ones[index]*x
-               label = s + label
+                label = s + label
             index = index + 1
         if case == 'I':
-           return string.upper(label)
+            return string.upper(label)
         return label
 
     def add_flowing_data(self, data,
-                        # These are only here to load them into locals:
-                        whitespace = string.whitespace,
-                        join = string.join, split = string.split):
-       if not data: return
-       # The following looks a bit convoluted but is a great improvement over
-       # data = regsub.gsub('[' + string.whitespace + ']+', ' ', data)
-       prespace = data[:1] in whitespace
-       postspace = data[-1:] in whitespace
-       data = join(split(data))
-       if self.nospace and not data:
-           return
-       elif prespace or self.softspace:
-           if not data:
-               if not self.nospace:
-                   self.softspace = 1
-                   self.parskip = 0
-               return
-           if not self.nospace:
-               data = ' ' + data
-       self.hard_break = self.nospace = self.para_end = \
-                         self.parskip = self.have_label = 0
-       self.softspace = postspace
-       self.writer.send_flowing_data(data)
+                         # These are only here to load them into locals:
+                         whitespace = string.whitespace,
+                         join = string.join, split = string.split):
+        if not data: return
+        # The following looks a bit convoluted but is a great improvement over
+        # data = regsub.gsub('[' + string.whitespace + ']+', ' ', data)
+        prespace = data[:1] in whitespace
+        postspace = data[-1:] in whitespace
+        data = join(split(data))
+        if self.nospace and not data:
+            return
+        elif prespace or self.softspace:
+            if not data:
+                if not self.nospace:
+                    self.softspace = 1
+                    self.parskip = 0
+                return
+            if not self.nospace:
+                data = ' ' + data
+        self.hard_break = self.nospace = self.para_end = \
+                          self.parskip = self.have_label = 0
+        self.softspace = postspace
+        self.writer.send_flowing_data(data)
 
     def add_literal_data(self, data):
-       if not data: return
-       if self.softspace:
-           self.writer.send_flowing_data(" ")
-       self.hard_break = data[-1:] == '\n'
-       self.nospace = self.para_end = self.softspace = \
-                      self.parskip = self.have_label = 0
-       self.writer.send_literal_data(data)
+        if not data: return
+        if self.softspace:
+            self.writer.send_flowing_data(" ")
+        self.hard_break = data[-1:] == '\n'
+        self.nospace = self.para_end = self.softspace = \
+                       self.parskip = self.have_label = 0
+        self.writer.send_literal_data(data)
 
     def flush_softspace(self):
-       if self.softspace:
-           self.hard_break = self.para_end = self.parskip = \
-                             self.have_label = self.softspace = 0
-           self.nospace = 1
-           self.writer.send_flowing_data(' ')
+        if self.softspace:
+            self.hard_break = self.para_end = self.parskip = \
+                              self.have_label = self.softspace = 0
+            self.nospace = 1
+            self.writer.send_flowing_data(' ')
 
     def push_alignment(self, align):
-       if align and align != self.align:
-           self.writer.new_alignment(align)
-           self.align = align
-           self.align_stack.append(align)
-       else:
-           self.align_stack.append(self.align)
+        if align and align != self.align:
+            self.writer.new_alignment(align)
+            self.align = align
+            self.align_stack.append(align)
+        else:
+            self.align_stack.append(self.align)
 
     def pop_alignment(self):
-       if self.align_stack:
-           del self.align_stack[-1]
-       if self.align_stack:
-           self.align = align = self.align_stack[-1]
-           self.writer.new_alignment(align)
-       else:
-           self.align = None
-           self.writer.new_alignment(None)
+        if self.align_stack:
+            del self.align_stack[-1]
+        if self.align_stack:
+            self.align = align = self.align_stack[-1]
+            self.writer.new_alignment(align)
+        else:
+            self.align = None
+            self.writer.new_alignment(None)
 
     def push_font(self, (size, i, b, tt)):
-       if self.softspace:
-           self.hard_break = self.para_end = self.softspace = 0
-           self.nospace = 1
-           self.writer.send_flowing_data(' ')
-       if self.font_stack:
-           csize, ci, cb, ctt = self.font_stack[-1]
-           if size is AS_IS: size = csize
-           if i is AS_IS: i = ci
-           if b is AS_IS: b = cb
-           if tt is AS_IS: tt = ctt
-       font = (size, i, b, tt)
-       self.font_stack.append(font)
-       self.writer.new_font(font)
+        if self.softspace:
+            self.hard_break = self.para_end = self.softspace = 0
+            self.nospace = 1
+            self.writer.send_flowing_data(' ')
+        if self.font_stack:
+            csize, ci, cb, ctt = self.font_stack[-1]
+            if size is AS_IS: size = csize
+            if i is AS_IS: i = ci
+            if b is AS_IS: b = cb
+            if tt is AS_IS: tt = ctt
+        font = (size, i, b, tt)
+        self.font_stack.append(font)
+        self.writer.new_font(font)
 
     def pop_font(self):
-       if self.font_stack:
-           del self.font_stack[-1]
-       if self.font_stack:
-           font = self.font_stack[-1]
-       else:
-           font = None
-       self.writer.new_font(font)
+        if self.font_stack:
+            del self.font_stack[-1]
+        if self.font_stack:
+            font = self.font_stack[-1]
+        else:
+            font = None
+        self.writer.new_font(font)
 
     def push_margin(self, margin):
-       self.margin_stack.append(margin)
-       fstack = filter(None, self.margin_stack)
-       if not margin and fstack:
-           margin = fstack[-1]
-       self.writer.new_margin(margin, len(fstack))
+        self.margin_stack.append(margin)
+        fstack = filter(None, self.margin_stack)
+        if not margin and fstack:
+            margin = fstack[-1]
+        self.writer.new_margin(margin, len(fstack))
 
     def pop_margin(self):
-       if self.margin_stack:
-           del self.margin_stack[-1]
-       fstack = filter(None, self.margin_stack)
-       if fstack:
-           margin = fstack[-1]
-       else:
-           margin = None
-       self.writer.new_margin(margin, len(fstack))
+        if self.margin_stack:
+            del self.margin_stack[-1]
+        fstack = filter(None, self.margin_stack)
+        if fstack:
+            margin = fstack[-1]
+        else:
+            margin = None
+        self.writer.new_margin(margin, len(fstack))
 
     def set_spacing(self, spacing):
-       self.spacing = spacing
-       self.writer.new_spacing(spacing)
+        self.spacing = spacing
+        self.writer.new_spacing(spacing)
 
     def push_style(self, *styles):
-       if self.softspace:
-           self.hard_break = self.para_end = self.softspace = 0
-           self.nospace = 1
-           self.writer.send_flowing_data(' ')
-       for style in styles:
-           self.style_stack.append(style)
-       self.writer.new_styles(tuple(self.style_stack))
+        if self.softspace:
+            self.hard_break = self.para_end = self.softspace = 0
+            self.nospace = 1
+            self.writer.send_flowing_data(' ')
+        for style in styles:
+            self.style_stack.append(style)
+        self.writer.new_styles(tuple(self.style_stack))
 
     def pop_style(self, n=1):
-       del self.style_stack[-n:]
-       self.writer.new_styles(tuple(self.style_stack))
+        del self.style_stack[-n:]
+        self.writer.new_styles(tuple(self.style_stack))
 
     def assert_line_data(self, flag=1):
-       self.nospace = self.hard_break = not flag
-       self.para_end = self.parskip = self.have_label = 0
+        self.nospace = self.hard_break = not flag
+        self.para_end = self.parskip = self.have_label = 0
 
 
 class NullWriter:
@@ -282,119 +282,119 @@ class NullWriter:
 class AbstractWriter(NullWriter):
 
     def __init__(self):
-       pass
+        pass
 
     def new_alignment(self, align):
-       print "new_alignment(%s)" % `align`
+        print "new_alignment(%s)" % `align`
 
     def new_font(self, font):
-       print "new_font(%s)" % `font`
+        print "new_font(%s)" % `font`
 
     def new_margin(self, margin, level):
-       print "new_margin(%s, %d)" % (`margin`, level)
+        print "new_margin(%s, %d)" % (`margin`, level)
 
     def new_spacing(self, spacing):
-       print "new_spacing(%s)" % `spacing`
+        print "new_spacing(%s)" % `spacing`
 
     def new_styles(self, styles):
-       print "new_styles(%s)" % `styles`
+        print "new_styles(%s)" % `styles`
 
     def send_paragraph(self, blankline):
-       print "send_paragraph(%s)" % `blankline`
+        print "send_paragraph(%s)" % `blankline`
 
     def send_line_break(self):
-       print "send_line_break()"
+        print "send_line_break()"
 
     def send_hor_rule(self, *args, **kw):
-       print "send_hor_rule()"
+        print "send_hor_rule()"
 
     def send_label_data(self, data):
-       print "send_label_data(%s)" % `data`
+        print "send_label_data(%s)" % `data`
 
     def send_flowing_data(self, data):
-       print "send_flowing_data(%s)" % `data`
+        print "send_flowing_data(%s)" % `data`
 
     def send_literal_data(self, data):
-       print "send_literal_data(%s)" % `data`
+        print "send_literal_data(%s)" % `data`
 
 
 class DumbWriter(NullWriter):
 
     def __init__(self, file=None, maxcol=72):
-       self.file = file or sys.stdout
-       self.maxcol = maxcol
-       NullWriter.__init__(self)
-       self.reset()
+        self.file = file or sys.stdout
+        self.maxcol = maxcol
+        NullWriter.__init__(self)
+        self.reset()
 
     def reset(self):
-       self.col = 0
-       self.atbreak = 0
+        self.col = 0
+        self.atbreak = 0
 
     def send_paragraph(self, blankline):
-       self.file.write('\n' + '\n'*blankline)
-       self.col = 0
-       self.atbreak = 0
+        self.file.write('\n' + '\n'*blankline)
+        self.col = 0
+        self.atbreak = 0
 
     def send_line_break(self):
-       self.file.write('\n')
-       self.col = 0
-       self.atbreak = 0
+        self.file.write('\n')
+        self.col = 0
+        self.atbreak = 0
 
     def send_hor_rule(self, *args, **kw):
-       self.file.write('\n')
-       self.file.write('-'*self.maxcol)
-       self.file.write('\n')
-       self.col = 0
-       self.atbreak = 0
+        self.file.write('\n')
+        self.file.write('-'*self.maxcol)
+        self.file.write('\n')
+        self.col = 0
+        self.atbreak = 0
 
     def send_literal_data(self, data):
-       self.file.write(data)
-       i = string.rfind(data, '\n')
-       if i >= 0:
-           self.col = 0
-           data = data[i+1:]
-       data = string.expandtabs(data)
-       self.col = self.col + len(data)
-       self.atbreak = 0
+        self.file.write(data)
+        i = string.rfind(data, '\n')
+        if i >= 0:
+            self.col = 0
+            data = data[i+1:]
+        data = string.expandtabs(data)
+        self.col = self.col + len(data)
+        self.atbreak = 0
 
     def send_flowing_data(self, data):
-       if not data: return
-       atbreak = self.atbreak or data[0] in string.whitespace
-       col = self.col
-       maxcol = self.maxcol
-       write = self.file.write
-       for word in string.split(data):
-           if atbreak:
-               if col + len(word) >= maxcol:
-                   write('\n')
-                   col = 0
-               else:
-                   write(' ')
-                   col = col + 1
-           write(word)
-           col = col + len(word)
-           atbreak = 1
-       self.col = col
-       self.atbreak = data[-1] in string.whitespace
+        if not data: return
+        atbreak = self.atbreak or data[0] in string.whitespace
+        col = self.col
+        maxcol = self.maxcol
+        write = self.file.write
+        for word in string.split(data):
+            if atbreak:
+                if col + len(word) >= maxcol:
+                    write('\n')
+                    col = 0
+                else:
+                    write(' ')
+                    col = col + 1
+            write(word)
+            col = col + len(word)
+            atbreak = 1
+        self.col = col
+        self.atbreak = data[-1] in string.whitespace
 
 
 def test(file = None):
     w = DumbWriter()
     f = AbstractFormatter(w)
     if file:
-       fp = open(file)
+        fp = open(file)
     elif sys.argv[1:]:
-       fp = open(sys.argv[1])
+        fp = open(sys.argv[1])
     else:
-       fp = sys.stdin
+        fp = sys.stdin
     while 1:
-       line = fp.readline()
-       if not line:
-           break
-       if line == '\n':
-           f.end_paragraph(1)
-       else:
-           f.add_flowing_data(line)
+        line = fp.readline()
+        if not line:
+            break
+        if line == '\n':
+            f.end_paragraph(1)
+        else:
+            f.add_flowing_data(line)
     f.end_paragraph(0)
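
The formatter.py hunks above are whitespace-only, but the word-wrapping loop in DumbWriter.send_flowing_data is easiest to follow with a tiny driver. A minimal sketch, not part of the checked-in diff, using the API exactly as test() does; the text and the 20-column width are made up to force wraps:

    import formatter

    w = formatter.DumbWriter(maxcol=20)          # wrap at column 20
    f = formatter.AbstractFormatter(w)
    f.add_flowing_data("The quick brown fox jumps over the lazy dog, "
                       "twice in a row.")
    f.end_paragraph(1)                           # flush, one blank line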
 
 
index 5d71b282546794eee14466e9d362e0df8522a456..e93f7d1a68ad6932433e6d12b6e08e6a1d73f7c9 100644 (file)
 # detects an error.
 
 # It returns two values:
-# (1)  a list of pairs (option, option_argument) giving the options in
-#      the order in which they were specified.  (I'd use a dictionary
-#      but applications may depend on option order or multiple
-#      occurrences.)  Boolean options have '' as option_argument.
-# (2)  the list of remaining arguments (may be empty).
+# (1)   a list of pairs (option, option_argument) giving the options in
+#       the order in which they were specified.  (I'd use a dictionary
+#       but applications may depend on option order or multiple
+#       occurrences.)  Boolean options have '' as option_argument.
+# (2)   the list of remaining arguments (may be empty).
 
 import string
 
@@ -36,31 +36,31 @@ def getopt(args, shortopts, longopts = []):
     longopts = longopts[:]
     longopts.sort()
     while args and args[0][:1] == '-' and args[0] != '-':
-       if args[0] == '--':
-           args = args[1:]
-           break
-       if args[0][:2] == '--':
-           list, args = do_longs(list, args[0][2:], longopts, args[1:])
-       else:
-           list, args = do_shorts(list, args[0][1:], shortopts, args[1:])
+        if args[0] == '--':
+            args = args[1:]
+            break
+        if args[0][:2] == '--':
+            list, args = do_longs(list, args[0][2:], longopts, args[1:])
+        else:
+            list, args = do_shorts(list, args[0][1:], shortopts, args[1:])
 
     return list, args
 
 def do_longs(list, opt, longopts, args):
     try:
-       i = string.index(opt, '=')
-       opt, optarg = opt[:i], opt[i+1:]
+        i = string.index(opt, '=')
+        opt, optarg = opt[:i], opt[i+1:]
     except ValueError:
-       optarg = None
+        optarg = None
 
     has_arg, opt = long_has_args(opt, longopts)
     if has_arg:
-       if optarg is None:
-           if not args:
-               raise error, 'option --%s requires argument' % opt
-           optarg, args = args[0], args[1:]
+        if optarg is None:
+            if not args:
+                raise error, 'option --%s requires argument' % opt
+            optarg, args = args[0], args[1:]
     elif optarg:
-       raise error, 'option --%s must not have an argument' % opt
+        raise error, 'option --%s must not have an argument' % opt
     list.append(('--' + opt, optarg or ''))
     return list, args
 
@@ -70,35 +70,35 @@ def do_longs(list, opt, longopts, args):
 def long_has_args(opt, longopts):
     optlen = len(opt)
     for i in range(len(longopts)):
-       x, y = longopts[i][:optlen], longopts[i][optlen:]
-       if opt != x:
-           continue
-       if y != '' and y != '=' and i+1 < len(longopts):
-           if opt == longopts[i+1][:optlen]:
-               raise error, 'option --%s not a unique prefix' % opt
-       if longopts[i][-1:] in ('=', ):
-           return 1, longopts[i][:-1]
-       return 0, longopts[i]
+        x, y = longopts[i][:optlen], longopts[i][optlen:]
+        if opt != x:
+            continue
+        if y != '' and y != '=' and i+1 < len(longopts):
+            if opt == longopts[i+1][:optlen]:
+                raise error, 'option --%s not a unique prefix' % opt
+        if longopts[i][-1:] in ('=', ):
+            return 1, longopts[i][:-1]
+        return 0, longopts[i]
     raise error, 'option --' + opt + ' not recognized'
 
 def do_shorts(list, optstring, shortopts, args):
     while optstring != '':
-       opt, optstring = optstring[0], optstring[1:]
-       if short_has_arg(opt, shortopts):
-           if optstring == '':
-               if not args:
-                   raise error, 'option -%s requires argument' % opt
-               optstring, args = args[0], args[1:]
-           optarg, optstring = optstring, ''
-       else:
-           optarg = ''
-       list.append(('-' + opt, optarg))
+        opt, optstring = optstring[0], optstring[1:]
+        if short_has_arg(opt, shortopts):
+            if optstring == '':
+                if not args:
+                    raise error, 'option -%s requires argument' % opt
+                optstring, args = args[0], args[1:]
+            optarg, optstring = optstring, ''
+        else:
+            optarg = ''
+        list.append(('-' + opt, optarg))
     return list, args
 
 def short_has_arg(opt, shortopts):
     for i in range(len(shortopts)):
-       if opt == shortopts[i] != ':':
-           return shortopts[i+1:i+2] == ':'
+        if opt == shortopts[i] != ':':
+            return shortopts[i+1:i+2] == ':'
     raise error, 'option -%s not recognized' % opt
 
 if __name__ == '__main__':
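
The comment block above documents getopt()'s two return values. A small usage sketch, not part of the diff, with a made-up argument vector to show both of them:

    import getopt

    opts, rest = getopt.getopt(['-a', '-b', 'val', '--name=x', 'file1'],
                               'ab:', ['name='])
    # opts -> [('-a', ''), ('-b', 'val'), ('--name', 'x')]
    # rest -> ['file1']
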
index 3d656b927e8a855321bf82b508f359382b510ef6..9c5ff0c6f3b583dc1c753a2bcc0e1f07a9cd1f17 100644 (file)
@@ -47,222 +47,222 @@ class GzipFile:
     myfileobj = None
 
     def __init__(self, filename=None, mode=None, 
-                compresslevel=9, fileobj=None):
-       if fileobj is None:
-           fileobj = self.myfileobj = __builtin__.open(filename, mode or 'r')
+                 compresslevel=9, fileobj=None):
+        if fileobj is None:
+            fileobj = self.myfileobj = __builtin__.open(filename, mode or 'r')
         if filename is None:
-           if hasattr(fileobj, 'name'): filename = fileobj.name
-           else: filename = ''
+            if hasattr(fileobj, 'name'): filename = fileobj.name
+            else: filename = ''
         if mode is None:
-           if hasattr(fileobj, 'mode'): mode = fileobj.mode
-           else: mode = 'r'
-
-       if mode[0:1] == 'r':
-           self.mode = READ
-           self._init_read()
-           self.filename = filename
-           self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
-
-       elif mode[0:1] == 'w':
-           self.mode = WRITE
-           self._init_write(filename)
-           self.compress = zlib.compressobj(compresslevel,
-                                            zlib.DEFLATED, 
-                                            -zlib.MAX_WBITS,
-                                            zlib.DEF_MEM_LEVEL,
-                                            0)
-       else:
-           raise ValueError, "Mode " + mode + " not supported"
-
-       self.fileobj = fileobj
-
-       if self.mode == WRITE:
-           self._write_gzip_header()
-       elif self.mode == READ:
-           self._read_gzip_header()
+            if hasattr(fileobj, 'mode'): mode = fileobj.mode
+            else: mode = 'r'
+
+        if mode[0:1] == 'r':
+            self.mode = READ
+            self._init_read()
+            self.filename = filename
+            self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
+
+        elif mode[0:1] == 'w':
+            self.mode = WRITE
+            self._init_write(filename)
+            self.compress = zlib.compressobj(compresslevel,
+                                             zlib.DEFLATED, 
+                                             -zlib.MAX_WBITS,
+                                             zlib.DEF_MEM_LEVEL,
+                                             0)
+        else:
+            raise ValueError, "Mode " + mode + " not supported"
+
+        self.fileobj = fileobj
+
+        if self.mode == WRITE:
+            self._write_gzip_header()
+        elif self.mode == READ:
+            self._read_gzip_header()
 
     def __repr__(self):
-       s = repr(self.fileobj)
-       return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
+        s = repr(self.fileobj)
+        return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
 
     def _init_write(self, filename):
-       if filename[-3:] != '.gz':
-           filename = filename + '.gz'
-       self.filename = filename
-       self.crc = zlib.crc32("")
-       self.size = 0
-       self.writebuf = []
-       self.bufsize = 0
+        if filename[-3:] != '.gz':
+            filename = filename + '.gz'
+        self.filename = filename
+        self.crc = zlib.crc32("")
+        self.size = 0
+        self.writebuf = []
+        self.bufsize = 0
 
     def _write_gzip_header(self):
-       self.fileobj.write('\037\213')             # magic header
-       self.fileobj.write('\010')                 # compression method
-       fname = self.filename[:-3]
-       flags = 0
-       if fname:
-           flags = FNAME
-       self.fileobj.write(chr(flags))
-       write32(self.fileobj, int(time.time()))
-       self.fileobj.write('\002')
-       self.fileobj.write('\377')
-       if fname:
-           self.fileobj.write(fname + '\000')
+        self.fileobj.write('\037\213')             # magic header
+        self.fileobj.write('\010')                 # compression method
+        fname = self.filename[:-3]
+        flags = 0
+        if fname:
+            flags = FNAME
+        self.fileobj.write(chr(flags))
+        write32(self.fileobj, int(time.time()))
+        self.fileobj.write('\002')
+        self.fileobj.write('\377')
+        if fname:
+            self.fileobj.write(fname + '\000')
 
     def _init_read(self):
-       self.crc = zlib.crc32("")
-       self.size = 0
-       self.extrabuf = ""
-       self.extrasize = 0
+        self.crc = zlib.crc32("")
+        self.size = 0
+        self.extrabuf = ""
+        self.extrasize = 0
 
     def _read_gzip_header(self):
-       magic = self.fileobj.read(2)
-       if magic != '\037\213':
-           raise RuntimeError, 'Not a gzipped file'
-       method = ord( self.fileobj.read(1) )
-       if method != 8:
-           raise RuntimeError, 'Unknown compression method'
-       flag = ord( self.fileobj.read(1) )
-       # modtime = self.fileobj.read(4)
-       # extraflag = self.fileobj.read(1)
-       # os = self.fileobj.read(1)
-       self.fileobj.read(6)
-
-       if flag & FEXTRA:
-           # Read & discard the extra field, if present
-           xlen=ord(self.fileobj.read(1))              
-           xlen=xlen+256*ord(self.fileobj.read(1))
-           self.fileobj.read(xlen)
-       if flag & FNAME:
-           # Read and discard a null-terminated string containing the filename
-           while (1):
-               s=self.fileobj.read(1)
-               if not s or s=='\000': break
-       if flag & FCOMMENT:
-           # Read and discard a null-terminated string containing a comment
-           while (1):
-               s=self.fileobj.read(1)
-               if not s or s=='\000': break
-       if flag & FHCRC:
-           self.fileobj.read(2)     # Read & discard the 16-bit header CRC
+        magic = self.fileobj.read(2)
+        if magic != '\037\213':
+            raise RuntimeError, 'Not a gzipped file'
+        method = ord( self.fileobj.read(1) )
+        if method != 8:
+            raise RuntimeError, 'Unknown compression method'
+        flag = ord( self.fileobj.read(1) )
+        # modtime = self.fileobj.read(4)
+        # extraflag = self.fileobj.read(1)
+        # os = self.fileobj.read(1)
+        self.fileobj.read(6)
+
+        if flag & FEXTRA:
+            # Read & discard the extra field, if present
+            xlen=ord(self.fileobj.read(1))              
+            xlen=xlen+256*ord(self.fileobj.read(1))
+            self.fileobj.read(xlen)
+        if flag & FNAME:
+            # Read and discard a null-terminated string containing the filename
+            while (1):
+                s=self.fileobj.read(1)
+                if not s or s=='\000': break
+        if flag & FCOMMENT:
+            # Read and discard a null-terminated string containing a comment
+            while (1):
+                s=self.fileobj.read(1)
+                if not s or s=='\000': break
+        if flag & FHCRC:
+            self.fileobj.read(2)     # Read & discard the 16-bit header CRC
 
 
     def write(self,data):
-       if self.fileobj is None:
-           raise ValueError, "write() on closed GzipFile object"
-       if len(data) > 0:
-           self.size = self.size + len(data)
-           self.crc = zlib.crc32(data, self.crc)
-           self.fileobj.write( self.compress.compress(data) )
+        if self.fileobj is None:
+            raise ValueError, "write() on closed GzipFile object"
+        if len(data) > 0:
+            self.size = self.size + len(data)
+            self.crc = zlib.crc32(data, self.crc)
+            self.fileobj.write( self.compress.compress(data) )
 
     def writelines(self,lines):
-       self.write(string.join(lines))
+        self.write(string.join(lines))
 
     def read(self,size=None):
-       if self.extrasize <= 0 and self.fileobj is None:
-           return ''
-
-       readsize = 1024
-       if not size:        # get the whole thing
-           try:
-               while 1:
-                   self._read(readsize)
-                   readsize = readsize * 2
-           except EOFError:
-               size = self.extrasize
-       else:               # just get some more of it
-           try:
-               while size > self.extrasize:
-                   self._read(readsize)
-                   readsize = readsize * 2
-           except EOFError:
-               pass
-       
-       chunk = self.extrabuf[:size]
-       self.extrabuf = self.extrabuf[size:]
-       self.extrasize = self.extrasize - size
-
-       return chunk
+        if self.extrasize <= 0 and self.fileobj is None:
+            return ''
+
+        readsize = 1024
+        if not size:        # get the whole thing
+            try:
+                while 1:
+                    self._read(readsize)
+                    readsize = readsize * 2
+            except EOFError:
+                size = self.extrasize
+        else:               # just get some more of it
+            try:
+                while size > self.extrasize:
+                    self._read(readsize)
+                    readsize = readsize * 2
+            except EOFError:
+                pass
+        
+        chunk = self.extrabuf[:size]
+        self.extrabuf = self.extrabuf[size:]
+        self.extrasize = self.extrasize - size
+
+        return chunk
 
     def _unread(self, buf):
-       self.extrabuf = buf + self.extrabuf
-       self.extrasize = len(buf) + self.extrasize
+        self.extrabuf = buf + self.extrabuf
+        self.extrasize = len(buf) + self.extrasize
 
     def _read(self, size=1024):
-       try:
-           buf = self.fileobj.read(size)
-       except AttributeError:
-           raise EOFError, "Reached EOF"
-       if buf == "":
-           uncompress = self.decompress.flush()
-           if uncompress == "":
-               self._read_eof()
-               self.fileobj = None
-               raise EOFError, 'Reached EOF'
-       else:
-           uncompress = self.decompress.decompress(buf)
-       self.crc = zlib.crc32(uncompress, self.crc)
-       self.extrabuf = self.extrabuf + uncompress
-       self.extrasize = self.extrasize + len(uncompress)
-       self.size = self.size + len(uncompress)
+        try:
+            buf = self.fileobj.read(size)
+        except AttributeError:
+            raise EOFError, "Reached EOF"
+        if buf == "":
+            uncompress = self.decompress.flush()
+            if uncompress == "":
+                self._read_eof()
+                self.fileobj = None
+                raise EOFError, 'Reached EOF'
+        else:
+            uncompress = self.decompress.decompress(buf)
+        self.crc = zlib.crc32(uncompress, self.crc)
+        self.extrabuf = self.extrabuf + uncompress
+        self.extrasize = self.extrasize + len(uncompress)
+        self.size = self.size + len(uncompress)
 
     def _read_eof(self):
-       # Andrew writes:
-       ## We've read to the end of the file, so we have to rewind in order
-       ## to reread the 8 bytes containing the CRC and the file size.  The
-       ## decompressor is smart and knows when to stop, so feeding it
-       ## extra data is harmless.  
-       self.fileobj.seek(-8, 2)
-       crc32 = read32(self.fileobj)
-       isize = read32(self.fileobj)
-       if crc32 != self.crc:
-           self.error = "CRC check failed"
-       elif isize != self.size:
-           self.error = "Incorrect length of data produced"
+        # Andrew writes:
+        ## We've read to the end of the file, so we have to rewind in order
+        ## to reread the 8 bytes containing the CRC and the file size.  The
+        ## decompressor is smart and knows when to stop, so feeding it
+        ## extra data is harmless.  
+        self.fileobj.seek(-8, 2)
+        crc32 = read32(self.fileobj)
+        isize = read32(self.fileobj)
+        if crc32 != self.crc:
+            self.error = "CRC check failed"
+        elif isize != self.size:
+            self.error = "Incorrect length of data produced"
 
     def close(self):
-       if self.mode == WRITE:
-           self.fileobj.write(self.compress.flush())
-           write32(self.fileobj, self.crc)
-           write32(self.fileobj, self.size)
-           self.fileobj = None
-       elif self.mode == READ:
-           self.fileobj = None
-       if self.myfileobj:
-           self.myfileobj.close()
-           self.myfileobj = None
+        if self.mode == WRITE:
+            self.fileobj.write(self.compress.flush())
+            write32(self.fileobj, self.crc)
+            write32(self.fileobj, self.size)
+            self.fileobj = None
+        elif self.mode == READ:
+            self.fileobj = None
+        if self.myfileobj:
+            self.myfileobj.close()
+            self.myfileobj = None
 
     def flush(self):
-       self.fileobj.flush()
+        self.fileobj.flush()
 
     def seek(self):
-       raise IOError, 'Random access not allowed in gzip files'
+        raise IOError, 'Random access not allowed in gzip files'
 
     def tell(self):
-       raise IOError, 'I won\'t tell() you for gzip files'
+        raise IOError, 'I won\'t tell() you for gzip files'
 
     def isatty(self):
-       return 0
+        return 0
 
     def readline(self):
-       bufs = []
-       readsize = 100
-       while 1:
-           c = self.read(readsize)
-           i = string.find(c, '\n')
-           if i >= 0 or c == '':
-               bufs.append(c[:i])
-               self._unread(c[i+1:])
-               return string.join(bufs, '')
-           bufs.append(c)
-           readsize = readsize * 2
+        bufs = []
+        readsize = 100
+        while 1:
+            c = self.read(readsize)
+            i = string.find(c, '\n')
+            if i >= 0 or c == '':
+                bufs.append(c[:i])
+                self._unread(c[i+1:])
+                return string.join(bufs, '')
+            bufs.append(c)
+            readsize = readsize * 2
 
     def readlines(self):
-       buf = self.read()
-       return string.split(buf, '\n')
+        buf = self.read()
+        return string.split(buf, '\n')
 
     def writelines(self, L):
-       for line in L:
-           self.write(line)
+        for line in L:
+            self.write(line)
 
 
 def _test():
@@ -273,36 +273,36 @@ def _test():
     args = sys.argv[1:]
     decompress = args and args[0] == "-d"
     if decompress:
-       args = args[1:]
+        args = args[1:]
     if not args:
-       args = ["-"]
+        args = ["-"]
     for arg in args:
-       if decompress:
-           if arg == "-":
-               f = GzipFile(filename="", mode="rb", fileobj=sys.stdin)
-               g = sys.stdout
-           else:
-               if arg[-3:] != ".gz":
-                   print "filename doesn't end in .gz:", `arg`
-                   continue
-               f = open(arg, "rb")
-               g = __builtin__.open(arg[:-3], "wb")
-       else:
-           if arg == "-":
-               f = sys.stdin
-               g = GzipFile(filename="", mode="wb", fileobj=sys.stdout)
-           else:
-               f = __builtin__.open(arg, "rb")
-               g = open(arg + ".gz", "wb")
-       while 1:
-           chunk = f.read(1024)
-           if not chunk:
-               break
-           g.write(chunk)
-       if g is not sys.stdout:
-           g.close()
-       if f is not sys.stdin:
-           f.close()
+        if decompress:
+            if arg == "-":
+                f = GzipFile(filename="", mode="rb", fileobj=sys.stdin)
+                g = sys.stdout
+            else:
+                if arg[-3:] != ".gz":
+                    print "filename doesn't end in .gz:", `arg`
+                    continue
+                f = open(arg, "rb")
+                g = __builtin__.open(arg[:-3], "wb")
+        else:
+            if arg == "-":
+                f = sys.stdin
+                g = GzipFile(filename="", mode="wb", fileobj=sys.stdout)
+            else:
+                f = __builtin__.open(arg, "rb")
+                g = open(arg + ".gz", "wb")
+        while 1:
+            chunk = f.read(1024)
+            if not chunk:
+                break
+            g.write(chunk)
+        if g is not sys.stdout:
+            g.close()
+        if f is not sys.stdin:
+            f.close()
 
 if __name__ == '__main__':
     _test()
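
For reference, a rough usage sketch of the GzipFile class touched above, mirroring what _test() does without the command line; the file name is arbitrary:

    import gzip

    f = gzip.GzipFile('example.txt.gz', 'wb')    # write side: compresses
    f.write('hello, gzip\n')
    f.close()                                    # writes CRC and size trailer

    f = gzip.GzipFile('example.txt.gz', 'rb')    # read side: decompresses
    data = f.read()                              # -> 'hello, gzip\n'
    f.close()
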
index 72742900770f130d8a1f5349c711786c5f3e1ad0..77be4709ce54a9ea6d78e9390c8c10a2965ecf24 100644 (file)
@@ -34,12 +34,12 @@ class HTMLParser(SGMLParser):
 
     def handle_data(self, data):
         if self.savedata is not None:
-           self.savedata = self.savedata + data
+            self.savedata = self.savedata + data
         else:
-           if self.nofill:
-               self.formatter.add_literal_data(data)
-           else:
-               self.formatter.add_flowing_data(data)
+            if self.nofill:
+                self.formatter.add_literal_data(data)
+            else:
+                self.formatter.add_flowing_data(data)
 
     # --- Hooks to save data; shouldn't need to be overridden
 
@@ -49,21 +49,21 @@ class HTMLParser(SGMLParser):
     def save_end(self):
         data = self.savedata
         self.savedata = None
-       if not self.nofill:
-           data = string.join(string.split(data))
-       return data
+        if not self.nofill:
+            data = string.join(string.split(data))
+        return data
 
     # --- Hooks for anchors; should probably be overridden
 
     def anchor_bgn(self, href, name, type):
         self.anchor = href
         if self.anchor:
-           self.anchorlist.append(href)
+            self.anchorlist.append(href)
 
     def anchor_end(self):
         if self.anchor:
-           self.handle_data("[%d]" % len(self.anchorlist))
-           self.anchor = None
+            self.handle_data("[%d]" % len(self.anchorlist))
+            self.anchor = None
 
     # --- Hook for images; should probably be overridden
 
@@ -218,10 +218,10 @@ class HTMLParser(SGMLParser):
     def do_li(self, attrs):
         self.formatter.end_paragraph(0)
         if self.list_stack:
-           [dummy, label, counter] = top = self.list_stack[-1]
-           top[2] = counter = counter+1
+            [dummy, label, counter] = top = self.list_stack[-1]
+            top[2] = counter = counter+1
         else:
-           label, counter = '*', 0
+            label, counter = '*', 0
         self.formatter.add_label_data(label, counter)
 
     def start_ol(self, attrs):
@@ -230,8 +230,8 @@ class HTMLParser(SGMLParser):
         label = '1.'
         for a, v in attrs:
             if a == 'type':
-               if len(v) == 1: v = v + '.'
-               label = v
+                if len(v) == 1: v = v + '.'
+                label = v
         self.list_stack.append(['ol', label, 0])
 
     def end_ol(self):
@@ -271,8 +271,8 @@ class HTMLParser(SGMLParser):
         self.formatter.end_paragraph(bl)
         if self.list_stack:
             if self.list_stack[-1][0] == 'dd':
-               del self.list_stack[-1]
-               self.formatter.pop_margin()
+                del self.list_stack[-1]
+                self.formatter.pop_margin()
 
     # --- Phrase Markup
 
@@ -302,26 +302,26 @@ class HTMLParser(SGMLParser):
     # Typographic Elements
 
     def start_i(self, attrs):
-       self.formatter.push_font((AS_IS, 1, AS_IS, AS_IS))
+        self.formatter.push_font((AS_IS, 1, AS_IS, AS_IS))
     def end_i(self):
-       self.formatter.pop_font()
+        self.formatter.pop_font()
 
     def start_b(self, attrs):
-       self.formatter.push_font((AS_IS, AS_IS, 1, AS_IS))
+        self.formatter.push_font((AS_IS, AS_IS, 1, AS_IS))
     def end_b(self):
-       self.formatter.pop_font()
+        self.formatter.pop_font()
 
     def start_tt(self, attrs):
-       self.formatter.push_font((AS_IS, AS_IS, AS_IS, 1))
+        self.formatter.push_font((AS_IS, AS_IS, AS_IS, 1))
     def end_tt(self):
-       self.formatter.pop_font()
+        self.formatter.pop_font()
 
     def start_a(self, attrs):
         href = ''
         name = ''
         type = ''
         for attrname, value in attrs:
-           value = string.strip(value)
+            value = string.strip(value)
             if attrname == 'href':
                 href = value
             if attrname == 'name':
@@ -350,8 +350,8 @@ class HTMLParser(SGMLParser):
         alt = '(image)'
         ismap = ''
         src = ''
-       width = 0
-       height = 0
+        width = 0
+        height = 0
         for attrname, value in attrs:
             if attrname == 'align':
                 align = value
@@ -361,12 +361,12 @@ class HTMLParser(SGMLParser):
                 ismap = value
             if attrname == 'src':
                 src = value
-           if attrname == 'width':
-               try: width = string.atoi(value)
-               except: pass
-           if attrname == 'height':
-               try: height = string.atoi(value)
-               except: pass
+            if attrname == 'width':
+                try: width = string.atoi(value)
+                except: pass
+            if attrname == 'height':
+                try: height = string.atoi(value)
+                except: pass
         self.handle_image(src, alt, ismap, align, width, height)
 
     # --- Really Old Unofficial Deprecated Stuff
@@ -388,35 +388,35 @@ def test(args = None):
     import sys, formatter
 
     if not args:
-       args = sys.argv[1:]
+        args = sys.argv[1:]
 
     silent = args and args[0] == '-s'
     if silent:
-       del args[0]
+        del args[0]
 
     if args:
-       file = args[0]
+        file = args[0]
     else:
-       file = 'test.html'
+        file = 'test.html'
 
     if file == '-':
-       f = sys.stdin
+        f = sys.stdin
     else:
-       try:
-           f = open(file, 'r')
-       except IOError, msg:
-           print file, ":", msg
-           sys.exit(1)
+        try:
+            f = open(file, 'r')
+        except IOError, msg:
+            print file, ":", msg
+            sys.exit(1)
 
     data = f.read()
 
     if f is not sys.stdin:
-       f.close()
+        f.close()
     
     if silent:
-       f = formatter.NullFormatter()
+        f = formatter.NullFormatter()
     else:
-       f = formatter.AbstractFormatter(formatter.DumbWriter())
+        f = formatter.AbstractFormatter(formatter.DumbWriter())
 
     p = HTMLParser(f)
     p.feed(data)
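
A brief sketch of the parser/formatter pairing that test() exercises above, feeding a made-up HTML string instead of a file; anchorlist collects the hrefs seen by anchor_bgn:

    import htmllib, formatter

    fmt = formatter.AbstractFormatter(formatter.DumbWriter())
    p = htmllib.HTMLParser(fmt)
    p.feed('<h1>Title</h1><p>Some <b>bold</b> text and a '
           '<a href="http://www.python.org/">link</a>.</p>')
    p.close()
    print p.anchorlist       # -> ['http://www.python.org/']
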
index 8f64957dd733a3c4f247366a28046207d2cbd9ba..36759789e5ea8a18da357bbaf2040b86499c8597 100644 (file)
@@ -69,22 +69,22 @@ FROZEN_MODULE = 33
 class _Verbose:
 
     def __init__(self, verbose = 0):
-       self.verbose = verbose
+        self.verbose = verbose
 
     def get_verbose(self):
-       return self.verbose
+        return self.verbose
 
     def set_verbose(self, verbose):
-       self.verbose = verbose
+        self.verbose = verbose
 
     # XXX The following is an experimental interface
 
     def note(self, *args):
-       if self.verbose:
-           apply(self.message, args)
+        if self.verbose:
+            apply(self.message, args)
 
     def message(self, format, *args):
-       print format%args
+        print format%args
 
 
 class BasicModuleLoader(_Verbose):
@@ -105,49 +105,49 @@ class BasicModuleLoader(_Verbose):
     """
 
     def find_module(self, name, path = None):
-       if path is None: 
-           path = [None] + self.default_path()
-       for dir in path:
-           stuff = self.find_module_in_dir(name, dir)
-           if stuff: return stuff
-       return None
+        if path is None: 
+            path = [None] + self.default_path()
+        for dir in path:
+            stuff = self.find_module_in_dir(name, dir)
+            if stuff: return stuff
+        return None
 
     def default_path(self):
-       return sys.path
+        return sys.path
 
     def find_module_in_dir(self, name, dir):
-       if dir is None:
-           return self.find_builtin_module(name)
-       else:
-           try:
-               return imp.find_module(name, [dir])
-           except ImportError:
-               return None
+        if dir is None:
+            return self.find_builtin_module(name)
+        else:
+            try:
+                return imp.find_module(name, [dir])
+            except ImportError:
+                return None
 
     def find_builtin_module(self, name):
-       if imp.is_builtin(name):
-           return None, '', ('', '', BUILTIN_MODULE)
-       if imp.is_frozen(name):
-           return None, '', ('', '', FROZEN_MODULE)
-       return None
+        if imp.is_builtin(name):
+            return None, '', ('', '', BUILTIN_MODULE)
+        if imp.is_frozen(name):
+            return None, '', ('', '', FROZEN_MODULE)
+        return None
 
     def load_module(self, name, stuff):
-       file, filename, (suff, mode, type) = stuff
-       try:
-           if type == BUILTIN_MODULE:
-               return imp.init_builtin(name)
-           if type == FROZEN_MODULE:
-               return imp.init_frozen(name)
-           if type == C_EXTENSION:
-               return imp.load_dynamic(name, filename, file)
-           if type == PY_SOURCE:
-               return imp.load_source(name, filename, file)
-           if type == PY_COMPILED:
-               return imp.load_compiled(name, filename, file)
-       finally:
-           if file: file.close()
-       raise ImportError, "Unrecognized module type (%s) for %s" % \
-                          (`type`, name)
+        file, filename, (suff, mode, type) = stuff
+        try:
+            if type == BUILTIN_MODULE:
+                return imp.init_builtin(name)
+            if type == FROZEN_MODULE:
+                return imp.init_frozen(name)
+            if type == C_EXTENSION:
+                return imp.load_dynamic(name, filename, file)
+            if type == PY_SOURCE:
+                return imp.load_source(name, filename, file)
+            if type == PY_COMPILED:
+                return imp.load_compiled(name, filename, file)
+        finally:
+            if file: file.close()
+        raise ImportError, "Unrecognized module type (%s) for %s" % \
+                           (`type`, name)
 
 
 class Hooks(_Verbose):
@@ -170,17 +170,17 @@ class Hooks(_Verbose):
     def init_frozen(self, name): return imp.init_frozen(name)
     def get_frozen_object(self, name): return imp.get_frozen_object(name)
     def load_source(self, name, filename, file=None):
-       return imp.load_source(name, filename, file)
+        return imp.load_source(name, filename, file)
     def load_compiled(self, name, filename, file=None):
-       return imp.load_compiled(name, filename, file)
+        return imp.load_compiled(name, filename, file)
     def load_dynamic(self, name, filename, file=None):
-       return imp.load_dynamic(name, filename, file)
+        return imp.load_dynamic(name, filename, file)
 
     def add_module(self, name):
-       d = self.modules_dict()
-       if d.has_key(name): return d[name]
-       d[name] = m = self.new_module(name)
-       return m
+        d = self.modules_dict()
+        if d.has_key(name): return d[name]
+        d[name] = m = self.new_module(name)
+        return m
 
     # sys interface
     def modules_dict(self): return sys.modules
@@ -215,61 +215,61 @@ class ModuleLoader(BasicModuleLoader):
     """
 
     def __init__(self, hooks = None, verbose = 0):
-       BasicModuleLoader.__init__(self, verbose)
-       self.hooks = hooks or Hooks(verbose)
+        BasicModuleLoader.__init__(self, verbose)
+        self.hooks = hooks or Hooks(verbose)
 
     def default_path(self):
-       return self.hooks.default_path()
+        return self.hooks.default_path()
 
     def modules_dict(self):
-       return self.hooks.modules_dict()
+        return self.hooks.modules_dict()
 
     def get_hooks(self):
-       return self.hooks
+        return self.hooks
 
     def set_hooks(self, hooks):
-       self.hooks = hooks
+        self.hooks = hooks
 
     def find_builtin_module(self, name):
-       if self.hooks.is_builtin(name):
-           return None, '', ('', '', BUILTIN_MODULE)
-       if self.hooks.is_frozen(name):
-           return None, '', ('', '', FROZEN_MODULE)
-       return None
+        if self.hooks.is_builtin(name):
+            return None, '', ('', '', BUILTIN_MODULE)
+        if self.hooks.is_frozen(name):
+            return None, '', ('', '', FROZEN_MODULE)
+        return None
 
     def find_module_in_dir(self, name, dir):
-       if dir is None:
-           return self.find_builtin_module(name)
-       for info in self.hooks.get_suffixes():
-           suff, mode, type = info
-           fullname = self.hooks.path_join(dir, name+suff)
-           try:
-               fp = self.hooks.openfile(fullname, mode)
-               return fp, fullname, info
-           except self.hooks.openfile_error:
-               pass
-       return None
+        if dir is None:
+            return self.find_builtin_module(name)
+        for info in self.hooks.get_suffixes():
+            suff, mode, type = info
+            fullname = self.hooks.path_join(dir, name+suff)
+            try:
+                fp = self.hooks.openfile(fullname, mode)
+                return fp, fullname, info
+            except self.hooks.openfile_error:
+                pass
+        return None
 
     def load_module(self, name, stuff):
-       file, filename, (suff, mode, type) = stuff
-       try:
-           if type == BUILTIN_MODULE:
-               return self.hooks.init_builtin(name)
-           if type == FROZEN_MODULE:
-               return self.hooks.init_frozen(name)
-           if type == C_EXTENSION:
-               m = self.hooks.load_dynamic(name, filename, file)
-           elif type == PY_SOURCE:
-               m = self.hooks.load_source(name, filename, file)
-           elif type == PY_COMPILED:
-               m = self.hooks.load_compiled(name, filename, file)
-           else:
-               raise ImportError, "Unrecognized module type (%s) for %s" % \
-                     (`type`, name)
-       finally:
-           if file: file.close()
-       m.__file__ = filename
-       return m
+        file, filename, (suff, mode, type) = stuff
+        try:
+            if type == BUILTIN_MODULE:
+                return self.hooks.init_builtin(name)
+            if type == FROZEN_MODULE:
+                return self.hooks.init_frozen(name)
+            if type == C_EXTENSION:
+                m = self.hooks.load_dynamic(name, filename, file)
+            elif type == PY_SOURCE:
+                m = self.hooks.load_source(name, filename, file)
+            elif type == PY_COMPILED:
+                m = self.hooks.load_compiled(name, filename, file)
+            else:
+                raise ImportError, "Unrecognized module type (%s) for %s" % \
+                      (`type`, name)
+        finally:
+            if file: file.close()
+        m.__file__ = filename
+        return m
 
 
 class FancyModuleLoader(ModuleLoader):
@@ -277,22 +277,22 @@ class FancyModuleLoader(ModuleLoader):
     """Fancy module loader -- parses and execs the code itself."""
 
     def load_module(self, name, stuff):
-       file, filename, (suff, mode, type) = stuff
-       if type == FROZEN_MODULE:
-           code = self.hooks.get_frozen_object(name)
-       elif type == PY_COMPILED:
-           import marshal
-           file.seek(8)
-           code = marshal.load(file)
-       elif type == PY_SOURCE:
-           data = file.read()
-           code = compile(data, filename, 'exec')
-       else:
-           return ModuleLoader.load_module(self, name, stuff)
-       m = self.hooks.add_module(name)
-       m.__file__ = filename
-       exec code in m.__dict__
-       return m
+        file, filename, (suff, mode, type) = stuff
+        if type == FROZEN_MODULE:
+            code = self.hooks.get_frozen_object(name)
+        elif type == PY_COMPILED:
+            import marshal
+            file.seek(8)
+            code = marshal.load(file)
+        elif type == PY_SOURCE:
+            data = file.read()
+            code = compile(data, filename, 'exec')
+        else:
+            return ModuleLoader.load_module(self, name, stuff)
+        m = self.hooks.add_module(name)
+        m.__file__ = filename
+        exec code in m.__dict__
+        return m
 
 
 class ModuleImporter(_Verbose):
@@ -305,57 +305,57 @@ class ModuleImporter(_Verbose):
     """
 
     def __init__(self, loader = None, verbose = 0):
-       _Verbose.__init__(self, verbose)
-       self.loader = loader or ModuleLoader(None, verbose)
-       self.modules = self.loader.modules_dict()
+        _Verbose.__init__(self, verbose)
+        self.loader = loader or ModuleLoader(None, verbose)
+        self.modules = self.loader.modules_dict()
 
     def get_loader(self):
-       return self.loader
+        return self.loader
 
     def set_loader(self, loader):
-       self.loader = loader
+        self.loader = loader
 
     def get_hooks(self):
-       return self.loader.get_hooks()
+        return self.loader.get_hooks()
 
     def set_hooks(self, hooks):
-       return self.loader.set_hooks(hooks)
+        return self.loader.set_hooks(hooks)
 
     def import_module(self, name, globals={}, locals={}, fromlist=[]):
-       if self.modules.has_key(name):
-           return self.modules[name] # Fast path
-       stuff = self.loader.find_module(name)
-       if not stuff:
-           raise ImportError, "No module named %s" % name
-       return self.loader.load_module(name, stuff)
+        if self.modules.has_key(name):
+            return self.modules[name] # Fast path
+        stuff = self.loader.find_module(name)
+        if not stuff:
+            raise ImportError, "No module named %s" % name
+        return self.loader.load_module(name, stuff)
 
     def reload(self, module, path = None):
-       name = module.__name__
-       stuff = self.loader.find_module(name, path)
-       if not stuff:
-           raise ImportError, "Module %s not found for reload" % name
-       return self.loader.load_module(name, stuff)
+        name = module.__name__
+        stuff = self.loader.find_module(name, path)
+        if not stuff:
+            raise ImportError, "Module %s not found for reload" % name
+        return self.loader.load_module(name, stuff)
 
     def unload(self, module):
-       del self.modules[module.__name__]
-       # XXX Should this try to clear the module's namespace?
+        del self.modules[module.__name__]
+        # XXX Should this try to clear the module's namespace?
 
     def install(self):
-       self.save_import_module = __builtin__.__import__
-       self.save_reload = __builtin__.reload
-       if not hasattr(__builtin__, 'unload'):
-           __builtin__.unload = None
-       self.save_unload = __builtin__.unload
-       __builtin__.__import__ = self.import_module
-       __builtin__.reload = self.reload
-       __builtin__.unload = self.unload
+        self.save_import_module = __builtin__.__import__
+        self.save_reload = __builtin__.reload
+        if not hasattr(__builtin__, 'unload'):
+            __builtin__.unload = None
+        self.save_unload = __builtin__.unload
+        __builtin__.__import__ = self.import_module
+        __builtin__.reload = self.reload
+        __builtin__.unload = self.unload
 
     def uninstall(self):
-       __builtin__.__import__ = self.save_import_module
-       __builtin__.reload = self.save_reload
-       __builtin__.unload = self.save_unload
-       if not __builtin__.unload:
-           del __builtin__.unload
+        __builtin__.__import__ = self.save_import_module
+        __builtin__.reload = self.save_reload
+        __builtin__.unload = self.save_unload
+        if not __builtin__.unload:
+            del __builtin__.unload
 
 
 default_importer = None
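
A short sketch, not part of the diff, of how the re-indented ModuleImporter is typically wired in; once install() runs, ordinary import statements are routed through import_module() until uninstall() restores the saved hooks:

    import ihooks

    importer = ihooks.ModuleImporter()
    importer.install()           # replaces __import__, reload, unload
    import string                # now served via importer.import_module()
    importer.uninstall()         # put the original builtins back
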
index ba9249d2e448148455805a4d36aa2283713dc861..3cf8a2e7af9a38175c58f7c505ba94a95d7b29ff 100755 (executable)
@@ -63,10 +63,10 @@ def main():
     while 1:
         line = fp.readline()
         if not line: break
-       if string.find(line, '{1, "') > -1:
-           match = strprog.search(line)
-           if match:
-               lines.append("        '" + match.group(1) + "',\n")
+        if string.find(line, '{1, "') > -1:
+            match = strprog.search(line)
+            if match:
+                lines.append("        '" + match.group(1) + "',\n")
     fp.close()
     lines.sort()
 
index 2473f9b79f1c39249a0e5a7dabe227c15d86425a..95c3c231649371a57f2e0f6e6f3bedb3b1aba252 100644 (file)
@@ -16,93 +16,93 @@ def import_hook(name, globals=None, locals=None, fromlist=None):
     q, tail = find_head_package(parent, name)
     m = load_tail(q, tail)
     if not fromlist:
-       return q
+        return q
     if hasattr(m, "__path__"):
-       ensure_fromlist(m, fromlist)
+        ensure_fromlist(m, fromlist)
     return m
 
 def determine_parent(globals):
     if not globals or  not globals.has_key("__name__"):
-       return None
+        return None
     pname = globals['__name__']
     if globals.has_key("__path__"):
-       parent = sys.modules[pname]
-       assert globals is parent.__dict__
-       return parent
+        parent = sys.modules[pname]
+        assert globals is parent.__dict__
+        return parent
     if '.' in pname:
-       i = string.rfind(pname, '.')
-       pname = pname[:i]
-       parent = sys.modules[pname]
-       assert parent.__name__ == pname
-       return parent
+        i = string.rfind(pname, '.')
+        pname = pname[:i]
+        parent = sys.modules[pname]
+        assert parent.__name__ == pname
+        return parent
     return None
 
 def find_head_package(parent, name):
     if '.' in name:
-       i = string.find(name, '.')
-       head = name[:i]
-       tail = name[i+1:]
+        i = string.find(name, '.')
+        head = name[:i]
+        tail = name[i+1:]
     else:
-       head = name
-       tail = ""
+        head = name
+        tail = ""
     if parent:
-       qname = "%s.%s" % (parent.__name__, head)
+        qname = "%s.%s" % (parent.__name__, head)
     else:
-       qname = head
+        qname = head
     q = import_module(head, qname, parent)
     if q: return q, tail
     if parent:
-       qname = head
-       parent = None
-       q = import_module(head, qname, parent)
-       if q: return q, tail
+        qname = head
+        parent = None
+        q = import_module(head, qname, parent)
+        if q: return q, tail
     raise ImportError, "No module named " + qname
 
 def load_tail(q, tail):
     m = q
     while tail:
-       i = string.find(tail, '.')
-       if i < 0: i = len(tail)
-       head, tail = tail[:i], tail[i+1:]
-       mname = "%s.%s" % (m.__name__, head)
-       m = import_module(head, mname, m)
-       if not m:
-           raise ImportError, "No module named " + mname
+        i = string.find(tail, '.')
+        if i < 0: i = len(tail)
+        head, tail = tail[:i], tail[i+1:]
+        mname = "%s.%s" % (m.__name__, head)
+        m = import_module(head, mname, m)
+        if not m:
+            raise ImportError, "No module named " + mname
     return m
 
 def ensure_fromlist(m, fromlist, recursive=0):
     for sub in fromlist:
-       if sub == "*":
-           if not recursive:
-               try:
-                   all = m.__all__
-               except AttributeError:
-                   pass
-               else:
-                   ensure_fromlist(m, all, 1)
-           continue
-       if sub != "*" and not hasattr(m, sub):
-           subname = "%s.%s" % (m.__name__, sub)
-           submod = import_module(sub, subname, m)
-           if not submod:
-               raise ImportError, "No module named " + subname
+        if sub == "*":
+            if not recursive:
+                try:
+                    all = m.__all__
+                except AttributeError:
+                    pass
+                else:
+                    ensure_fromlist(m, all, 1)
+            continue
+        if sub != "*" and not hasattr(m, sub):
+            subname = "%s.%s" % (m.__name__, sub)
+            submod = import_module(sub, subname, m)
+            if not submod:
+                raise ImportError, "No module named " + subname
 
 def import_module(partname, fqname, parent):
     try:
-       return sys.modules[fqname]
+        return sys.modules[fqname]
     except KeyError:
-       pass
+        pass
     try:
-       fp, pathname, stuff = imp.find_module(partname,
-                                             parent and parent.__path__)
+        fp, pathname, stuff = imp.find_module(partname,
+                                              parent and parent.__path__)
     except ImportError:
-       return None
+        return None
     try:
-       m = imp.load_module(fqname, fp, pathname, stuff)
+        m = imp.load_module(fqname, fp, pathname, stuff)
     finally:
-       if fp: fp.close()
+        if fp: fp.close()
     if parent:
-       setattr(parent, partname, m)
+        setattr(parent, partname, m)
     return m
 
 
@@ -110,7 +110,7 @@ def import_module(partname, fqname, parent):
 def reload_hook(module):
     name = module.__name__
     if '.' not in name:
-       return import_module(name, name, None)
+        return import_module(name, name, None)
     i = string.rfind(name, '.')
     pname = name[:i]
     parent = sys.modules[pname]
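
A worked trace, for illustration only, of how the helpers above split a dotted import, assuming a package a containing a subpackage b containing a module c:

    #   import_hook("a.b.c")
    #     determine_parent(globals)        -> None   (top-level import)
    #     find_head_package(None, "a.b.c") -> (<module a>, "b.c")
    #     load_tail(<module a>, "b.c")     -> imports a.b, then a.b.c
    #   result: <module a> is returned, since fromlist is empty
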
index 6246fd930d7893a1c416a1dba8705c76a8692b60..d85cabfca5898287a0f27a5eaa9b3d55516b93c1 100644 (file)
@@ -11,21 +11,21 @@ def _group(s):
     if not grouping:return s
     result=""
     while s and grouping:
-       # if grouping is -1, we are done 
-       if grouping[0]==CHAR_MAX:
-           break
-       # 0: re-use last group ad infinitum
-       elif grouping[0]!=0:
-           #process last group
-           group=grouping[0]
-           grouping=grouping[1:]
-       if result:
-           result=s[-group:]+conv['thousands_sep']+result
-       else:
-           result=s[-group:]
-       s=s[:-group]
+        # if grouping is -1, we are done 
+        if grouping[0]==CHAR_MAX:
+            break
+        # 0: re-use last group ad infinitum
+        elif grouping[0]!=0:
+            #process last group
+            group=grouping[0]
+            grouping=grouping[1:]
+        if result:
+            result=s[-group:]+conv['thousands_sep']+result
+        else:
+            result=s[-group:]
+        s=s[:-group]
     if s and result:
-       result=s+conv['thousands_sep']+result
+        result=s+conv['thousands_sep']+result
     return result
 
 def format(f,val,grouping=0):
@@ -35,13 +35,13 @@ def format(f,val,grouping=0):
     result = f % val
     fields = string.splitfields(result,".")
     if grouping:
-       fields[0]=_group(fields[0])
+        fields[0]=_group(fields[0])
     if len(fields)==2:
-       return fields[0]+localeconv()['decimal_point']+fields[1]
+        return fields[0]+localeconv()['decimal_point']+fields[1]
     elif len(fields)==1:
-       return fields[0]
+        return fields[0]
     else:
-       raise Error,"Too many decimal points in result string"
+        raise Error,"Too many decimal points in result string"
     
 def str(val):
     """Convert float to integer, taking the locale into account."""
index db4c599b016171c17e36fdd0106765d549a0e12f..dced58aa56a1c8bd839718eee66caec67f965ccb 100644 (file)
@@ -12,71 +12,71 @@ def url2pathname(pathname):
     #
     tp = urllib.splittype(pathname)[0]
     if tp and tp <> 'file':
-       raise RuntimeError, 'Cannot convert non-local URL to pathname'
+        raise RuntimeError, 'Cannot convert non-local URL to pathname'
     components = string.split(pathname, '/')
     # Remove . and embedded ..
     i = 0
     while i < len(components):
-       if components[i] == '.':
-           del components[i]
-       elif components[i] == '..' and i > 0 and \
-                                 components[i-1] not in ('', '..'):
-           del components[i-1:i+1]
-           i = i-1
-       elif components[i] == '' and i > 0 and components[i-1] <> '':
-           del components[i]
-       else:
-           i = i+1
+        if components[i] == '.':
+            del components[i]
+        elif components[i] == '..' and i > 0 and \
+                                  components[i-1] not in ('', '..'):
+            del components[i-1:i+1]
+            i = i-1
+        elif components[i] == '' and i > 0 and components[i-1] <> '':
+            del components[i]
+        else:
+            i = i+1
     if not components[0]:
-       # Absolute unix path, don't start with colon
-       return string.join(components[1:], ':')
+        # Absolute unix path, don't start with colon
+        return string.join(components[1:], ':')
     else:
-       # relative unix path, start with colon. First replace
-       # leading .. by empty strings (giving ::file)
-       i = 0
-       while i < len(components) and components[i] == '..':
-           components[i] = ''
-           i = i + 1
-       return ':' + string.join(components, ':')
+        # relative unix path, start with colon. First replace
+        # leading .. by empty strings (giving ::file)
+        i = 0
+        while i < len(components) and components[i] == '..':
+            components[i] = ''
+            i = i + 1
+        return ':' + string.join(components, ':')
 
 def pathname2url(pathname):
     "convert mac pathname to /-delimited pathname"
     if '/' in pathname:
-       raise RuntimeError, "Cannot convert pathname containing slashes"
+        raise RuntimeError, "Cannot convert pathname containing slashes"
     components = string.split(pathname, ':')
     # Remove empty first and/or last component
     if components[0] == '':
-       del components[0]
+        del components[0]
     if components[-1] == '':
-       del components[-1]
+        del components[-1]
     # Replace empty string ('::') by .. (will result in '/../' later)
     for i in range(len(components)):
-       if components[i] == '':
-           components[i] = '..'
+        if components[i] == '':
+            components[i] = '..'
     # Truncate names longer than 31 bytes
     components = map(lambda x: x[:31], components)
 
     if os.path.isabs(pathname):
-       return '/' + string.join(components, '/')
+        return '/' + string.join(components, '/')
     else:
-       return string.join(components, '/')
+        return string.join(components, '/')
 
 def test():
     for url in ["index.html",
-               "bar/index.html",
-               "/foo/bar/index.html",
-               "/foo/bar/",
-               "/"]:
-       print `url`, '->', `url2pathname(url)`
+                "bar/index.html",
+                "/foo/bar/index.html",
+                "/foo/bar/",
+                "/"]:
+        print `url`, '->', `url2pathname(url)`
     for path in ["drive:",
-                "drive:dir:",
-                "drive:dir:file",
-                "drive:file",
-                "file",
-                ":file",
-                ":dir:",
-                ":dir:file"]:
-       print `path`, '->', `pathname2url(path)`
+                 "drive:dir:",
+                 "drive:dir:file",
+                 "drive:file",
+                 "file",
+                 ":file",
+                 ":dir:",
+                 ":dir:file"]:
+        print `path`, '->', `pathname2url(path)`
 
 if __name__ == '__main__':
     test()
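
The test() routine above prints the conversions but not their results; worked by hand from the code, they come out as follows (the pathname2url line for "drive:dir:file" assumes a Macintosh os.path, where such a path is absolute):

    #   url2pathname("/foo/bar/index.html")  -> "foo:bar:index.html"
    #   url2pathname("bar/index.html")       -> ":bar:index.html"
    #   pathname2url("drive:dir:file")       -> "/drive/dir/file"
    #   pathname2url(":dir:file")            -> "dir/file"
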
index 8caa46ddefc51451300fd5f454b9ddc3cf3a16f8..e19a7468283dc6222d04be675765ee69c6ede219 100644 (file)
@@ -15,33 +15,33 @@ def getcaps():
     """
     caps = {}
     for mailcap in listmailcapfiles():
-       try:
-           fp = open(mailcap, 'r')
-       except:
-           continue
-       morecaps = readmailcapfile(fp)
-       fp.close()
-       for key in morecaps.keys():
-           if not caps.has_key(key):
-               caps[key] = morecaps[key]
-           else:
-               caps[key] = caps[key] + morecaps[key]
+        try:
+            fp = open(mailcap, 'r')
+        except:
+            continue
+        morecaps = readmailcapfile(fp)
+        fp.close()
+        for key in morecaps.keys():
+            if not caps.has_key(key):
+                caps[key] = morecaps[key]
+            else:
+                caps[key] = caps[key] + morecaps[key]
     return caps
 
 def listmailcapfiles():
     """Return a list of all mailcap files found on the system."""
     # XXX Actually, this is Unix-specific
     if os.environ.has_key('MAILCAPS'):
-       str = os.environ['MAILCAPS']
-       mailcaps = string.splitfields(str, ':')
+        str = os.environ['MAILCAPS']
+        mailcaps = string.splitfields(str, ':')
     else:
-       if os.environ.has_key('HOME'):
-           home = os.environ['HOME']
-       else:
-           # Don't bother with getpwuid()
-           home = '.' # Last resort
-       mailcaps = [home + '/.mailcap', '/etc/mailcap',
-               '/usr/etc/mailcap', '/usr/local/etc/mailcap']
+        if os.environ.has_key('HOME'):
+            home = os.environ['HOME']
+        else:
+            # Don't bother with getpwuid()
+            home = '.' # Last resort
+        mailcaps = [home + '/.mailcap', '/etc/mailcap',
+                '/usr/etc/mailcap', '/usr/local/etc/mailcap']
     return mailcaps
 
 
@@ -50,69 +50,69 @@ def listmailcapfiles():
 def readmailcapfile(fp):
     caps = {}
     while 1:
-       line = fp.readline()
-       if not line: break
-       # Ignore comments and blank lines
-       if line[0] == '#' or string.strip(line) == '':
-           continue
-       nextline = line
-       # Join continuation lines
-       while nextline[-2:] == '\\\n':
-           nextline = fp.readline()
-           if not nextline: nextline = '\n'
-           line = line[:-2] + nextline
-       # Parse the line
-       key, fields = parseline(line)
-       if not (key and fields):
-           continue
-       # Normalize the key
-       types = string.splitfields(key, '/')
-       for j in range(len(types)):
-           types[j] = string.strip(types[j])
-       key = string.lower(string.joinfields(types, '/'))
-       # Update the database
-       if caps.has_key(key):
-           caps[key].append(fields)
-       else:
-           caps[key] = [fields]
+        line = fp.readline()
+        if not line: break
+        # Ignore comments and blank lines
+        if line[0] == '#' or string.strip(line) == '':
+            continue
+        nextline = line
+        # Join continuation lines
+        while nextline[-2:] == '\\\n':
+            nextline = fp.readline()
+            if not nextline: nextline = '\n'
+            line = line[:-2] + nextline
+        # Parse the line
+        key, fields = parseline(line)
+        if not (key and fields):
+            continue
+        # Normalize the key
+        types = string.splitfields(key, '/')
+        for j in range(len(types)):
+            types[j] = string.strip(types[j])
+        key = string.lower(string.joinfields(types, '/'))
+        # Update the database
+        if caps.has_key(key):
+            caps[key].append(fields)
+        else:
+            caps[key] = [fields]
     return caps
 
 def parseline(line):
     fields = []
     i, n = 0, len(line)
     while i < n:
-       field, i = parsefield(line, i, n)
-       fields.append(field)
-       i = i+1 # Skip semicolon
+        field, i = parsefield(line, i, n)
+        fields.append(field)
+        i = i+1 # Skip semicolon
     if len(fields) < 2:
-       return None, None
+        return None, None
     key, view, rest = fields[0], fields[1], fields[2:]
     fields = {'view': view}
     for field in rest:
-       i = string.find(field, '=')
-       if i < 0:
-           fkey = field
-           fvalue = ""
-       else:
-           fkey = string.strip(field[:i])
-           fvalue = string.strip(field[i+1:])
-       if fields.has_key(fkey):
-           # Ignore it
-           pass
-       else:
-           fields[fkey] = fvalue
+        i = string.find(field, '=')
+        if i < 0:
+            fkey = field
+            fvalue = ""
+        else:
+            fkey = string.strip(field[:i])
+            fvalue = string.strip(field[i+1:])
+        if fields.has_key(fkey):
+            # Ignore it
+            pass
+        else:
+            fields[fkey] = fvalue
     return key, fields
 
 def parsefield(line, i, n):
     start = i
     while i < n:
-       c = line[i]
-       if c == ';':
-           break
-       elif c == '\\':
-           i = i+2
-       else:
-           i = i+1
+        c = line[i]
+        if c == ';':
+            break
+        elif c == '\\':
+            i = i+2
+        else:
+            i = i+1
     return string.strip(line[start:i]), i
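
For reference (illustrative only, not part of this change), the readmailcapfile()/parseline() pair above parses the standard mailcap line format: fields are split on unescaped ';', the first field is the MIME type, the second becomes the 'view' command, and any remaining 'key=value' fields go into the entry dictionary:

    import mailcap
    print mailcap.parseline('video/mpeg; xmpeg %s; test=test -n "$DISPLAY"')
    # -> ('video/mpeg', {'view': 'xmpeg %s', 'test': 'test -n "$DISPLAY"'})
    #    (dictionary key order may differ)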
 
 
@@ -130,24 +130,24 @@ def findmatch(caps, MIMEtype, key='view', filename="/dev/null", plist=[]):
     entries = lookup(caps, MIMEtype, key)
     # XXX This code should somehow check for the needsterminal flag. 
     for e in entries:
-       if e.has_key('test'):
-           test = subst(e['test'], filename, plist)
-           if test and os.system(test) != 0:
-               continue
-       command = subst(e[key], MIMEtype, filename, plist)
-       return command, e
+        if e.has_key('test'):
+            test = subst(e['test'], filename, plist)
+            if test and os.system(test) != 0:
+                continue
+        command = subst(e[key], MIMEtype, filename, plist)
+        return command, e
     return None, None
 
 def lookup(caps, MIMEtype, key=None):
     entries = []
     if caps.has_key(MIMEtype):
-       entries = entries + caps[MIMEtype]
+        entries = entries + caps[MIMEtype]
     MIMEtypes = string.splitfields(MIMEtype, '/')
     MIMEtype = MIMEtypes[0] + '/*'
     if caps.has_key(MIMEtype):
-       entries = entries + caps[MIMEtype]
+        entries = entries + caps[MIMEtype]
     if key is not None:
-       entries = filter(lambda e, key=key: e.has_key(key), entries)
+        entries = filter(lambda e, key=key: e.has_key(key), entries)
     return entries
 
 def subst(field, MIMEtype, filename, plist=[]):
@@ -155,39 +155,39 @@ def subst(field, MIMEtype, filename, plist=[]):
     res = ''
     i, n = 0, len(field)
     while i < n:
-       c = field[i]; i = i+1
-       if c <> '%':
-           if c == '\\':
-               c = field[i:i+1]; i = i+1
-           res = res + c
-       else:
-           c = field[i]; i = i+1
-           if c == '%':
-               res = res + c
-           elif c == 's':
-               res = res + filename
-           elif c == 't':
-               res = res + MIMEtype
-           elif c == '{':
-               start = i
-               while i < n and field[i] <> '}':
-                   i = i+1
-               name = field[start:i]
-               i = i+1
-               res = res + findparam(name, plist)
-           # XXX To do:
-           # %n == number of parts if type is multipart/*
-           # %F == list of alternating type and filename for parts
-           else:
-               res = res + '%' + c
+        c = field[i]; i = i+1
+        if c <> '%':
+            if c == '\\':
+                c = field[i:i+1]; i = i+1
+            res = res + c
+        else:
+            c = field[i]; i = i+1
+            if c == '%':
+                res = res + c
+            elif c == 's':
+                res = res + filename
+            elif c == 't':
+                res = res + MIMEtype
+            elif c == '{':
+                start = i
+                while i < n and field[i] <> '}':
+                    i = i+1
+                name = field[start:i]
+                i = i+1
+                res = res + findparam(name, plist)
+            # XXX To do:
+            # %n == number of parts if type is multipart/*
+            # %F == list of alternating type and filename for parts
+            else:
+                res = res + '%' + c
     return res
 
 def findparam(name, plist):
     name = string.lower(name) + '='
     n = len(name)
     for p in plist:
-       if string.lower(p[:n]) == name:
-           return p[n:]
+        if string.lower(p[:n]) == name:
+            return p[n:]
     return ''
 
 
@@ -197,23 +197,23 @@ def test():
     import sys
     caps = getcaps()
     if not sys.argv[1:]:
-       show(caps)
-       return
+        show(caps)
+        return
     for i in range(1, len(sys.argv), 2):
-       args = sys.argv[i:i+2]
-       if len(args) < 2:
-           print "usage: mailcap [MIMEtype file] ..."
-           return
-       MIMEtype = args[0]
-       file = args[1]
-       command, e = findmatch(caps, MIMEtype, 'view', file)
-       if not command:
-           print "No viewer found for", type
-       else:
-           print "Executing:", command
-           sts = os.system(command)
-           if sts:
-               print "Exit status:", sts
+        args = sys.argv[i:i+2]
+        if len(args) < 2:
+            print "usage: mailcap [MIMEtype file] ..."
+            return
+        MIMEtype = args[0]
+        file = args[1]
+        command, e = findmatch(caps, MIMEtype, 'view', file)
+        if not command:
+            print "No viewer found for", type
+        else:
+            print "Executing:", command
+            sts = os.system(command)
+            if sts:
+                print "Exit status:", sts
 
 def show(caps):
     print "Mailcap files:"
@@ -225,14 +225,14 @@ def show(caps):
     ckeys = caps.keys()
     ckeys.sort()
     for type in ckeys:
-       print type
-       entries = caps[type]
-       for e in entries:
-           keys = e.keys()
-           keys.sort()
-           for k in keys:
-               print "  %-15s" % k, e[k]
-           print
+        print type
+        entries = caps[type]
+        for e in entries:
+            keys = e.keys()
+            keys.sort()
+            for k in keys:
+                print "  %-15s" % k, e[k]
+            print
 
 if __name__ == '__main__':
     test()
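
A minimal usage sketch of the findmatch() path exercised by test() above; the MIME type, filename and resulting command are illustrative and depend on the mailcap files actually installed on the system:

    import mailcap
    caps = mailcap.getcaps()          # merge entries from all mailcap files
    command, entry = mailcap.findmatch(caps, 'video/mpeg', 'view',
                                       '/tmp/movie.mpg')
    # 'command' is the viewer command with %s/%t substituted by subst(),
    # e.g. 'xmpeg /tmp/movie.mpg', or None if no matching entry was found.
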
index dc9871234c0ee3ca74891f2c65d7b6fdc1466b06..85bf2cd3f6178de015f3373a789c4ef0ad5d1dbb 100644 (file)
@@ -96,142 +96,142 @@ class MH:
 
     # Constructor
     def __init__(self, path = None, profile = None):
-       if not profile: profile = MH_PROFILE
-       self.profile = os.path.expanduser(profile)
-       if not path: path = self.getprofile('Path')
-       if not path: path = PATH
-       if not os.path.isabs(path) and path[0] != '~':
-           path = os.path.join('~', path)
-       path = os.path.expanduser(path)
-       if not os.path.isdir(path): raise Error, 'MH() path not found'
-       self.path = path
+        if not profile: profile = MH_PROFILE
+        self.profile = os.path.expanduser(profile)
+        if not path: path = self.getprofile('Path')
+        if not path: path = PATH
+        if not os.path.isabs(path) and path[0] != '~':
+            path = os.path.join('~', path)
+        path = os.path.expanduser(path)
+        if not os.path.isdir(path): raise Error, 'MH() path not found'
+        self.path = path
 
     # String representation
     def __repr__(self):
-       return 'MH(%s, %s)' % (`self.path`, `self.profile`)
+        return 'MH(%s, %s)' % (`self.path`, `self.profile`)
 
     # Routine to print an error.  May be overridden by a derived class
     def error(self, msg, *args):
-       sys.stderr.write('MH error: %s\n' % (msg % args))
+        sys.stderr.write('MH error: %s\n' % (msg % args))
 
     # Return a profile entry, None if not found
     def getprofile(self, key):
-       return pickline(self.profile, key)
+        return pickline(self.profile, key)
 
     # Return the path (the name of the collection's directory)
     def getpath(self):
-       return self.path
+        return self.path
 
     # Return the name of the current folder
     def getcontext(self):
-       context = pickline(os.path.join(self.getpath(), 'context'),
-                 'Current-Folder')
-       if not context: context = 'inbox'
-       return context
+        context = pickline(os.path.join(self.getpath(), 'context'),
+                  'Current-Folder')
+        if not context: context = 'inbox'
+        return context
 
     # Set the name of the current folder
     def setcontext(self, context):
-       fn = os.path.join(self.getpath(), 'context')
-       f = open(fn, "w")
-       f.write("Current-Folder: %s\n" % context)
-       f.close()
+        fn = os.path.join(self.getpath(), 'context')
+        f = open(fn, "w")
+        f.write("Current-Folder: %s\n" % context)
+        f.close()
 
     # Return the names of the top-level folders
     def listfolders(self):
-       folders = []
-       path = self.getpath()
-       for name in os.listdir(path):
-           fullname = os.path.join(path, name)
-           if os.path.isdir(fullname):
-               folders.append(name)
-       folders.sort()
-       return folders
+        folders = []
+        path = self.getpath()
+        for name in os.listdir(path):
+            fullname = os.path.join(path, name)
+            if os.path.isdir(fullname):
+                folders.append(name)
+        folders.sort()
+        return folders
 
     # Return the names of the subfolders in a given folder
     # (prefixed with the given folder name)
     def listsubfolders(self, name):
-       fullname = os.path.join(self.path, name)
-       # Get the link count so we can avoid listing folders
-       # that have no subfolders.
-       st = os.stat(fullname)
-       nlinks = st[ST_NLINK]
-       if nlinks <= 2:
-           return []
-       subfolders = []
-       subnames = os.listdir(fullname)
-       for subname in subnames:
-           fullsubname = os.path.join(fullname, subname)
-           if os.path.isdir(fullsubname):
-               name_subname = os.path.join(name, subname)
-               subfolders.append(name_subname)
-               # Stop looking for subfolders when
-               # we've seen them all
-               nlinks = nlinks - 1
-               if nlinks <= 2:
-                   break
-       subfolders.sort()
-       return subfolders
+        fullname = os.path.join(self.path, name)
+        # Get the link count so we can avoid listing folders
+        # that have no subfolders.
+        st = os.stat(fullname)
+        nlinks = st[ST_NLINK]
+        if nlinks <= 2:
+            return []
+        subfolders = []
+        subnames = os.listdir(fullname)
+        for subname in subnames:
+            fullsubname = os.path.join(fullname, subname)
+            if os.path.isdir(fullsubname):
+                name_subname = os.path.join(name, subname)
+                subfolders.append(name_subname)
+                # Stop looking for subfolders when
+                # we've seen them all
+                nlinks = nlinks - 1
+                if nlinks <= 2:
+                    break
+        subfolders.sort()
+        return subfolders
 
     # Return the names of all folders, including subfolders, recursively
     def listallfolders(self):
-       return self.listallsubfolders('')
+        return self.listallsubfolders('')
 
     # Return the names of subfolders in a given folder, recursively
     def listallsubfolders(self, name):
-       fullname = os.path.join(self.path, name)
-       # Get the link count so we can avoid listing folders
-       # that have no subfolders.
-       st = os.stat(fullname)
-       nlinks = st[ST_NLINK]
-       if nlinks <= 2:
-           return []
-       subfolders = []
-       subnames = os.listdir(fullname)
-       for subname in subnames:
-           if subname[0] == ',' or isnumeric(subname): continue
-           fullsubname = os.path.join(fullname, subname)
-           if os.path.isdir(fullsubname):
-               name_subname = os.path.join(name, subname)
-               subfolders.append(name_subname)
-               if not os.path.islink(fullsubname):
-                   subsubfolders = self.listallsubfolders(
-                             name_subname)
-                   subfolders = subfolders + subsubfolders
-               # Stop looking for subfolders when
-               # we've seen them all
-               nlinks = nlinks - 1
-               if nlinks <= 2:
-                   break
-       subfolders.sort()
-       return subfolders
+        fullname = os.path.join(self.path, name)
+        # Get the link count so we can avoid listing folders
+        # that have no subfolders.
+        st = os.stat(fullname)
+        nlinks = st[ST_NLINK]
+        if nlinks <= 2:
+            return []
+        subfolders = []
+        subnames = os.listdir(fullname)
+        for subname in subnames:
+            if subname[0] == ',' or isnumeric(subname): continue
+            fullsubname = os.path.join(fullname, subname)
+            if os.path.isdir(fullsubname):
+                name_subname = os.path.join(name, subname)
+                subfolders.append(name_subname)
+                if not os.path.islink(fullsubname):
+                    subsubfolders = self.listallsubfolders(
+                              name_subname)
+                    subfolders = subfolders + subsubfolders
+                # Stop looking for subfolders when
+                # we've seen them all
+                nlinks = nlinks - 1
+                if nlinks <= 2:
+                    break
+        subfolders.sort()
+        return subfolders
 
     # Return a new Folder object for the named folder
     def openfolder(self, name):
-       return Folder(self, name)
+        return Folder(self, name)
 
     # Create a new folder.  This raises os.error if the folder
     # cannot be created
     def makefolder(self, name):
-       protect = pickline(self.profile, 'Folder-Protect')
-       if protect and isnumeric(protect):
-           mode = string.atoi(protect, 8)
-       else:
-           mode = FOLDER_PROTECT
-       os.mkdir(os.path.join(self.getpath(), name), mode)
+        protect = pickline(self.profile, 'Folder-Protect')
+        if protect and isnumeric(protect):
+            mode = string.atoi(protect, 8)
+        else:
+            mode = FOLDER_PROTECT
+        os.mkdir(os.path.join(self.getpath(), name), mode)
 
     # Delete a folder.  This removes files in the folder but not
     # subdirectories.  If deleting the folder itself fails it
     # raises os.error
     def deletefolder(self, name):
-       fullname = os.path.join(self.getpath(), name)
-       for subname in os.listdir(fullname):
-           fullsubname = os.path.join(fullname, subname)
-           try:
-               os.unlink(fullsubname)
-           except os.error:
-               self.error('%s not deleted, continuing...' %
-                         fullsubname)
-       os.rmdir(fullname)
+        fullname = os.path.join(self.getpath(), name)
+        for subname in os.listdir(fullname):
+            fullsubname = os.path.join(fullname, subname)
+            try:
+                os.unlink(fullsubname)
+            except os.error:
+                self.error('%s not deleted, continuing...' %
+                          fullsubname)
+        os.rmdir(fullname)
 
 
 # Class representing a particular folder
@@ -244,522 +244,522 @@ class Folder:
 
     # Constructor
     def __init__(self, mh, name):
-       self.mh = mh
-       self.name = name
-       if not os.path.isdir(self.getfullname()):
-           raise Error, 'no folder %s' % name
+        self.mh = mh
+        self.name = name
+        if not os.path.isdir(self.getfullname()):
+            raise Error, 'no folder %s' % name
 
     # String representation
     def __repr__(self):
-       return 'Folder(%s, %s)' % (`self.mh`, `self.name`)
+        return 'Folder(%s, %s)' % (`self.mh`, `self.name`)
 
     # Error message handler
     def error(self, *args):
-       apply(self.mh.error, args)
+        apply(self.mh.error, args)
 
     # Return the full pathname of the folder
     def getfullname(self):
-       return os.path.join(self.mh.path, self.name)
+        return os.path.join(self.mh.path, self.name)
 
     # Return the full pathname of the folder's sequences file
     def getsequencesfilename(self):
-       return os.path.join(self.getfullname(), MH_SEQUENCES)
+        return os.path.join(self.getfullname(), MH_SEQUENCES)
 
     # Return the full pathname of a message in the folder
     def getmessagefilename(self, n):
-       return os.path.join(self.getfullname(), str(n))
+        return os.path.join(self.getfullname(), str(n))
 
     # Return list of direct subfolders
     def listsubfolders(self):
-       return self.mh.listsubfolders(self.name)
+        return self.mh.listsubfolders(self.name)
 
     # Return list of all subfolders
     def listallsubfolders(self):
-       return self.mh.listallsubfolders(self.name)
+        return self.mh.listallsubfolders(self.name)
 
     # Return the list of messages currently present in the folder.
     # As a side effect, set self.last to the last message (or 0)
     def listmessages(self):
-       messages = []
-       match = numericprog.match
-       append = messages.append
-       for name in os.listdir(self.getfullname()):
-           if match(name) >= 0:
-               append(name)
-       messages = map(string.atoi, messages)
-       messages.sort()
-       if messages:
-           self.last = messages[-1]
-       else:
-           self.last = 0
-       return messages
+        messages = []
+        match = numericprog.match
+        append = messages.append
+        for name in os.listdir(self.getfullname()):
+            if match(name) >= 0:
+                append(name)
+        messages = map(string.atoi, messages)
+        messages.sort()
+        if messages:
+            self.last = messages[-1]
+        else:
+            self.last = 0
+        return messages
 
     # Return the set of sequences for the folder
     def getsequences(self):
-       sequences = {}
-       fullname = self.getsequencesfilename()
-       try:
-           f = open(fullname, 'r')
-       except IOError:
-           return sequences
-       while 1:
-           line = f.readline()
-           if not line: break
-           fields = string.splitfields(line, ':')
-           if len(fields) <> 2:
-               self.error('bad sequence in %s: %s' %
-                         (fullname, string.strip(line)))
-           key = string.strip(fields[0])
-           value = IntSet(string.strip(fields[1]), ' ').tolist()
-           sequences[key] = value
-       return sequences
+        sequences = {}
+        fullname = self.getsequencesfilename()
+        try:
+            f = open(fullname, 'r')
+        except IOError:
+            return sequences
+        while 1:
+            line = f.readline()
+            if not line: break
+            fields = string.splitfields(line, ':')
+            if len(fields) <> 2:
+                self.error('bad sequence in %s: %s' %
+                          (fullname, string.strip(line)))
+            key = string.strip(fields[0])
+            value = IntSet(string.strip(fields[1]), ' ').tolist()
+            sequences[key] = value
+        return sequences
 
     # Write the set of sequences back to the folder
     def putsequences(self, sequences):
-       fullname = self.getsequencesfilename()
-       f = None
-       for key in sequences.keys():
-           s = IntSet('', ' ')
-           s.fromlist(sequences[key])
-           if not f: f = open(fullname, 'w')
-           f.write('%s: %s\n' % (key, s.tostring()))
-       if not f:
-           try:
-               os.unlink(fullname)
-           except os.error:
-               pass
-       else:
-           f.close()
+        fullname = self.getsequencesfilename()
+        f = None
+        for key in sequences.keys():
+            s = IntSet('', ' ')
+            s.fromlist(sequences[key])
+            if not f: f = open(fullname, 'w')
+            f.write('%s: %s\n' % (key, s.tostring()))
+        if not f:
+            try:
+                os.unlink(fullname)
+            except os.error:
+                pass
+        else:
+            f.close()
 
     # Return the current message.  Raise KeyError when there is none
     def getcurrent(self):
-       seqs = self.getsequences()
-       try:
-           return max(seqs['cur'])
-       except (ValueError, KeyError):
-           raise Error, "no cur message"
+        seqs = self.getsequences()
+        try:
+            return max(seqs['cur'])
+        except (ValueError, KeyError):
+            raise Error, "no cur message"
 
     # Set the current message
     def setcurrent(self, n):
-       updateline(self.getsequencesfilename(), 'cur', str(n), 0)
+        updateline(self.getsequencesfilename(), 'cur', str(n), 0)
 
     # Parse an MH sequence specification into a message list.
     # Attempt to mimic mh-sequence(5) as close as possible.
     # Also attempt to mimic observed behavior regarding which
     # conditions cause which error messages
     def parsesequence(self, seq):
-       # XXX Still not complete (see mh-format(5)).
-       # Missing are:
-       # - 'prev', 'next' as count
-       # - Sequence-Negation option
-       all = self.listmessages()
-       # Observed behavior: test for empty folder is done first
-       if not all:
-           raise Error, "no messages in %s" % self.name
-       # Common case first: all is frequently the default
-       if seq == 'all':
-           return all
-       # Test for X:Y before X-Y because 'seq:-n' matches both
-       i = string.find(seq, ':')
-       if i >= 0:
-           head, dir, tail = seq[:i], '', seq[i+1:]
-           if tail[:1] in '-+':
-               dir, tail = tail[:1], tail[1:]
-           if not isnumeric(tail):
-               raise Error, "bad message list %s" % seq
-           try:
-               count = string.atoi(tail)
-           except (ValueError, OverflowError):
-               # Can't use sys.maxint because of i+count below
-               count = len(all)
-           try:
-               anchor = self._parseindex(head, all)
-           except Error, msg:
-               seqs = self.getsequences()
-               if not seqs.has_key(head):
-                   if not msg:
-                       msg = "bad message list %s" % seq
-                   raise Error, msg, sys.exc_info()[2]
-               msgs = seqs[head]
-               if not msgs:
-                   raise Error, "sequence %s empty" % head
-               if dir == '-':
-                   return msgs[-count:]
-               else:
-                   return msgs[:count]
-           else:
-               if not dir:
-                   if head in ('prev', 'last'):
-                       dir = '-'
-               if dir == '-':
-                   i = bisect(all, anchor)
-                   return all[max(0, i-count):i]
-               else:
-                   i = bisect(all, anchor-1)
-                   return all[i:i+count]
-       # Test for X-Y next
-       i = string.find(seq, '-')
-       if i >= 0:
-           begin = self._parseindex(seq[:i], all)
-           end = self._parseindex(seq[i+1:], all)
-           i = bisect(all, begin-1)
-           j = bisect(all, end)
-           r = all[i:j]
-           if not r:
-               raise Error, "bad message list %s" % seq
-           return r
-       # Neither X:Y nor X-Y; must be a number or a (pseudo-)sequence
-       try:
-           n = self._parseindex(seq, all)
-       except Error, msg:
-           seqs = self.getsequences()
-           if not seqs.has_key(seq):
-               if not msg:
-                   msg = "bad message list %s" % seq
-               raise Error, msg
-           return seqs[seq]
-       else:
-           if n not in all:
-               if isnumeric(seq):
-                   raise Error, "message %d doesn't exist" % n
-               else:
-                   raise Error, "no %s message" % seq
-           else:
-               return [n]
+        # XXX Still not complete (see mh-format(5)).
+        # Missing are:
+        # - 'prev', 'next' as count
+        # - Sequence-Negation option
+        all = self.listmessages()
+        # Observed behavior: test for empty folder is done first
+        if not all:
+            raise Error, "no messages in %s" % self.name
+        # Common case first: all is frequently the default
+        if seq == 'all':
+            return all
+        # Test for X:Y before X-Y because 'seq:-n' matches both
+        i = string.find(seq, ':')
+        if i >= 0:
+            head, dir, tail = seq[:i], '', seq[i+1:]
+            if tail[:1] in '-+':
+                dir, tail = tail[:1], tail[1:]
+            if not isnumeric(tail):
+                raise Error, "bad message list %s" % seq
+            try:
+                count = string.atoi(tail)
+            except (ValueError, OverflowError):
+                # Can't use sys.maxint because of i+count below
+                count = len(all)
+            try:
+                anchor = self._parseindex(head, all)
+            except Error, msg:
+                seqs = self.getsequences()
+                if not seqs.has_key(head):
+                    if not msg:
+                        msg = "bad message list %s" % seq
+                    raise Error, msg, sys.exc_info()[2]
+                msgs = seqs[head]
+                if not msgs:
+                    raise Error, "sequence %s empty" % head
+                if dir == '-':
+                    return msgs[-count:]
+                else:
+                    return msgs[:count]
+            else:
+                if not dir:
+                    if head in ('prev', 'last'):
+                        dir = '-'
+                if dir == '-':
+                    i = bisect(all, anchor)
+                    return all[max(0, i-count):i]
+                else:
+                    i = bisect(all, anchor-1)
+                    return all[i:i+count]
+        # Test for X-Y next
+        i = string.find(seq, '-')
+        if i >= 0:
+            begin = self._parseindex(seq[:i], all)
+            end = self._parseindex(seq[i+1:], all)
+            i = bisect(all, begin-1)
+            j = bisect(all, end)
+            r = all[i:j]
+            if not r:
+                raise Error, "bad message list %s" % seq
+            return r
+        # Neither X:Y nor X-Y; must be a number or a (pseudo-)sequence
+        try:
+            n = self._parseindex(seq, all)
+        except Error, msg:
+            seqs = self.getsequences()
+            if not seqs.has_key(seq):
+                if not msg:
+                    msg = "bad message list %s" % seq
+                raise Error, msg
+            return seqs[seq]
+        else:
+            if n not in all:
+                if isnumeric(seq):
+                    raise Error, "message %d doesn't exist" % n
+                else:
+                    raise Error, "no %s message" % seq
+            else:
+                return [n]
 
     # Internal: parse a message number (or cur, first, etc.)
     def _parseindex(self, seq, all):
-       if isnumeric(seq):
-           try:
-               return string.atoi(seq)
-           except (OverflowError, ValueError):
-               return sys.maxint
-       if seq in ('cur', '.'):
-           return self.getcurrent()
-       if seq == 'first':
-           return all[0]
-       if seq == 'last':
-           return all[-1]
-       if seq == 'next':
-           n = self.getcurrent()
-           i = bisect(all, n)
-           try:
-               return all[i]
-           except IndexError:
-               raise Error, "no next message"
-       if seq == 'prev':
-           n = self.getcurrent()
-           i = bisect(all, n-1)
-           if i == 0:
-               raise Error, "no prev message"
-           try:
-               return all[i-1]
-           except IndexError:
-               raise Error, "no prev message"
-       raise Error, None
+        if isnumeric(seq):
+            try:
+                return string.atoi(seq)
+            except (OverflowError, ValueError):
+                return sys.maxint
+        if seq in ('cur', '.'):
+            return self.getcurrent()
+        if seq == 'first':
+            return all[0]
+        if seq == 'last':
+            return all[-1]
+        if seq == 'next':
+            n = self.getcurrent()
+            i = bisect(all, n)
+            try:
+                return all[i]
+            except IndexError:
+                raise Error, "no next message"
+        if seq == 'prev':
+            n = self.getcurrent()
+            i = bisect(all, n-1)
+            if i == 0:
+                raise Error, "no prev message"
+            try:
+                return all[i-1]
+            except IndexError:
+                raise Error, "no prev message"
+        raise Error, None
 
     # Open a message -- returns a Message object
     def openmessage(self, n):
-       return Message(self, n)
+        return Message(self, n)
 
     # Remove one or more messages -- may raise os.error
     def removemessages(self, list):
-       errors = []
-       deleted = []
-       for n in list:
-           path = self.getmessagefilename(n)
-           commapath = self.getmessagefilename(',' + str(n))
-           try:
-               os.unlink(commapath)
-           except os.error:
-               pass
-           try:
-               os.rename(path, commapath)
-           except os.error, msg:
-               errors.append(msg)
-           else:
-               deleted.append(n)
-       if deleted:
-           self.removefromallsequences(deleted)
-       if errors:
-           if len(errors) == 1:
-               raise os.error, errors[0]
-           else:
-               raise os.error, ('multiple errors:', errors)
+        errors = []
+        deleted = []
+        for n in list:
+            path = self.getmessagefilename(n)
+            commapath = self.getmessagefilename(',' + str(n))
+            try:
+                os.unlink(commapath)
+            except os.error:
+                pass
+            try:
+                os.rename(path, commapath)
+            except os.error, msg:
+                errors.append(msg)
+            else:
+                deleted.append(n)
+        if deleted:
+            self.removefromallsequences(deleted)
+        if errors:
+            if len(errors) == 1:
+                raise os.error, errors[0]
+            else:
+                raise os.error, ('multiple errors:', errors)
 
     # Refile one or more messages -- may raise os.error.
     # 'tofolder' is an open folder object
     def refilemessages(self, list, tofolder, keepsequences=0):
-       errors = []
-       refiled = {}
-       for n in list:
-           ton = tofolder.getlast() + 1
-           path = self.getmessagefilename(n)
-           topath = tofolder.getmessagefilename(ton)
-           try:
-               os.rename(path, topath)
-           except os.error:
-               # Try copying
-               try:
-                   shutil.copy2(path, topath)
-                   os.unlink(path)
-               except (IOError, os.error), msg:
-                   errors.append(msg)
-                   try:
-                       os.unlink(topath)
-                   except os.error:
-                       pass
-                   continue
-           tofolder.setlast(ton)
-           refiled[n] = ton
-       if refiled:
-           if keepsequences:
-               tofolder._copysequences(self, refiled.items())
-           self.removefromallsequences(refiled.keys())
-       if errors:
-           if len(errors) == 1:
-               raise os.error, errors[0]
-           else:
-               raise os.error, ('multiple errors:', errors)
+        errors = []
+        refiled = {}
+        for n in list:
+            ton = tofolder.getlast() + 1
+            path = self.getmessagefilename(n)
+            topath = tofolder.getmessagefilename(ton)
+            try:
+                os.rename(path, topath)
+            except os.error:
+                # Try copying
+                try:
+                    shutil.copy2(path, topath)
+                    os.unlink(path)
+                except (IOError, os.error), msg:
+                    errors.append(msg)
+                    try:
+                        os.unlink(topath)
+                    except os.error:
+                        pass
+                    continue
+            tofolder.setlast(ton)
+            refiled[n] = ton
+        if refiled:
+            if keepsequences:
+                tofolder._copysequences(self, refiled.items())
+            self.removefromallsequences(refiled.keys())
+        if errors:
+            if len(errors) == 1:
+                raise os.error, errors[0]
+            else:
+                raise os.error, ('multiple errors:', errors)
 
     # Helper for refilemessages() to copy sequences
     def _copysequences(self, fromfolder, refileditems):
-       fromsequences = fromfolder.getsequences()
-       tosequences = self.getsequences()
-       changed = 0
-       for name, seq in fromsequences.items():
-           try:
-               toseq = tosequences[name]
-               new = 0
-           except:
-               toseq = []
-               new = 1
-           for fromn, ton in refileditems:
-               if fromn in seq:
-                   toseq.append(ton)
-                   changed = 1
-           if new and toseq:
-               tosequences[name] = toseq
-       if changed:
-           self.putsequences(tosequences)
+        fromsequences = fromfolder.getsequences()
+        tosequences = self.getsequences()
+        changed = 0
+        for name, seq in fromsequences.items():
+            try:
+                toseq = tosequences[name]
+                new = 0
+            except:
+                toseq = []
+                new = 1
+            for fromn, ton in refileditems:
+                if fromn in seq:
+                    toseq.append(ton)
+                    changed = 1
+            if new and toseq:
+                tosequences[name] = toseq
+        if changed:
+            self.putsequences(tosequences)
 
     # Move one message over a specific destination message,
     # which may or may not already exist.
     def movemessage(self, n, tofolder, ton):
-       path = self.getmessagefilename(n)
-       # Open it to check that it exists
-       f = open(path)
-       f.close()
-       del f
-       topath = tofolder.getmessagefilename(ton)
-       backuptopath = tofolder.getmessagefilename(',%d' % ton)
-       try:
-           os.rename(topath, backuptopath)
-       except os.error:
-           pass
-       try:
-           os.rename(path, topath)
-       except os.error:
-           # Try copying
-           ok = 0
-           try:
-               tofolder.setlast(None)
-               shutil.copy2(path, topath)
-               ok = 1
-           finally:
-               if not ok:
-                   try:
-                       os.unlink(topath)
-                   except os.error:
-                       pass
-           os.unlink(path)
-       self.removefromallsequences([n])
+        path = self.getmessagefilename(n)
+        # Open it to check that it exists
+        f = open(path)
+        f.close()
+        del f
+        topath = tofolder.getmessagefilename(ton)
+        backuptopath = tofolder.getmessagefilename(',%d' % ton)
+        try:
+            os.rename(topath, backuptopath)
+        except os.error:
+            pass
+        try:
+            os.rename(path, topath)
+        except os.error:
+            # Try copying
+            ok = 0
+            try:
+                tofolder.setlast(None)
+                shutil.copy2(path, topath)
+                ok = 1
+            finally:
+                if not ok:
+                    try:
+                        os.unlink(topath)
+                    except os.error:
+                        pass
+            os.unlink(path)
+        self.removefromallsequences([n])
 
     # Copy one message over a specific destination message,
     # which may or may not already exist.
     def copymessage(self, n, tofolder, ton):
-       path = self.getmessagefilename(n)
-       # Open it to check that it exists
-       f = open(path)
-       f.close()
-       del f
-       topath = tofolder.getmessagefilename(ton)
-       backuptopath = tofolder.getmessagefilename(',%d' % ton)
-       try:
-           os.rename(topath, backuptopath)
-       except os.error:
-           pass
-       ok = 0
-       try:
-           tofolder.setlast(None)
-           shutil.copy2(path, topath)
-           ok = 1
-       finally:
-           if not ok:
-               try:
-                   os.unlink(topath)
-               except os.error:
-                   pass
+        path = self.getmessagefilename(n)
+        # Open it to check that it exists
+        f = open(path)
+        f.close()
+        del f
+        topath = tofolder.getmessagefilename(ton)
+        backuptopath = tofolder.getmessagefilename(',%d' % ton)
+        try:
+            os.rename(topath, backuptopath)
+        except os.error:
+            pass
+        ok = 0
+        try:
+            tofolder.setlast(None)
+            shutil.copy2(path, topath)
+            ok = 1
+        finally:
+            if not ok:
+                try:
+                    os.unlink(topath)
+                except os.error:
+                    pass
 
     # Create a message, with text from the open file txt.
     def createmessage(self, n, txt):
-       path = self.getmessagefilename(n)
-       backuppath = self.getmessagefilename(',%d' % n)
-       try:
-           os.rename(path, backuppath)
-       except os.error:
-           pass
-       ok = 0
-       BUFSIZE = 16*1024
-       try:
-           f = open(path, "w")
-           while 1:
-               buf = txt.read(BUFSIZE)
-               if not buf:
-                   break
-               f.write(buf)
-           f.close()
-           ok = 1
-       finally:
-           if not ok:
-               try:
-                   os.unlink(path)
-               except os.error:
-                   pass
+        path = self.getmessagefilename(n)
+        backuppath = self.getmessagefilename(',%d' % n)
+        try:
+            os.rename(path, backuppath)
+        except os.error:
+            pass
+        ok = 0
+        BUFSIZE = 16*1024
+        try:
+            f = open(path, "w")
+            while 1:
+                buf = txt.read(BUFSIZE)
+                if not buf:
+                    break
+                f.write(buf)
+            f.close()
+            ok = 1
+        finally:
+            if not ok:
+                try:
+                    os.unlink(path)
+                except os.error:
+                    pass
 
     # Remove one or more messages from all sequences (including last)
     # -- but not from 'cur'!!!
     def removefromallsequences(self, list):
-       if hasattr(self, 'last') and self.last in list:
-           del self.last
-       sequences = self.getsequences()
-       changed = 0
-       for name, seq in sequences.items():
-           if name == 'cur':
-               continue
-           for n in list:
-               if n in seq:
-                   seq.remove(n)
-                   changed = 1
-                   if not seq:
-                       del sequences[name]
-       if changed:
-           self.putsequences(sequences)
+        if hasattr(self, 'last') and self.last in list:
+            del self.last
+        sequences = self.getsequences()
+        changed = 0
+        for name, seq in sequences.items():
+            if name == 'cur':
+                continue
+            for n in list:
+                if n in seq:
+                    seq.remove(n)
+                    changed = 1
+                    if not seq:
+                        del sequences[name]
+        if changed:
+            self.putsequences(sequences)
 
     # Return the last message number
     def getlast(self):
-       if not hasattr(self, 'last'):
-           messages = self.listmessages()
-       return self.last
+        if not hasattr(self, 'last'):
+            messages = self.listmessages()
+        return self.last
 
     # Set the last message number
     def setlast(self, last):
-       if last is None:
-           if hasattr(self, 'last'):
-               del self.last
-       else:
-           self.last = last
+        if last is None:
+            if hasattr(self, 'last'):
+                del self.last
+        else:
+            self.last = last
 
 class Message(mimetools.Message):
 
     # Constructor
     def __init__(self, f, n, fp = None):
-       self.folder = f
-       self.number = n
-       if not fp:
-           path = f.getmessagefilename(n)
-           fp = open(path, 'r')
-       mimetools.Message.__init__(self, fp)
+        self.folder = f
+        self.number = n
+        if not fp:
+            path = f.getmessagefilename(n)
+            fp = open(path, 'r')
+        mimetools.Message.__init__(self, fp)
 
     # String representation
     def __repr__(self):
-       return 'Message(%s, %s)' % (repr(self.folder), self.number)
+        return 'Message(%s, %s)' % (repr(self.folder), self.number)
 
     # Return the message's header text as a string.  If an
     # argument is specified, it is used as a filter predicate to
     # decide which headers to return (its argument is the header
     # name converted to lower case).
     def getheadertext(self, pred = None):
-       if not pred:
-           return string.joinfields(self.headers, '')
-       headers = []
-       hit = 0
-       for line in self.headers:
-           if line[0] not in string.whitespace:
-               i = string.find(line, ':')
-               if i > 0:
-                   hit = pred(string.lower(line[:i]))
-           if hit: headers.append(line)
-       return string.joinfields(headers, '')
+        if not pred:
+            return string.joinfields(self.headers, '')
+        headers = []
+        hit = 0
+        for line in self.headers:
+            if line[0] not in string.whitespace:
+                i = string.find(line, ':')
+                if i > 0:
+                    hit = pred(string.lower(line[:i]))
+            if hit: headers.append(line)
+        return string.joinfields(headers, '')
 
     # Return the message's body text as string.  This undoes a
     # Content-Transfer-Encoding, but does not interpret other MIME
     # features (e.g. multipart messages).  To suppress the
     # decoding, pass a 0 as argument
     def getbodytext(self, decode = 1):
-       self.fp.seek(self.startofbody)
-       encoding = self.getencoding()
-       if not decode or encoding in ('7bit', '8bit', 'binary'):
-           return self.fp.read()
-       from StringIO import StringIO
-       output = StringIO()
-       mimetools.decode(self.fp, output, encoding)
-       return output.getvalue()
+        self.fp.seek(self.startofbody)
+        encoding = self.getencoding()
+        if not decode or encoding in ('7bit', '8bit', 'binary'):
+            return self.fp.read()
+        from StringIO import StringIO
+        output = StringIO()
+        mimetools.decode(self.fp, output, encoding)
+        return output.getvalue()
 
     # Only for multipart messages: return the message's body as a
     # list of SubMessage objects.  Each submessage object behaves
     # (almost) as a Message object.
     def getbodyparts(self):
-       if self.getmaintype() != 'multipart':
-           raise Error, 'Content-Type is not multipart/*'
-       bdry = self.getparam('boundary')
-       if not bdry:
-           raise Error, 'multipart/* without boundary param'
-       self.fp.seek(self.startofbody)
-       mf = multifile.MultiFile(self.fp)
-       mf.push(bdry)
-       parts = []
-       while mf.next():
-           n = str(self.number) + '.' + `1 + len(parts)`
-           part = SubMessage(self.folder, n, mf)
-           parts.append(part)
-       mf.pop()
-       return parts
+        if self.getmaintype() != 'multipart':
+            raise Error, 'Content-Type is not multipart/*'
+        bdry = self.getparam('boundary')
+        if not bdry:
+            raise Error, 'multipart/* without boundary param'
+        self.fp.seek(self.startofbody)
+        mf = multifile.MultiFile(self.fp)
+        mf.push(bdry)
+        parts = []
+        while mf.next():
+            n = str(self.number) + '.' + `1 + len(parts)`
+            part = SubMessage(self.folder, n, mf)
+            parts.append(part)
+        mf.pop()
+        return parts
 
     # Return body, either a string or a list of messages
     def getbody(self):
-       if self.getmaintype() == 'multipart':
-           return self.getbodyparts()
-       else:
-           return self.getbodytext()
+        if self.getmaintype() == 'multipart':
+            return self.getbodyparts()
+        else:
+            return self.getbodytext()
 
 
 class SubMessage(Message):
 
     # Constructor
     def __init__(self, f, n, fp):
-       Message.__init__(self, f, n, fp)
-       if self.getmaintype() == 'multipart':
-           self.body = Message.getbodyparts(self)
-       else:
-           self.body = Message.getbodytext(self)
-           # XXX If this is big, should remember file pointers
+        Message.__init__(self, f, n, fp)
+        if self.getmaintype() == 'multipart':
+            self.body = Message.getbodyparts(self)
+        else:
+            self.body = Message.getbodytext(self)
+            # XXX If this is big, should remember file pointers
 
     # String representation
     def __repr__(self):
-       f, n, fp = self.folder, self.number, self.fp
-       return 'SubMessage(%s, %s, %s)' % (f, n, fp)
+        f, n, fp = self.folder, self.number, self.fp
+        return 'SubMessage(%s, %s, %s)' % (f, n, fp)
 
     def getbodytext(self):
-       if type(self.body) == type(''):
-           return self.body
+        if type(self.body) == type(''):
+            return self.body
 
     def getbodyparts(self):
-       if type(self.body) == type([]):
-           return self.body
+        if type(self.body) == type([]):
+            return self.body
 
     def getbody(self):
-       return self.body
+        return self.body
 
 
 # Class implementing sets of integers.
@@ -786,169 +786,169 @@ class SubMessage(Message):
 class IntSet:
 
     def __init__(self, data = None, sep = ',', rng = '-'):
-       self.pairs = []
-       self.sep = sep
-       self.rng = rng
-       if data: self.fromstring(data)
+        self.pairs = []
+        self.sep = sep
+        self.rng = rng
+        if data: self.fromstring(data)
 
     def reset(self):
-       self.pairs = []
+        self.pairs = []
 
     def __cmp__(self, other):
-       return cmp(self.pairs, other.pairs)
+        return cmp(self.pairs, other.pairs)
 
     def __hash__(self):
-       return hash(self.pairs)
+        return hash(self.pairs)
 
     def __repr__(self):
-       return 'IntSet(%s, %s, %s)' % (`self.tostring()`,
-                 `self.sep`, `self.rng`)
+        return 'IntSet(%s, %s, %s)' % (`self.tostring()`,
+                  `self.sep`, `self.rng`)
 
     def normalize(self):
-       self.pairs.sort()
-       i = 1
-       while i < len(self.pairs):
-           alo, ahi = self.pairs[i-1]
-           blo, bhi = self.pairs[i]
-           if ahi >= blo-1:
-               self.pairs[i-1:i+1] = [(alo, max(ahi, bhi))]
-           else:
-               i = i+1
+        self.pairs.sort()
+        i = 1
+        while i < len(self.pairs):
+            alo, ahi = self.pairs[i-1]
+            blo, bhi = self.pairs[i]
+            if ahi >= blo-1:
+                self.pairs[i-1:i+1] = [(alo, max(ahi, bhi))]
+            else:
+                i = i+1
 
     def tostring(self):
-       s = ''
-       for lo, hi in self.pairs:
-           if lo == hi: t = `lo`
-           else: t = `lo` + self.rng + `hi`
-           if s: s = s + (self.sep + t)
-           else: s = t
-       return s
+        s = ''
+        for lo, hi in self.pairs:
+            if lo == hi: t = `lo`
+            else: t = `lo` + self.rng + `hi`
+            if s: s = s + (self.sep + t)
+            else: s = t
+        return s
 
     def tolist(self):
-       l = []
-       for lo, hi in self.pairs:
-           m = range(lo, hi+1)
-           l = l + m
-       return l
+        l = []
+        for lo, hi in self.pairs:
+            m = range(lo, hi+1)
+            l = l + m
+        return l
 
     def fromlist(self, list):
-       for i in list:
-           self.append(i)
+        for i in list:
+            self.append(i)
 
     def clone(self):
-       new = IntSet()
-       new.pairs = self.pairs[:]
-       return new
+        new = IntSet()
+        new.pairs = self.pairs[:]
+        return new
 
     def min(self):
-       return self.pairs[0][0]
+        return self.pairs[0][0]
 
     def max(self):
-       return self.pairs[-1][-1]
+        return self.pairs[-1][-1]
 
     def contains(self, x):
-       for lo, hi in self.pairs:
-           if lo <= x <= hi: return 1
-       return 0
+        for lo, hi in self.pairs:
+            if lo <= x <= hi: return 1
+        return 0
 
     def append(self, x):
-       for i in range(len(self.pairs)):
-           lo, hi = self.pairs[i]
-           if x < lo: # Need to insert before
-               if x+1 == lo:
-                   self.pairs[i] = (x, hi)
-               else:
-                   self.pairs.insert(i, (x, x))
-               if i > 0 and x-1 == self.pairs[i-1][1]:
-                   # Merge with previous
-                   self.pairs[i-1:i+1] = [
-                           (self.pairs[i-1][0],
-                            self.pairs[i][1])
-                         ]
-               return
-           if x <= hi: # Already in set
-               return
-       i = len(self.pairs) - 1
-       if i >= 0:
-           lo, hi = self.pairs[i]
-           if x-1 == hi:
-               self.pairs[i] = lo, x
-               return
-       self.pairs.append((x, x))
+        for i in range(len(self.pairs)):
+            lo, hi = self.pairs[i]
+            if x < lo: # Need to insert before
+                if x+1 == lo:
+                    self.pairs[i] = (x, hi)
+                else:
+                    self.pairs.insert(i, (x, x))
+                if i > 0 and x-1 == self.pairs[i-1][1]:
+                    # Merge with previous
+                    self.pairs[i-1:i+1] = [
+                            (self.pairs[i-1][0],
+                             self.pairs[i][1])
+                          ]
+                return
+            if x <= hi: # Already in set
+                return
+        i = len(self.pairs) - 1
+        if i >= 0:
+            lo, hi = self.pairs[i]
+            if x-1 == hi:
+                self.pairs[i] = lo, x
+                return
+        self.pairs.append((x, x))
 
     def addpair(self, xlo, xhi):
-       if xlo > xhi: return
-       self.pairs.append((xlo, xhi))
-       self.normalize()
+        if xlo > xhi: return
+        self.pairs.append((xlo, xhi))
+        self.normalize()
 
     def fromstring(self, data):
-       import string
-       new = []
-       for part in string.splitfields(data, self.sep):
-           list = []
-           for subp in string.splitfields(part, self.rng):
-               s = string.strip(subp)
-               list.append(string.atoi(s))
-           if len(list) == 1:
-               new.append((list[0], list[0]))
-           elif len(list) == 2 and list[0] <= list[1]:
-               new.append((list[0], list[1]))
-           else:
-               raise ValueError, 'bad data passed to IntSet'
-       self.pairs = self.pairs + new
-       self.normalize()
+        import string
+        new = []
+        for part in string.splitfields(data, self.sep):
+            list = []
+            for subp in string.splitfields(part, self.rng):
+                s = string.strip(subp)
+                list.append(string.atoi(s))
+            if len(list) == 1:
+                new.append((list[0], list[0]))
+            elif len(list) == 2 and list[0] <= list[1]:
+                new.append((list[0], list[1]))
+            else:
+                raise ValueError, 'bad data passed to IntSet'
+        self.pairs = self.pairs + new
+        self.normalize()
 
 
 # Subroutines to read/write entries in .mh_profile and .mh_sequences
 
 def pickline(file, key, casefold = 1):
     try:
-       f = open(file, 'r')
+        f = open(file, 'r')
     except IOError:
-       return None
+        return None
     pat = re.escape(key) + ':'
     prog = re.compile(pat, casefold and re.IGNORECASE)
     while 1:
-       line = f.readline()
-       if not line: break
-       if prog.match(line):
-           text = line[len(key)+1:]
-           while 1:
-               line = f.readline()
-               if not line or line[0] not in string.whitespace:
-                   break
-               text = text + line
-           return string.strip(text)
+        line = f.readline()
+        if not line: break
+        if prog.match(line):
+            text = line[len(key)+1:]
+            while 1:
+                line = f.readline()
+                if not line or line[0] not in string.whitespace:
+                    break
+                text = text + line
+            return string.strip(text)
     return None
 
 def updateline(file, key, value, casefold = 1):
     try:
-       f = open(file, 'r')
-       lines = f.readlines()
-       f.close()
+        f = open(file, 'r')
+        lines = f.readlines()
+        f.close()
     except IOError:
-       lines = []
+        lines = []
     pat = re.escape(key) + ':(.*)\n'
     prog = re.compile(pat, casefold and re.IGNORECASE)
     if value is None:
-       newline = None
+        newline = None
     else:
-       newline = '%s: %s\n' % (key, value)
+        newline = '%s: %s\n' % (key, value)
     for i in range(len(lines)):
-       line = lines[i]
-       if prog.match(line):
-           if newline is None:
-               del lines[i]
-           else:
-               lines[i] = newline
-           break
+        line = lines[i]
+        if prog.match(line):
+            if newline is None:
+                del lines[i]
+            else:
+                lines[i] = newline
+            break
     else:
-       if newline is not None:
-           lines.append(newline)
+        if newline is not None:
+            lines.append(newline)
     tempfile = file + "~"
     f = open(tempfile, 'w')
     for line in lines:
-       f.write(line)
+        f.write(line)
     f.close()
     os.rename(tempfile, file)
 
@@ -963,8 +963,8 @@ def test():
     do('mh.listfolders()')
     do('mh.listallfolders()')
     testfolders = ['@test', '@test/test1', '@test/test2',
-                  '@test/test1/test11', '@test/test1/test12',
-                  '@test/test1/test11/test111']
+                   '@test/test1/test11', '@test/test1/test12',
+                   '@test/test1/test11/test111']
     for t in testfolders: do('mh.makefolder(%s)' % `t`)
     do('mh.listsubfolders(\'@test\')')
     do('mh.listallsubfolders(\'@test\')')
@@ -984,17 +984,17 @@ def test():
     f = mh.openfolder(context)
     do('f.getcurrent()')
     for seq in ['first', 'last', 'cur', '.', 'prev', 'next',
-               'first:3', 'last:3', 'cur:3', 'cur:-3',
-               'prev:3', 'next:3',
-               '1:3', '1:-3', '100:3', '100:-3', '10000:3', '10000:-3',
-               'all']:
-       try:
-           do('f.parsesequence(%s)' % `seq`)
-       except Error, msg:
-           print "Error:", msg
-       stuff = os.popen("pick %s 2>/dev/null" % `seq`).read()
-       list = map(string.atoi, string.split(stuff))
-       print list, "<-- pick"
+                'first:3', 'last:3', 'cur:3', 'cur:-3',
+                'prev:3', 'next:3',
+                '1:3', '1:-3', '100:3', '100:-3', '10000:3', '10000:-3',
+                'all']:
+        try:
+            do('f.parsesequence(%s)' % `seq`)
+        except Error, msg:
+            print "Error:", msg
+        stuff = os.popen("pick %s 2>/dev/null" % `seq`).read()
+        list = map(string.atoi, string.split(stuff))
+        print list, "<-- pick"
     do('f.listmessages()')
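
pickline() and updateline() above read and rewrite single "Key: value" entries in MH text files such as .mh_profile.  A minimal sketch, assuming the helpers are imported from the MH support module (mhlib) and using an illustrative profile path:

    from mhlib import pickline, updateline
    profile = '/home/user/.mh_profile'          # illustrative path
    editor = pickline(profile, 'Editor')        # value string, or None if absent
    updateline(profile, 'Editor', 'vi')         # rewrite (or append) the entry
    updateline(profile, 'Draft-Folder', None)   # a value of None deletes the entry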
 
 
index fd0e1c52c9640ab355030d4053f618c269ec51db..296c0ca49e394f61a518e7a83a50eb529d3b963b 100644 (file)
@@ -48,49 +48,49 @@ def guess_type(url):
 
     """
     if not inited:
-       init()
+        init()
     base, ext = posixpath.splitext(url)
     while suffix_map.has_key(ext):
-       base, ext = posixpath.splitext(base + suffix_map[ext])
+        base, ext = posixpath.splitext(base + suffix_map[ext])
     if encodings_map.has_key(ext):
-       encoding = encodings_map[ext]
-       base, ext = posixpath.splitext(base)
+        encoding = encodings_map[ext]
+        base, ext = posixpath.splitext(base)
     else:
-       encoding = None
+        encoding = None
     if types_map.has_key(ext):
-       return types_map[ext], encoding
+        return types_map[ext], encoding
     elif types_map.has_key(string.lower(ext)):
-       return types_map[string.lower(ext)], encoding
+        return types_map[string.lower(ext)], encoding
     else:
-       return None, encoding
+        return None, encoding
 
 def init(files=None):
     global inited
     for file in files or knownfiles:
-       s = read_mime_types(file)
-       if s:
-           for key, value in s.items():
-               types_map[key] = value
+        s = read_mime_types(file)
+        if s:
+            for key, value in s.items():
+                types_map[key] = value
     inited = 1
 
 def read_mime_types(file):
     try:
-       f = open(file)
+        f = open(file)
     except IOError:
-       return None
+        return None
     map = {}
     while 1:
-       line = f.readline()
-       if not line: break
-       words = string.split(line)
-       for i in range(len(words)):
-           if words[i][0] == '#':
-               del words[i:]
-               break
-       if not words: continue
-       type, suffixes = words[0], words[1:]
-       for suff in suffixes:
-           map['.'+suff] = type
+        line = f.readline()
+        if not line: break
+        words = string.split(line)
+        for i in range(len(words)):
+            if words[i][0] == '#':
+                del words[i:]
+                break
+        if not words: continue
+        type, suffixes = words[0], words[1:]
+        for suff in suffixes:
+            map['.'+suff] = type
     f.close()
     return map
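
guess_type() above peels off any suffix_map aliases and a known encoding before looking the remaining extension up in types_map.  A short sketch of the public interface; the exact results depend on the mime.types files that init() reads:

    import mimetypes
    ctype, encoding = mimetypes.guess_type('notes.tar.gz')
    # typically ('application/x-tar', 'gzip'): '.gz' supplies the encoding,
    # '.tar' supplies the content type
    ctype, encoding = mimetypes.guess_type('page.html')
    # typically ('text/html', None)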
 
index 70411f9abf9930e607d0454f08e189ce42ce67bd..33a1e778c476b323930545d11b29f38ec6ed0662 100644 (file)
@@ -89,7 +89,7 @@ class Pickler:
         self.write(STOP)
 
     def dump_special(self, callable, args, state = None):
-       if type(args) is not TupleType and args is not None:
+        if type(args) is not TupleType and args is not None:
             raise PicklingError, "Second argument to dump_special " \
                                  "must be a tuple"
 
@@ -121,8 +121,8 @@ class Pickler:
         memo = self.memo
 
         if (not pers_save):
-           pid = self.persistent_id(object)
-           if (pid is not None):
+            pid = self.persistent_id(object)
+            if (pid is not None):
                 self.save_pers(pid)
                 return
 
@@ -130,8 +130,8 @@ class Pickler:
  
         t = type(object)
 
-       if ((t is TupleType) and (len(object) == 0)):
-           if (self.bin):
+        if ((t is TupleType) and (len(object) == 0)):
+            if (self.bin):
                 self.save_empty_tuple(object)
             else:
                 self.save_tuple(object)
@@ -162,9 +162,9 @@ class Pickler:
             else:
                 tup = reduce(object)
 
-           if type(tup) is StringType:
-               self.save_global(object, tup)
-               return
+            if type(tup) is StringType:
+                self.save_global(object, tup)
+                return
 
             if (type(tup) is not TupleType):
                 raise PicklingError, "Value returned by %s must be a " \
@@ -172,7 +172,7 @@ class Pickler:
 
             l = len(tup)
    
-           if ((l != 2) and (l != 3)):
+            if ((l != 2) and (l != 3)):
                 raise PicklingError, "tuple returned by %s must contain " \
                                      "only two or three elements" % reduce
 
@@ -189,9 +189,9 @@ class Pickler:
                                      "by %s must be a tuple" % reduce
 
             self.save_reduce(callable, arg_tup, state) 
-           memo_len = len(memo)
-           self.write(self.put(memo_len))
-           memo[d] = (memo_len, object)
+            memo_len = len(memo)
+            self.write(self.put(memo_len))
+            memo[d] = (memo_len, object)
             return
 
         f(self, object)
@@ -217,7 +217,7 @@ class Pickler:
         save(arg_tup)
         write(REDUCE)
         
-       if (state is not None):
+        if (state is not None):
             save(state)
             write(BUILD)
 
@@ -284,7 +284,7 @@ class Pickler:
             save(element)
 
         if (len(object) and memo.has_key(d)):
-           if (self.bin):
+            if (self.bin):
                 write(POP_MARK + self.get(memo[d][0]))
                 return
            
@@ -306,7 +306,7 @@ class Pickler:
         save  = self.save
         memo  = self.memo
 
-       if (self.bin):
+        if (self.bin):
             write(EMPTY_LIST)
         else:
             write(MARK + LIST)
@@ -337,7 +337,7 @@ class Pickler:
         save  = self.save
         memo  = self.memo
 
-       if (self.bin):
+        if (self.bin):
             write(EMPTY_DICT)
         else:
             write(MARK + DICT)
@@ -375,7 +375,7 @@ class Pickler:
         if hasattr(object, '__getinitargs__'):
             args = object.__getinitargs__()
             len(args) # XXX Assert it's a sequence
-           _keep_alive(args, memo)
+            _keep_alive(args, memo)
         else:
             args = ()
 
@@ -402,7 +402,7 @@ class Pickler:
             stuff = object.__dict__
         else:
             stuff = getstate()
-           _keep_alive(stuff, memo)
+            _keep_alive(stuff, memo)
         save(stuff)
         write(BUILD)
     dispatch[InstanceType] = save_inst
@@ -414,10 +414,10 @@ class Pickler:
         if (name is None):
             name = object.__name__
 
-       try:
-           module = object.__module__
-       except AttributeError:
-           module = whichmodule(object, name)
+        try:
+            module = object.__module__
+        except AttributeError:
+            module = whichmodule(object, name)
 
         memo_len = len(memo)
         write(GLOBAL + module + '\n' + name + '\n' +
@@ -439,10 +439,10 @@ def _keep_alive(x, memo):
     the memo itself...
     """
     try:
-       memo[id(memo)].append(x)
+        memo[id(memo)].append(x)
     except KeyError:
-       # aha, this is the first one :-)
-       memo[id(memo)]=[x]
+        # aha, this is the first one :-)
+        memo[id(memo)]=[x]
 
 
 classmap = {}
@@ -461,8 +461,8 @@ def whichmodule(cls, clsname):
     import sys
 
     for name, module in sys.modules.items():
-       if name != '__main__' and \
-           hasattr(module, clsname) and \
+        if name != '__main__' and \
+            hasattr(module, clsname) and \
             getattr(module, clsname) is cls:
             break
     else:
@@ -601,18 +601,18 @@ class Unpickler:
         module = self.readline()[:-1]
         name = self.readline()[:-1]
         klass = self.find_class(module, name)
-       instantiated = 0
-       if (not args and type(klass) is ClassType and
-           not hasattr(klass, "__getinitargs__")):
-           try:
-               value = _EmptyClass()
-               value.__class__ = klass
-           except RuntimeError:
-               # In restricted execution, assignment to inst.__class__ is
-               # prohibited
-               pass
-       if not instantiated:
-           value = apply(klass, args)
+        instantiated = 0
+        if (not args and type(klass) is ClassType and
+            not hasattr(klass, "__getinitargs__")):
+            try:
+                value = _EmptyClass()
+                value.__class__ = klass
+            except RuntimeError:
+                # In restricted execution, assignment to inst.__class__ is
+                # prohibited
+                pass
+        if not instantiated:
+            value = apply(klass, args)
         self.append(value)
     dispatch[INST] = load_inst
 
@@ -623,19 +623,19 @@ class Unpickler:
         del stack[k + 1]
         args = tuple(stack[k + 1:]) 
         del stack[k:]
-       instantiated = 0
-       if (not args and type(klass) is ClassType and
-           not hasattr(klass, "__getinitargs__")):
-           try:
-               value = _EmptyClass()
-               value.__class__ = klass
-               instantiated = 1
-           except RuntimeError:
-               # In restricted execution, assignment to inst.__class__ is
-               # prohibited
-               pass
-       if not instantiated:
-           value = apply(klass, args)
+        instantiated = 0
+        if (not args and type(klass) is ClassType and
+            not hasattr(klass, "__getinitargs__")):
+            try:
+                value = _EmptyClass()
+                value.__class__ = klass
+                instantiated = 1
+            except RuntimeError:
+                # In restricted execution, assignment to inst.__class__ is
+                # prohibited
+                pass
+        if not instantiated:
+            value = apply(klass, args)
         self.append(value)
     dispatch[OBJ] = load_obj                
 
@@ -663,11 +663,11 @@ class Unpickler:
 
         callable = stack[-2]
         arg_tup  = stack[-1]
-       del stack[-2:]
+        del stack[-2:]
 
-       if type(callable) is not ClassType:
-           if not safe_constructors.has_key(callable):
-               try:
+        if type(callable) is not ClassType:
+            if not safe_constructors.has_key(callable):
+                try:
                     safe = callable.__safe_for_unpickling__
                 except AttributeError:
                     safe = None
@@ -676,10 +676,10 @@ class Unpickler:
                    raise UnpicklingError, "%s is not safe for " \
                                           "unpickling" % callable
 
-       if arg_tup is None:
-           value = callable.__basicnew__()
-       else:
-           value = apply(callable, arg_tup)
+        if arg_tup is None:
+            value = callable.__basicnew__()
+        else:
+            value = apply(callable, arg_tup)
         self.append(value)
     dispatch[REDUCE] = load_reduce
 
@@ -689,7 +689,7 @@ class Unpickler:
 
     def load_pop_mark(self):
         k = self.marker()
-       del self.stack[k:]
+        del self.stack[k:]
     dispatch[POP_MARK] = load_pop_mark
 
     def load_dup(self):
@@ -736,7 +736,7 @@ class Unpickler:
         stack = self.stack
         mark = self.marker()
         list = stack[mark - 1]
-       for i in range(mark + 1, len(stack)):
+        for i in range(mark + 1, len(stack)):
             list.append(stack[i])
 
         del stack[mark:]
@@ -755,7 +755,7 @@ class Unpickler:
         stack = self.stack
         mark = self.marker()
         dict = stack[mark - 1]
-       for i in range(mark + 1, len(stack), 2):
+        for i in range(mark + 1, len(stack), 2):
             dict[stack[i]] = stack[i + 1]
 
         del stack[mark:]
@@ -769,15 +769,15 @@ class Unpickler:
         try:
             setstate = inst.__setstate__
         except AttributeError:
-           try:
-               inst.__dict__.update(value)
-           except RuntimeError:
-               # XXX In restricted execution, the instance's __dict__ is not
-               # accessible.  Use the old way of unpickling the instance
-               # variables.  This is a semantic difference when unpickling in
-               # restricted vs. unrestricted modes.
-               for k, v in value.items():
-                   setattr(inst, k, v)
+            try:
+                inst.__dict__.update(value)
+            except RuntimeError:
+                # XXX In restricted execution, the instance's __dict__ is not
+                # accessible.  Use the old way of unpickling the instance
+                # variables.  This is a semantic difference when unpickling in
+                # restricted vs. unrestricted modes.
+                for k, v in value.items():
+                    setattr(inst, k, v)
         else:
             setstate(value)
     dispatch[BUILD] = load_build
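
save_inst() above consults __getinitargs__ to decide how an instance is re-created, and load_build() applies the saved state (via __dict__.update() or __setstate__).  A minimal round-trip sketch with an old-style class, which is what this code handles:

    import pickle

    class Point:
        def __init__(self, x, y):
            self.x, self.y = x, y
        def __getinitargs__(self):
            # saved by save_inst(); the loader re-runs __init__ with these args
            return (self.x, self.y)

    p = pickle.loads(pickle.dumps(Point(1, 2)))
    assert (p.x, p.y) == (1, 2)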
index d62f6a957cdbd1f78c94f9e01398fdaa3a073644..4c429589133886f182feec01a5678f7824f63d29 100644 (file)
@@ -2,71 +2,71 @@ import os
 import sys
 import string
 
-MAXFD = 256    # Max number of file descriptors (os.getdtablesize()???)
+MAXFD = 256     # Max number of file descriptors (os.getdtablesize()???)
 
 _active = []
 
 def _cleanup():
     for inst in _active[:]:
-       inst.poll()
+        inst.poll()
 
 class Popen3:
     def __init__(self, cmd, capturestderr=0, bufsize=-1):
-       if type(cmd) == type(''):
-           cmd = ['/bin/sh', '-c', cmd]
-       p2cread, p2cwrite = os.pipe()
-       c2pread, c2pwrite = os.pipe()
-       if capturestderr:
-           errout, errin = os.pipe()
-       self.pid = os.fork()
-       if self.pid == 0:
-           # Child
-           os.close(0)
-           os.close(1)
-           if os.dup(p2cread) <> 0:
-               sys.stderr.write('popen2: bad read dup\n')
-           if os.dup(c2pwrite) <> 1:
-               sys.stderr.write('popen2: bad write dup\n')
-           if capturestderr:
-               os.close(2)
-               if os.dup(errin) <> 2: pass
-           for i in range(3, MAXFD):
-               try:
-                   os.close(i)
-               except: pass
-           try:
-               os.execvp(cmd[0], cmd)
-           finally:
-               os._exit(1)
-           # Shouldn't come here, I guess
-           os._exit(1)
-       os.close(p2cread)
-       self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
-       os.close(c2pwrite)
-       self.fromchild = os.fdopen(c2pread, 'r', bufsize)
-       if capturestderr:
-           os.close(errin)
-           self.childerr = os.fdopen(errout, 'r', bufsize)
-       else:
-           self.childerr = None
-       self.sts = -1 # Child not completed yet
-       _active.append(self)
+        if type(cmd) == type(''):
+            cmd = ['/bin/sh', '-c', cmd]
+        p2cread, p2cwrite = os.pipe()
+        c2pread, c2pwrite = os.pipe()
+        if capturestderr:
+            errout, errin = os.pipe()
+        self.pid = os.fork()
+        if self.pid == 0:
+            # Child
+            os.close(0)
+            os.close(1)
+            if os.dup(p2cread) <> 0:
+                sys.stderr.write('popen2: bad read dup\n')
+            if os.dup(c2pwrite) <> 1:
+                sys.stderr.write('popen2: bad write dup\n')
+            if capturestderr:
+                os.close(2)
+                if os.dup(errin) <> 2: pass
+            for i in range(3, MAXFD):
+                try:
+                    os.close(i)
+                except: pass
+            try:
+                os.execvp(cmd[0], cmd)
+            finally:
+                os._exit(1)
+            # Shouldn't come here, I guess
+            os._exit(1)
+        os.close(p2cread)
+        self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
+        os.close(c2pwrite)
+        self.fromchild = os.fdopen(c2pread, 'r', bufsize)
+        if capturestderr:
+            os.close(errin)
+            self.childerr = os.fdopen(errout, 'r', bufsize)
+        else:
+            self.childerr = None
+        self.sts = -1 # Child not completed yet
+        _active.append(self)
     def poll(self):
-       if self.sts < 0:
-           try:
-               pid, sts = os.waitpid(self.pid, os.WNOHANG)
-               if pid == self.pid:
-                   self.sts = sts
-                   _active.remove(self)
-           except os.error:
-               pass
-       return self.sts
+        if self.sts < 0:
+            try:
+                pid, sts = os.waitpid(self.pid, os.WNOHANG)
+                if pid == self.pid:
+                    self.sts = sts
+                    _active.remove(self)
+            except os.error:
+                pass
+        return self.sts
     def wait(self):
-       pid, sts = os.waitpid(self.pid, 0)
-       if pid == self.pid:
-           self.sts = sts
-           _active.remove(self)
-       return self.sts
+        pid, sts = os.waitpid(self.pid, 0)
+        if pid == self.pid:
+            self.sts = sts
+            _active.remove(self)
+        return self.sts
 
 def popen2(cmd, bufsize=-1):
     _cleanup()
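
Popen3 above forks, connects the child's stdin and stdout (and optionally stderr) to pipes, and tracks the exit status through poll() and wait().  A brief sketch of driving a shell command through it:

    import popen2
    p = popen2.Popen3('tr a-z A-Z')     # string commands are run via /bin/sh
    p.tochild.write('hello\n')
    p.tochild.close()                   # send EOF so the child can finish
    print p.fromchild.read()            # 'HELLO\n'
    status = p.wait()                   # raw status from os.waitpid()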
index d66517fbbc4565c55f9c8d12135b6ce541582d93..d3aeac07762fe890c5088f82f58e325638d2a123 100644 (file)
@@ -6,56 +6,56 @@
 # Extended file operations
 #
 # f = posixfile.open(filename, [mode, [bufsize]])
-#      will create a new posixfile object
+#       will create a new posixfile object
 #
 # f = posixfile.fileopen(fileobject)
-#      will create a posixfile object from a builtin file object
+#       will create a posixfile object from a builtin file object
 #
 # f.file()
-#      will return the original builtin file object
+#       will return the original builtin file object
 #
 # f.dup()
-#      will return a new file object based on a new filedescriptor
+#       will return a new file object based on a new filedescriptor
 #
 # f.dup2(fd)
-#      will return a new file object based on the given filedescriptor
+#       will return a new file object based on the given filedescriptor
 #
 # f.flags(mode)
-#      will turn on the associated flag (merge)
-#      mode can contain the following characters:
+#       will turn on the associated flag (merge)
+#       mode can contain the following characters:
 #
 #   (character representing a flag)
-#      a       append only flag
-#      c       close on exec flag
-#      n       no delay flag
-#      s       synchronization flag
+#       a       append only flag
+#       c       close on exec flag
+#       n       no delay flag
+#       s       synchronization flag
 #   (modifiers)
-#      !       turn flags 'off' instead of default 'on'
-#      =       copy flags 'as is' instead of default 'merge'
-#      ?       return a string in which the characters represent the flags
-#              that are set
+#       !       turn flags 'off' instead of default 'on'
+#       =       copy flags 'as is' instead of default 'merge'
+#       ?       return a string in which the characters represent the flags
+#               that are set
 #
-#      note: - the '!' and '=' modifiers are mutually exclusive.
-#            - the '?' modifier will return the status of the flags after they
-#              have been changed by other characters in the mode string
+#       note: - the '!' and '=' modifiers are mutually exclusive.
+#             - the '?' modifier will return the status of the flags after they
+#               have been changed by other characters in the mode string
 #
 # f.lock(mode [, len [, start [, whence]]])
-#      will (un)lock a region
-#      mode can contain the following characters:
+#       will (un)lock a region
+#       mode can contain the following characters:
 #
 #   (character representing type of lock)
-#      u       unlock
-#      r       read lock
-#      w       write lock
+#       u       unlock
+#       r       read lock
+#       w       write lock
 #   (modifiers)
-#      |       wait until the lock can be granted
-#      ?       return the first lock conflicting with the requested lock
-#              or 'None' if there is no conflict. The lock returned is in the
-#              format (mode, len, start, whence, pid) where mode is a
-#              character representing the type of lock ('r' or 'w')
+#       |       wait until the lock can be granted
+#       ?       return the first lock conflicting with the requested lock
+#               or 'None' if there is no conflict. The lock returned is in the
+#               format (mode, len, start, whence, pid) where mode is a
+#               character representing the type of lock ('r' or 'w')
 #
-#      note: - the '?' modifier prevents a region from being locked; it is
-#              query only
+#       note: - the '?' modifier prevents a region from being locked; it is
+#               query only
 #
 
 class _posixfile_:
@@ -65,149 +65,149 @@ class _posixfile_:
     # Internal routines
     #
     def __repr__(self):
-       file = self._file_
-       return "<%s posixfile '%s', mode '%s' at %s>" % \
-               (self.states[file.closed], file.name, file.mode, \
-                hex(id(self))[2:])
+        file = self._file_
+        return "<%s posixfile '%s', mode '%s' at %s>" % \
+                (self.states[file.closed], file.name, file.mode, \
+                 hex(id(self))[2:])
 
     def __del__(self):
-       self._file_.close()
+        self._file_.close()
 
     #
     # Initialization routines
     #
     def open(self, name, mode='r', bufsize=-1):
-       import __builtin__
-       return self.fileopen(__builtin__.open(name, mode, bufsize))
+        import __builtin__
+        return self.fileopen(__builtin__.open(name, mode, bufsize))
 
     def fileopen(self, file):
-       if `type(file)` != "<type 'file'>":
-           raise TypeError, 'posixfile.fileopen() arg must be file object'
-       self._file_  = file
-       # Copy basic file methods
-       for method in file.__methods__:
-           setattr(self, method, getattr(file, method))
-       return self
+        if `type(file)` != "<type 'file'>":
+            raise TypeError, 'posixfile.fileopen() arg must be file object'
+        self._file_  = file
+        # Copy basic file methods
+        for method in file.__methods__:
+            setattr(self, method, getattr(file, method))
+        return self
 
     #
     # New methods
     #
     def file(self):
-       return self._file_
+        return self._file_
 
     def dup(self):
-       import posix
+        import posix
 
-       try: ignore = posix.fdopen
-       except: raise AttributeError, 'dup() method unavailable'
+        try: ignore = posix.fdopen
+        except: raise AttributeError, 'dup() method unavailable'
 
-       return posix.fdopen(posix.dup(self._file_.fileno()), self._file_.mode)
+        return posix.fdopen(posix.dup(self._file_.fileno()), self._file_.mode)
 
     def dup2(self, fd):
-       import posix
+        import posix
 
-       try: ignore = posix.fdopen
-       except: raise AttributeError, 'dup() method unavailable'
+        try: ignore = posix.fdopen
+        except: raise AttributeError, 'dup() method unavailable'
 
-       posix.dup2(self._file_.fileno(), fd)
-       return posix.fdopen(fd, self._file_.mode)
+        posix.dup2(self._file_.fileno(), fd)
+        return posix.fdopen(fd, self._file_.mode)
 
     def flags(self, *which):
-       import fcntl, FCNTL
-
-       if which:
-           if len(which) > 1:
-               raise TypeError, 'Too many arguments'
-           which = which[0]
-       else: which = '?'
-
-       l_flags = 0
-       if 'n' in which: l_flags = l_flags | FCNTL.O_NDELAY
-       if 'a' in which: l_flags = l_flags | FCNTL.O_APPEND
-       if 's' in which: l_flags = l_flags | FCNTL.O_SYNC
-
-       file = self._file_
-
-       if '=' not in which:
-           cur_fl = fcntl.fcntl(file.fileno(), FCNTL.F_GETFL, 0)
-           if '!' in which: l_flags = cur_fl & ~ l_flags
-           else: l_flags = cur_fl | l_flags
-
-       l_flags = fcntl.fcntl(file.fileno(), FCNTL.F_SETFL, l_flags)
-
-       if 'c' in which:        
-           arg = ('!' not in which)    # 0 is don't, 1 is do close on exec
-           l_flags = fcntl.fcntl(file.fileno(), FCNTL.F_SETFD, arg)
-
-       if '?' in which:
-           which = ''                  # Return current flags
-           l_flags = fcntl.fcntl(file.fileno(), FCNTL.F_GETFL, 0)
-           if FCNTL.O_APPEND & l_flags: which = which + 'a'
-           if fcntl.fcntl(file.fileno(), FCNTL.F_GETFD, 0) & 1:
-               which = which + 'c'
-           if FCNTL.O_NDELAY & l_flags: which = which + 'n'
-           if FCNTL.O_SYNC & l_flags: which = which + 's'
-           return which
-       
+        import fcntl, FCNTL
+
+        if which:
+            if len(which) > 1:
+                raise TypeError, 'Too many arguments'
+            which = which[0]
+        else: which = '?'
+
+        l_flags = 0
+        if 'n' in which: l_flags = l_flags | FCNTL.O_NDELAY
+        if 'a' in which: l_flags = l_flags | FCNTL.O_APPEND
+        if 's' in which: l_flags = l_flags | FCNTL.O_SYNC
+
+        file = self._file_
+
+        if '=' not in which:
+            cur_fl = fcntl.fcntl(file.fileno(), FCNTL.F_GETFL, 0)
+            if '!' in which: l_flags = cur_fl & ~ l_flags
+            else: l_flags = cur_fl | l_flags
+
+        l_flags = fcntl.fcntl(file.fileno(), FCNTL.F_SETFL, l_flags)
+
+        if 'c' in which:        
+            arg = ('!' not in which)    # 0 is don't, 1 is do close on exec
+            l_flags = fcntl.fcntl(file.fileno(), FCNTL.F_SETFD, arg)
+
+        if '?' in which:
+            which = ''                  # Return current flags
+            l_flags = fcntl.fcntl(file.fileno(), FCNTL.F_GETFL, 0)
+            if FCNTL.O_APPEND & l_flags: which = which + 'a'
+            if fcntl.fcntl(file.fileno(), FCNTL.F_GETFD, 0) & 1:
+                which = which + 'c'
+            if FCNTL.O_NDELAY & l_flags: which = which + 'n'
+            if FCNTL.O_SYNC & l_flags: which = which + 's'
+            return which
+        
     def lock(self, how, *args):
-       import struct, fcntl, FCNTL
-
-       if 'w' in how: l_type = FCNTL.F_WRLCK
-       elif 'r' in how: l_type = FCNTL.F_RDLCK
-       elif 'u' in how: l_type = FCNTL.F_UNLCK
-       else: raise TypeError, 'no type of lock specified'
-
-       if '|' in how: cmd = FCNTL.F_SETLKW
-       elif '?' in how: cmd = FCNTL.F_GETLK
-       else: cmd = FCNTL.F_SETLK
-
-       l_whence = 0
-       l_start = 0
-       l_len = 0
-
-       if len(args) == 1:
-           l_len = args[0]
-       elif len(args) == 2:
-           l_len, l_start = args
-       elif len(args) == 3:
-           l_len, l_start, l_whence = args
-       elif len(args) > 3:
-           raise TypeError, 'too many arguments'
-
-       # Hack by davem@magnet.com to get locking to go on freebsd;
-       # additions for AIX by Vladimir.Marangozov@imag.fr
+        import struct, fcntl, FCNTL
+
+        if 'w' in how: l_type = FCNTL.F_WRLCK
+        elif 'r' in how: l_type = FCNTL.F_RDLCK
+        elif 'u' in how: l_type = FCNTL.F_UNLCK
+        else: raise TypeError, 'no type of lock specified'
+
+        if '|' in how: cmd = FCNTL.F_SETLKW
+        elif '?' in how: cmd = FCNTL.F_GETLK
+        else: cmd = FCNTL.F_SETLK
+
+        l_whence = 0
+        l_start = 0
+        l_len = 0
+
+        if len(args) == 1:
+            l_len = args[0]
+        elif len(args) == 2:
+            l_len, l_start = args
+        elif len(args) == 3:
+            l_len, l_start, l_whence = args
+        elif len(args) > 3:
+            raise TypeError, 'too many arguments'
+
+        # Hack by davem@magnet.com to get locking to go on freebsd;
+        # additions for AIX by Vladimir.Marangozov@imag.fr
         import sys, os
         if sys.platform in ('netbsd1', 'freebsd2', 'freebsd3'):
-           flock = struct.pack('lxxxxlxxxxlhh', \
-                 l_start, l_len, os.getpid(), l_type, l_whence) 
+            flock = struct.pack('lxxxxlxxxxlhh', \
+                  l_start, l_len, os.getpid(), l_type, l_whence) 
         elif sys.platform in ['aix3', 'aix4']:
             flock = struct.pack('hhlllii', \
                   l_type, l_whence, l_start, l_len, 0, 0, 0)
-       else:
-           flock = struct.pack('hhllhh', \
-                 l_type, l_whence, l_start, l_len, 0, 0)
+        else:
+            flock = struct.pack('hhllhh', \
+                  l_type, l_whence, l_start, l_len, 0, 0)
 
-       flock = fcntl.fcntl(self._file_.fileno(), cmd, flock)
+        flock = fcntl.fcntl(self._file_.fileno(), cmd, flock)
 
-       if '?' in how:
-           if sys.platform in ('netbsd1', 'freebsd2', 'freebsd3'):
-               l_start, l_len, l_pid, l_type, l_whence = \
-                   struct.unpack('lxxxxlxxxxlhh', flock)
+        if '?' in how:
+            if sys.platform in ('netbsd1', 'freebsd2', 'freebsd3'):
+                l_start, l_len, l_pid, l_type, l_whence = \
+                    struct.unpack('lxxxxlxxxxlhh', flock)
             elif sys.platform in ['aix3', 'aix4']:
                 l_type, l_whence, l_start, l_len, l_sysid, l_pid, l_vfs = \
                     struct.unpack('hhlllii', flock)
-           elif sys.platform == "linux2":
-               l_type, l_whence, l_start, l_len, l_pid, l_sysid = \
-                   struct.unpack('hhllhh', flock)
-           else:
-               l_type, l_whence, l_start, l_len, l_sysid, l_pid = \
-                   struct.unpack('hhllhh', flock)
-
-           if l_type != FCNTL.F_UNLCK:
-               if l_type == FCNTL.F_RDLCK:
-                   return 'r', l_len, l_start, l_whence, l_pid
-               else:
-                   return 'w', l_len, l_start, l_whence, l_pid
+            elif sys.platform == "linux2":
+                l_type, l_whence, l_start, l_len, l_pid, l_sysid = \
+                    struct.unpack('hhllhh', flock)
+            else:
+                l_type, l_whence, l_start, l_len, l_sysid, l_pid = \
+                    struct.unpack('hhllhh', flock)
+
+            if l_type != FCNTL.F_UNLCK:
+                if l_type == FCNTL.F_RDLCK:
+                    return 'r', l_len, l_start, l_whence, l_pid
+                else:
+                    return 'w', l_len, l_start, l_whence, l_pid
 
 #
 # Public routine to obtain a posixfile object
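
The flag and lock letters accepted by flags() and lock() are the ones tabulated in the module header above.  A short locking sketch using the public posixfile.open() constructor; the lock-file path is illustrative:

    import posixfile
    f = posixfile.open('/tmp/example.lock', 'w')    # illustrative path
    f.lock('w|')        # write lock; '|' waits until it can be granted
    f.write('locked\n')
    f.lock('u')         # release the lock
    f.close()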
index d95cf1a18087323ec513364d23a179f93ed0e351..fcf09046c8a75c60eb04b82f11952bc4786f0395 100644 (file)
@@ -1,5 +1,5 @@
-#  Author:     Fred L. Drake, Jr.
-#              fdrake@cnri.reston.va.us, fdrake@acm.org
+#  Author:      Fred L. Drake, Jr.
+#               fdrake@cnri.reston.va.us, fdrake@acm.org
 #
 #  This is a simple little module I wrote to make life easier.  I didn't
 #  see anything quite like it in the library, though I may have overlooked
@@ -70,119 +70,119 @@ def saferepr(object):
 
 class PrettyPrinter:
     def __init__(self, indent=1, width=80, depth=None, stream=None):
-       """Handle pretty printing operations onto a stream using a set of
-       configured parameters.
-
-       indent
-           Number of spaces to indent for each level of nesting.
-
-       width
-           Attempted maximum number of columns in the output.
-
-       depth
-           The maximum depth to print out nested structures.
-
-       stream
-           The desired output stream.  If omitted (or false), the standard
-           output stream available at construction will be used.
-
-       """
-       indent = int(indent)
-       width = int(width)
-       assert indent >= 0
-       assert (not depth) or depth > 0, "depth may not be negative"
-       assert width
-       self.__depth = depth
-       self.__indent_per_level = indent
-       self.__width = width
-       if stream:
-           self.__stream = stream
-       else:
-           import sys
-           self.__stream = sys.stdout
+        """Handle pretty printing operations onto a stream using a set of
+        configured parameters.
+
+        indent
+            Number of spaces to indent for each level of nesting.
+
+        width
+            Attempted maximum number of columns in the output.
+
+        depth
+            The maximum depth to print out nested structures.
+
+        stream
+            The desired output stream.  If omitted (or false), the standard
+            output stream available at construction will be used.
+
+        """
+        indent = int(indent)
+        width = int(width)
+        assert indent >= 0
+        assert (not depth) or depth > 0, "depth may not be negative"
+        assert width
+        self.__depth = depth
+        self.__indent_per_level = indent
+        self.__width = width
+        if stream:
+            self.__stream = stream
+        else:
+            import sys
+            self.__stream = sys.stdout
 
     def pprint(self, object):
-       self.__stream.write(self.pformat(object) + "\n")
+        self.__stream.write(self.pformat(object) + "\n")
 
     def pformat(self, object):
-       sio = StringIO()
-       self.__format(object, sio, 0, 0, {}, 0)
-       return sio.getvalue()
+        sio = StringIO()
+        self.__format(object, sio, 0, 0, {}, 0)
+        return sio.getvalue()
 
     def isrecursive(self, object):
-       self.__recursive = 0
-       self.pformat(object)
-       return self.__recursive
+        self.__recursive = 0
+        self.pformat(object)
+        return self.__recursive
 
     def isreadable(self, object):
-       self.__recursive = 0
-       self.__readable = 1
-       self.pformat(object)
-       return self.__readable and not self.__recursive
+        self.__recursive = 0
+        self.__readable = 1
+        self.pformat(object)
+        return self.__readable and not self.__recursive
 
     def __format(self, object, stream, indent, allowance, context, level):
-       level = level + 1
-       if context.has_key(id(object)):
-           object = _Recursion(object)
-           self.__recursive = 1
-       rep = self.__repr(object, context, level - 1)
-       objid = id(object)
-       context[objid] = 1
-       typ = type(object)
-       sepLines = len(rep) > (self.__width - 1 - indent - allowance)
-
-       if sepLines and typ in (ListType, TupleType):
-           #  Pretty-print the sequence.
-           stream.write((typ is ListType) and '[' or '(')
-           if self.__indent_per_level > 1:
-               stream.write((self.__indent_per_level - 1) * ' ')
-           length = len(object)
-           if length:
-               indent = indent + self.__indent_per_level
-               self.__format(object[0], stream, indent, allowance + 1,
-                             context, level)
-               if len(object) > 1:
-                   for ent in object[1:]:
-                       stream.write(',\n' + ' '*indent)
-                       self.__format(ent, stream, indent,
-                                     allowance + 1, context, level)
-               indent = indent - self.__indent_per_level
-           if typ is TupleType and length == 1:
-               stream.write(',')
-           stream.write(((typ is ListType) and ']') or ')')
-
-       elif sepLines and typ is DictType:
-           stream.write('{')
-           if self.__indent_per_level > 1:
-               stream.write((self.__indent_per_level - 1) * ' ')
-           length = len(object)
-           if length:
-               indent = indent + self.__indent_per_level
-               items  = object.items()
-               items.sort()
-               key, ent = items[0]
-               rep = self.__repr(key, context, level) + ': '
-               stream.write(rep)
-               self.__format(ent, stream, indent + len(rep),
-                             allowance + 1, context, level)
-               if len(items) > 1:
-                   for key, ent in items[1:]:
-                       rep = self.__repr(key, context, level) + ': '
-                       stream.write(',\n' + ' '*indent + rep)
-                       self.__format(ent, stream, indent + len(rep),
-                                     allowance + 1, context, level)
-               indent = indent - self.__indent_per_level
-           stream.write('}')
-
-       else:
-           stream.write(rep)
-           del context[objid]
+        level = level + 1
+        if context.has_key(id(object)):
+            object = _Recursion(object)
+            self.__recursive = 1
+        rep = self.__repr(object, context, level - 1)
+        objid = id(object)
+        context[objid] = 1
+        typ = type(object)
+        sepLines = len(rep) > (self.__width - 1 - indent - allowance)
+
+        if sepLines and typ in (ListType, TupleType):
+            #  Pretty-print the sequence.
+            stream.write((typ is ListType) and '[' or '(')
+            if self.__indent_per_level > 1:
+                stream.write((self.__indent_per_level - 1) * ' ')
+            length = len(object)
+            if length:
+                indent = indent + self.__indent_per_level
+                self.__format(object[0], stream, indent, allowance + 1,
+                              context, level)
+                if len(object) > 1:
+                    for ent in object[1:]:
+                        stream.write(',\n' + ' '*indent)
+                        self.__format(ent, stream, indent,
+                                      allowance + 1, context, level)
+                indent = indent - self.__indent_per_level
+            if typ is TupleType and length == 1:
+                stream.write(',')
+            stream.write(((typ is ListType) and ']') or ')')
+
+        elif sepLines and typ is DictType:
+            stream.write('{')
+            if self.__indent_per_level > 1:
+                stream.write((self.__indent_per_level - 1) * ' ')
+            length = len(object)
+            if length:
+                indent = indent + self.__indent_per_level
+                items  = object.items()
+                items.sort()
+                key, ent = items[0]
+                rep = self.__repr(key, context, level) + ': '
+                stream.write(rep)
+                self.__format(ent, stream, indent + len(rep),
+                              allowance + 1, context, level)
+                if len(items) > 1:
+                    for key, ent in items[1:]:
+                        rep = self.__repr(key, context, level) + ': '
+                        stream.write(',\n' + ' '*indent + rep)
+                        self.__format(ent, stream, indent + len(rep),
+                                      allowance + 1, context, level)
+                indent = indent - self.__indent_per_level
+            stream.write('}')
+
+        else:
+            stream.write(rep)
+            del context[objid]
 
     def __repr(self, object, context, level):
-       repr, readable = _safe_repr(object, context, self.__depth, level)
-       if not readable:
-           self.__readable = 0
-       return repr
+        repr, readable = _safe_repr(object, context, self.__depth, level)
+        if not readable:
+            self.__readable = 0
+        return repr
 
 
 def _safe_repr(object, context, maxlevels=None, level=0):
@@ -190,54 +190,54 @@ def _safe_repr(object, context, maxlevels=None, level=0):
     readable = 1
     typ = type(object)
     if not (typ in (DictType, ListType, TupleType) and object):
-       rep = `object`
-       if rep:
-           if rep[0] == '<':
-               readable = 0
-       else:
-           readable = 0
-       return `object`, readable
+        rep = `object`
+        if rep:
+            if rep[0] == '<':
+                readable = 0
+        else:
+            readable = 0
+        return `object`, readable
     if context.has_key(id(object)):
-       return `_Recursion(object)`, 0
+        return `_Recursion(object)`, 0
     objid = id(object)
     context[objid] = 1
     if typ is DictType:
-       if maxlevels and level >= maxlevels:
-           s = "{...}"
-           readable = 0
-       else:
-           items = object.items()
-           k, v = items[0]
-           krepr, kreadable = _safe_repr(k, context, maxlevels, level)
-           vrepr, vreadable = _safe_repr(v, context, maxlevels, level)
-           readable = readable and kreadable and vreadable
-           s = "{%s: %s" % (krepr, vrepr)
-           for k, v in items[1:]:
-               krepr, kreadable = _safe_repr(k, context, maxlevels, level)
-               vrepr, vreadable = _safe_repr(v, context, maxlevels, level)
-               readable = readable and kreadable and vreadable
-               s = "%s, %s: %s" % (s, krepr, vrepr)
-           s = s + "}"
+        if maxlevels and level >= maxlevels:
+            s = "{...}"
+            readable = 0
+        else:
+            items = object.items()
+            k, v = items[0]
+            krepr, kreadable = _safe_repr(k, context, maxlevels, level)
+            vrepr, vreadable = _safe_repr(v, context, maxlevels, level)
+            readable = readable and kreadable and vreadable
+            s = "{%s: %s" % (krepr, vrepr)
+            for k, v in items[1:]:
+                krepr, kreadable = _safe_repr(k, context, maxlevels, level)
+                vrepr, vreadable = _safe_repr(v, context, maxlevels, level)
+                readable = readable and kreadable and vreadable
+                s = "%s, %s: %s" % (s, krepr, vrepr)
+            s = s + "}"
     else:
-       s, term = (typ is ListType) and ('[', ']') or ('(', ')')
-       if maxlevels and level >= maxlevels:
-           s = s + "..."
-           readable = 0
-       else:
-           subrepr, subreadable = _safe_repr(
-               object[0], context, maxlevels, level)
-           readable = readable and subreadable
-           s = s + subrepr
-           tail = object[1:]
-           if not tail:
-               if typ is TupleType:
-                   s = s + ','
-           for ent in tail:
-               subrepr, subreadable = _safe_repr(
-                   ent, context, maxlevels, level)
-               readable = readable and subreadable
-               s = "%s, %s" % (s, subrepr)
-       s = s + term
+        s, term = (typ is ListType) and ('[', ']') or ('(', ')')
+        if maxlevels and level >= maxlevels:
+            s = s + "..."
+            readable = 0
+        else:
+            subrepr, subreadable = _safe_repr(
+                object[0], context, maxlevels, level)
+            readable = readable and subreadable
+            s = s + subrepr
+            tail = object[1:]
+            if not tail:
+                if typ is TupleType:
+                    s = s + ','
+            for ent in tail:
+                subrepr, subreadable = _safe_repr(
+                    ent, context, maxlevels, level)
+                readable = readable and subreadable
+                s = "%s, %s" % (s, subrepr)
+        s = s + term
     del context[objid]
     return s, readable
 
@@ -246,8 +246,8 @@ class _Recursion:
     # represent a recursive relationship; really only used for the __repr__()
     # method...
     def __init__(self, object):
-       self.__repr = "<Recursion on %s with id=%s>" \
-                     % (type(object).__name__, id(object))
+        self.__repr = "<Recursion on %s with id=%s>" \
+                      % (type(object).__name__, id(object))
 
     def __repr__(self):
-       return self.__repr
+        return self.__repr
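
PrettyPrinter.__format() above switches from one-line to wrapped output whenever a repr would exceed the configured width.  A small sketch of the class interface:

    import pprint
    data = {'name': 'example', 'values': range(12), 'nested': {'a': 1, 'b': 2}}
    pp = pprint.PrettyPrinter(indent=2, width=40)
    pp.pprint(data)                 # wrapped over several lines, dict keys sorted
    text = pp.pformat(data)         # the same layout, returned as a string
    assert pp.isreadable(data)      # no unreadable reprs and no recursion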
index c58cc312656d03f7f04203f75fe804c3ae15a2ad..949de6ca8befc6acb4051aa3e986032e4ad2b4f2 100644 (file)
@@ -20,9 +20,9 @@ def compile(file, cfile=None, dfile=None):
 
     file:  source filename
     cfile: target filename; defaults to source with 'c' or 'o' appended
-          ('c' normally, 'o' in optimizing mode, giving .pyc or .pyo)
+           ('c' normally, 'o' in optimizing mode, giving .pyc or .pyo)
     dfile: purported filename; defaults to source (this is the filename
-          that will show up in error messages)
+           that will show up in error messages)
 
     Note that it isn't necessary to byte-compile Python modules for
     execution efficiency -- Python itself byte-compiles a module when
@@ -44,16 +44,16 @@ def compile(file, cfile=None, dfile=None):
     import os, marshal, __builtin__
     f = open(file)
     try:
-       timestamp = os.fstat(file.fileno())
+        timestamp = os.fstat(file.fileno())
     except AttributeError:
-       timestamp = long(os.stat(file)[8])
+        timestamp = long(os.stat(file)[8])
     codestring = f.read()
     f.close()
     if codestring and codestring[-1] != '\n':
-       codestring = codestring + '\n'
+        codestring = codestring + '\n'
     codeobject = __builtin__.compile(codestring, dfile or file, 'exec')
     if not cfile:
-       cfile = file + (__debug__ and 'c' or 'o')
+        cfile = file + (__debug__ and 'c' or 'o')
     fc = open(cfile, 'wb')
     fc.write('\0\0\0\0')
     wr_long(fc, timestamp)
@@ -63,6 +63,6 @@ def compile(file, cfile=None, dfile=None):
     fc.write(MAGIC)
     fc.close()
     if os.name == 'mac':
-       import macfs
-       macfs.FSSpec(cfile).SetCreatorType('Pyth', 'PYC ')
-       macfs.FSSpec(file).SetCreatorType('Pyth', 'TEXT')
+        import macfs
+        macfs.FSSpec(cfile).SetCreatorType('Pyth', 'PYC ')
+        macfs.FSSpec(file).SetCreatorType('Pyth', 'TEXT')
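
compile() above writes the magic number, the source timestamp, and the marshalled code object that make up a .pyc file.  Typical use; the file names are illustrative:

    import py_compile
    py_compile.compile('mymodule.py')     # writes mymodule.pyc (.pyo under -O)
    py_compile.compile('mymodule.py', dfile='pkg/mymodule.py')
    # dfile only changes the file name reported in tracebacks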
index e67d142ff17c796115483dcba69215586091ffa7..41987739a2b26ce7e0c1cfecebc3b45caebfdda7 100644 (file)
--- a/Lib/re.py
+++ b/Lib/re.py
@@ -30,12 +30,12 @@ _MAXCACHE = 20
 def _cachecompile(pattern, flags=0):
     key = (pattern, flags)
     try:
-       return _cache[key]
+        return _cache[key]
     except KeyError:
-       pass
+        pass
     value = compile(pattern, flags)
     if len(_cache) >= _MAXCACHE:
-       _cache.clear()
+        _cache.clear()
     _cache[key] = value
     return value
 
@@ -47,17 +47,17 @@ def search(pattern, string, flags=0):
   
 def sub(pattern, repl, string, count=0):
     if type(pattern) == type(''):
-       pattern = _cachecompile(pattern)
+        pattern = _cachecompile(pattern)
     return pattern.sub(repl, string, count)
 
 def subn(pattern, repl, string, count=0):
     if type(pattern) == type(''):
-       pattern = _cachecompile(pattern)
+        pattern = _cachecompile(pattern)
     return pattern.subn(repl, string, count)
   
 def split(pattern, string, maxsplit=0):
     if type(pattern) == type(''):
-       pattern = _cachecompile(pattern)
+        pattern = _cachecompile(pattern)
     return pattern.split(string, maxsplit)
 
 def escape(pattern):
@@ -65,10 +65,10 @@ def escape(pattern):
     result = []
     alphanum=string.letters+'_'+string.digits
     for char in pattern:
-       if char not in alphanum:
-           if char == '\000': result.append(r'\000')
-           else: result.append('\\' + char)
-       else: result.append(char)
+        if char not in alphanum:
+            if char == '\000': result.append(r'\000')
+            else: result.append('\\' + char)
+        else: result.append(char)
     return string.join(result, '')
 
 def compile(pattern, flags=0):
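
The module-level sub(), subn(), split(), and escape() wrappers above compile string patterns on demand through the small _cachecompile() cache.  A few self-contained calls:

    import re
    assert re.escape('a.b*c') == 'a\\.b\\*c'
    assert re.split(r'\s*,\s*', 'a, b ,c') == ['a', 'b', 'c']
    text, count = re.subn(r'\d+', '#', 'room 12, floor 3')
    assert (text, count) == ('room #, floor #', 2)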
@@ -84,137 +84,137 @@ def compile(pattern, flags=0):
 
 class RegexObject:
     def __init__(self, pattern, flags, code, groupindex):
-       self.code = code 
-       self.flags = flags
-       self.pattern = pattern
-       self.groupindex = groupindex
+        self.code = code 
+        self.flags = flags
+        self.pattern = pattern
+        self.groupindex = groupindex
 
     def search(self, string, pos=0, endpos=None):
-       """Scan through string looking for a match to the pattern, returning
-       a MatchObject instance, or None if no match was found."""
+        """Scan through string looking for a match to the pattern, returning
+        a MatchObject instance, or None if no match was found."""
 
-       if endpos is None or endpos>len(string): 
-           endpos=len(string)
-       if endpos<pos: endpos=pos
-       regs = self.code.match(string, pos, endpos, 0)
-       if regs is None:
-           return None
-       self._num_regs=len(regs)
-       
-       return MatchObject(self,
-                          string,
-                          pos, endpos,
-                          regs)
+        if endpos is None or endpos>len(string): 
+            endpos=len(string)
+        if endpos<pos: endpos=pos
+        regs = self.code.match(string, pos, endpos, 0)
+        if regs is None:
+            return None
+        self._num_regs=len(regs)
+        
+        return MatchObject(self,
+                           string,
+                           pos, endpos,
+                           regs)
     
     def match(self, string, pos=0, endpos=None):
-       """Try to apply the pattern at the start of the string, returning
-       a MatchObject instance, or None if no match was found."""
+        """Try to apply the pattern at the start of the string, returning
+        a MatchObject instance, or None if no match was found."""
 
-       if endpos is None or endpos>len(string): 
-           endpos=len(string)
-       if endpos<pos: endpos=pos
-       regs = self.code.match(string, pos, endpos, ANCHORED)
-       if regs is None:
-           return None
-       self._num_regs=len(regs)
-       return MatchObject(self,
-                          string,
-                          pos, endpos,
-                          regs)
+        if endpos is None or endpos>len(string): 
+            endpos=len(string)
+        if endpos<pos: endpos=pos
+        regs = self.code.match(string, pos, endpos, ANCHORED)
+        if regs is None:
+            return None
+        self._num_regs=len(regs)
+        return MatchObject(self,
+                           string,
+                           pos, endpos,
+                           regs)
     
     def sub(self, repl, string, count=0):
-       """Return the string obtained by replacing the leftmost
-       non-overlapping occurrences of the pattern in string by the
-       replacement repl""" 
+        """Return the string obtained by replacing the leftmost
+        non-overlapping occurrences of the pattern in string by the
+        replacement repl""" 
 
         return self.subn(repl, string, count)[0]
     
     def subn(self, repl, source, count=0): 
-       """Return a 2-tuple containing (new_string, number).
-       new_string is the string obtained by replacing the leftmost
-       non-overlapping occurrences of the pattern in string by the
-       replacement repl.  number is the number of substitutions that
-       were made."""
+        """Return a 2-tuple containing (new_string, number).
+        new_string is the string obtained by replacing the leftmost
+        non-overlapping occurrences of the pattern in string by the
+        replacement repl.  number is the number of substitutions that
+        were made."""
 
-       if count < 0:
-           raise error, "negative substitution count"
-       if count == 0:
-           import sys
-           count = sys.maxint
-       if type(repl) == type(''):
-           if '\\' in repl:
-               repl = lambda m, r=repl: pcre_expand(m, r)
-           else:
-               repl = lambda m, r=repl: r
-       n = 0           # Number of matches
-       pos = 0         # Where to start searching
-       lastmatch = -1  # End of last match
-       results = []    # Substrings making up the result
-       end = len(source)
-       while n < count and pos <= end:
-           m = self.search(source, pos)
-           if not m:
-               break
-           i, j = m.span(0)
-           if i == j == lastmatch:
-               # Empty match adjacent to previous match
-               pos = pos + 1
-               results.append(source[lastmatch:pos])
-               continue
-           if pos < i:
-               results.append(source[pos:i])
-           results.append(repl(m))
-           pos = lastmatch = j
-           if i == j:
-               # Last match was empty; don't try here again
-               pos = pos + 1
-               results.append(source[lastmatch:pos])
-           n = n + 1
-       results.append(source[pos:])
-       return (string.join(results, ''), n)
-                                                                           
+        if count < 0:
+            raise error, "negative substitution count"
+        if count == 0:
+            import sys
+            count = sys.maxint
+        if type(repl) == type(''):
+            if '\\' in repl:
+                repl = lambda m, r=repl: pcre_expand(m, r)
+            else:
+                repl = lambda m, r=repl: r
+        n = 0           # Number of matches
+        pos = 0         # Where to start searching
+        lastmatch = -1  # End of last match
+        results = []    # Substrings making up the result
+        end = len(source)
+        while n < count and pos <= end:
+            m = self.search(source, pos)
+            if not m:
+                break
+            i, j = m.span(0)
+            if i == j == lastmatch:
+                # Empty match adjacent to previous match
+                pos = pos + 1
+                results.append(source[lastmatch:pos])
+                continue
+            if pos < i:
+                results.append(source[pos:i])
+            results.append(repl(m))
+            pos = lastmatch = j
+            if i == j:
+                # Last match was empty; don't try here again
+                pos = pos + 1
+                results.append(source[lastmatch:pos])
+            n = n + 1
+        results.append(source[pos:])
+        return (string.join(results, ''), n)
+                                                                            
     def split(self, source, maxsplit=0):
-       """Split \var{string} by the occurrences of the pattern,
-       returning a list containing the resulting substrings."""
+        """Split \var{string} by the occurrences of the pattern,
+        returning a list containing the resulting substrings."""
 
-       if maxsplit < 0:
-           raise error, "negative split count"
-       if maxsplit == 0:
-           import sys
-           maxsplit = sys.maxint
-       n = 0
-       pos = 0
-       lastmatch = 0
-       results = []
-       end = len(source)
-       while n < maxsplit:
-           m = self.search(source, pos)
-           if not m:
-               break
-           i, j = m.span(0)
-           if i == j:
-               # Empty match
-               if pos >= end:
-                   break
-               pos = pos+1
-               continue
-           results.append(source[lastmatch:i])
-           g = m.groups()
-           if g:
-               if type(g)==type( "" ): g = [g]
-               results[len(results):] = list(g)
-           pos = lastmatch = j
-           n = n + 1
-       results.append(source[lastmatch:])
-       return results
+        if maxsplit < 0:
+            raise error, "negative split count"
+        if maxsplit == 0:
+            import sys
+            maxsplit = sys.maxint
+        n = 0
+        pos = 0
+        lastmatch = 0
+        results = []
+        end = len(source)
+        while n < maxsplit:
+            m = self.search(source, pos)
+            if not m:
+                break
+            i, j = m.span(0)
+            if i == j:
+                # Empty match
+                if pos >= end:
+                    break
+                pos = pos+1
+                continue
+            results.append(source[lastmatch:i])
+            g = m.groups()
+            if g:
+                if type(g)==type( "" ): g = [g]
+                results[len(results):] = list(g)
+            pos = lastmatch = j
+            n = n + 1
+        results.append(source[lastmatch:])
+        return results
 
     # The following 3 functions were contributed by Mike Fletcher, and
     # allow pickling and unpickling of RegexObject instances.
     def __getinitargs__(self):
         return (None,None,None,None) # any 4 elements, to work around
                                      # problems with the
-                                    # pickle/cPickle modules not yet 
-                                    # ignoring the __init__ function
+                                     # pickle/cPickle modules not yet 
+                                     # ignoring the __init__ function
     def __getstate__(self):
         return self.pattern, self.flags, self.groupindex
     def __setstate__(self, statetuple):
@@ -225,70 +225,70 @@ class RegexObject:
 
 class MatchObject:
     def __init__(self, re, string, pos, endpos, regs):
-       self.re = re
-       self.string = string
-       self.pos = pos 
-       self.endpos = endpos
-       self.regs = regs
-       
+        self.re = re
+        self.string = string
+        self.pos = pos 
+        self.endpos = endpos
+        self.regs = regs
+        
     def start(self, g = 0):
-       "Return the start of the substring matched by group g"
-       if type(g) == type(''):
-           try:
-               g = self.re.groupindex[g]
-           except (KeyError, TypeError):
-               raise IndexError, ('group "' + g + '" is undefined')
-       return self.regs[g][0]
+        "Return the start of the substring matched by group g"
+        if type(g) == type(''):
+            try:
+                g = self.re.groupindex[g]
+            except (KeyError, TypeError):
+                raise IndexError, ('group "' + g + '" is undefined')
+        return self.regs[g][0]
     
     def end(self, g = 0):
-       "Return the end of the substring matched by group g"
-       if type(g) == type(''):
-           try:
-               g = self.re.groupindex[g]
-           except (KeyError, TypeError):
-               raise IndexError, ('group "' + g + '" is undefined')
-       return self.regs[g][1]
+        "Return the end of the substring matched by group g"
+        if type(g) == type(''):
+            try:
+                g = self.re.groupindex[g]
+            except (KeyError, TypeError):
+                raise IndexError, ('group "' + g + '" is undefined')
+        return self.regs[g][1]
     
     def span(self, g = 0):
-       """Return a tuple containing the start,end of the substring 
-       matched by group g"""
-       if type(g) == type(''):
-           try:
-               g = self.re.groupindex[g]
-           except (KeyError, TypeError):
-               raise IndexError, ('group "' + g + '" is undefined')
-       return self.regs[g]
+        """Return a tuple containing the start,end of the substring 
+        matched by group g"""
+        if type(g) == type(''):
+            try:
+                g = self.re.groupindex[g]
+            except (KeyError, TypeError):
+                raise IndexError, ('group "' + g + '" is undefined')
+        return self.regs[g]
     
     def groups(self):
-       "Return a tuple containing all subgroups of the match object"
-       result = []
-       for g in range(1, self.re._num_regs):
-           if (self.regs[g][0] == -1) or (self.regs[g][1] == -1):
-               result.append(None)
-           else:
-               result.append(self.string[self.regs[g][0]:self.regs[g][1]])
-       return tuple(result)
+        "Return a tuple containing all subgroups of the match object"
+        result = []
+        for g in range(1, self.re._num_regs):
+            if (self.regs[g][0] == -1) or (self.regs[g][1] == -1):
+                result.append(None)
+            else:
+                result.append(self.string[self.regs[g][0]:self.regs[g][1]])
+        return tuple(result)
 
     def group(self, *groups):
-       "Return one or more groups of the match."
-       if len(groups) == 0:
-           groups = (0,)
-       result = []
-       for g in groups:
-           if type(g) == type(''):
-               try:
-                   g = self.re.groupindex[g]
-               except (KeyError, TypeError):
-                   raise IndexError, ('group "' + g + '" is undefined')
-           if len(self.regs)<=g: raise IndexError, ('group "' + str(g) + '" is undefined')
-           elif (self.regs[g][0] == -1) or (self.regs[g][1] == -1):
-               result.append(None)
-           else:
-               result.append(self.string[self.regs[g][0]:self.regs[g][1]])
-       if len(result) > 1:
-           return tuple(result)
-       elif len(result) == 1:
-           return result[0]
-       else:
-           return ()
+        "Return one or more groups of the match."
+        if len(groups) == 0:
+            groups = (0,)
+        result = []
+        for g in groups:
+            if type(g) == type(''):
+                try:
+                    g = self.re.groupindex[g]
+                except (KeyError, TypeError):
+                    raise IndexError, ('group "' + g + '" is undefined')
+            if len(self.regs)<=g: raise IndexError, ('group "' + str(g) + '" is undefined')
+            elif (self.regs[g][0] == -1) or (self.regs[g][1] == -1):
+                result.append(None)
+            else:
+                result.append(self.string[self.regs[g][0]:self.regs[g][1]])
+        if len(result) > 1:
+            return tuple(result)
+        elif len(result) == 1:
+            return result[0]
+        else:
+            return ()
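
A short usage sketch of the subn()/split()/group() semantics re-indented above; this is
illustrative only and assumes re.compile() and re.match() from the same module, which
are defined earlier in the file.

    import re

    p = re.compile('[,;]')
    print p.subn(' ', 'a,b;c')          # ('a b c', 2): new text plus match count
    print p.split('a,b;;c')             # ['a', 'b', '', 'c']

    m = re.match(r'(\w+) (\w+)', 'hello world')
    print m.group(1, 2)                 # ('hello', 'world')
    print m.span(2)                     # (6, 11)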
 
index f0b61fc11513963a95305fb8563e966fa1d77df1..2b044b636ac22a3e967af2fee96ca22af316ffa2 100755 (executable)
@@ -108,36 +108,36 @@ def convert(s, syntax=None):
     """
     table = mastertable.copy()
     if syntax is None:
-       syntax = regex.get_syntax()
+        syntax = regex.get_syntax()
     if syntax & RE_NO_BK_PARENS:
-       del table[r'\('], table[r'\)']
-       del table['('], table[')']
+        del table[r'\('], table[r'\)']
+        del table['('], table[')']
     if syntax & RE_NO_BK_VBAR:
-       del table[r'\|']
-       del table['|']
+        del table[r'\|']
+        del table['|']
     if syntax & RE_BK_PLUS_QM:
-       table['+'] = r'\+'
-       table['?'] = r'\?'
-       table[r'\+'] = '+'
-       table[r'\?'] = '?'
+        table['+'] = r'\+'
+        table['?'] = r'\?'
+        table[r'\+'] = '+'
+        table[r'\?'] = '?'
     if syntax & RE_NEWLINE_OR:
-       table['\n'] = '|'
+        table['\n'] = '|'
     res = ""
 
     i = 0
     end = len(s)
     while i < end:
-       c = s[i]
-       i = i+1
-       if c == '\\':
-           c = s[i]
-           i = i+1
-           key = '\\' + c
-           key = table.get(key, key)
-           res = res + key
-       else:
-           c = table.get(c, c)
-           res = res + c
+        c = s[i]
+        i = i+1
+        if c == '\\':
+            c = s[i]
+            i = i+1
+            key = '\\' + c
+            key = table.get(key, key)
+            res = res + key
+        else:
+            c = table.get(c, c)
+            res = res + c
     return res
 
 
@@ -155,21 +155,21 @@ def quote(s, quote=None):
 
     """
     if quote is None:
-       q = "'"
-       altq = "'"
-       if q in s and altq not in s:
-           q = altq
+        q = "'"
+        altq = "'"
+        if q in s and altq not in s:
+            q = altq
     else:
-       assert quote in ('"', "'")
-       q = quote
+        assert quote in ('"', "'")
+        q = quote
     res = q
     for c in s:
-       if c == q: c = '\\' + c
-       elif c < ' ' or c > '~': c = "\\%03o" % ord(c)
-       res = res + c
+        if c == q: c = '\\' + c
+        elif c < ' ' or c > '~': c = "\\%03o" % ord(c)
+        res = res + c
     res = res + q
     if '\\' in res:
-       res = 'r' + res
+        res = 'r' + res
     return res
 
 
@@ -179,7 +179,7 @@ def main():
     s = eval(sys.stdin.read())
     sys.stdout.write(quote(convert(s)))
     if sys.stdout.isatty():
-       sys.stdout.write("\n")
+        sys.stdout.write("\n")
 
 
 if __name__ == '__main__':
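
An illustrative sketch of the two helpers above.  quote() picks a quote character,
escapes control characters as octal, and adds an r prefix whenever a backslash ends up
in the literal; the convert() line assumes the default emacs-style table (mastertable
is not shown in this hunk) maps \( \) and \| to plain grouping and alternation.

    print quote('spam')                     # 'spam'
    print quote('a\\b')                     # r'a\b'
    print quote(convert(r'\(foo\|bar\)'))   # '(foo|bar)', assuming the default table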
index 8ddac679628b8047f42b9967b23e68bd2825e9db..e6e0696d406f3aae970c59937ddb78aa982caa16 100644 (file)
@@ -600,7 +600,7 @@ class AddrlistClass:
 # Parse a date field
 
 _monthnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul',
-              'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+               'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
 _daynames = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
 
 # The timezone table does not include the military time zones defined
@@ -610,12 +610,12 @@ _daynames = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
 # instead of timezone names.
 
 _timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0, 
-             'AST': -400, 'ADT': -300,  # Atlantic standard
-             'EST': -500, 'EDT': -400,  # Eastern
-             'CST': -600, 'CDT':-500,   # Centreal
-             'MST':-700, 'MDT':-600,    # Mountain
-             'PST':-800, 'PDT':-700     # Pacific
-             }    
+              'AST': -400, 'ADT': -300,  # Atlantic standard
+              'EST': -500, 'EDT': -400,  # Eastern
+              'CST': -600, 'CDT':-500,   # Central
+              'MST':-700, 'MDT':-600,    # Mountain
+              'PST':-800, 'PDT':-700     # Pacific
+              }    
 
 
 def parsedate_tz(data):
@@ -672,12 +672,12 @@ def parsedate_tz(data):
             pass
     # Convert a timezone offset into seconds ; -0500 -> -18000
     if tzoffset:
-       if tzoffset < 0:
-           tzsign = -1
-           tzoffset = -tzoffset
-       else:
-           tzsign = 1
-       tzoffset = tzsign * ( (tzoffset/100)*3600 + (tzoffset % 100)*60)
+        if tzoffset < 0:
+            tzsign = -1
+            tzoffset = -tzoffset
+        else:
+            tzsign = 1
+        tzoffset = tzsign * ( (tzoffset/100)*3600 + (tzoffset % 100)*60)
     tuple = (yy, mm, dd, thh, tmm, tss, 0, 0, 0, tzoffset)
     return tuple
 
@@ -700,11 +700,11 @@ def mktime_tz(data):
     
     """
     if data[9] is None:
-       # No zone info, so localtime is better assumption than GMT
-       return time.mktime(data[:8] + (-1,))
+        # No zone info, so localtime is better assumption than GMT
+        return time.mktime(data[:8] + (-1,))
     else:
-       t = time.mktime(data[:8] + (0,))
-       return t - data[9] - time.timezone
+        t = time.mktime(data[:8] + (0,))
+        return t - data[9] - time.timezone
 
 
 # When used as script, run a small test program.
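
An illustrative call to the two date helpers above: the input is a typical RFC 822
Date: header, and the -0500 zone becomes the -18000 second offset mentioned in the
comment.

    d = parsedate_tz('Thu, 26 Mar 1998 21:13:24 -0500')
    print d              # (1998, 3, 26, 21, 13, 24, 0, 0, 0, -18000)
    print mktime_tz(d)   # seconds since the epoch, normalized to UTC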
index 4deb0bceb43b3b5fcac43571c969651e1e713a42..e0ae72c9aca3f2b94ed37428cc1d26d8253705d7 100644 (file)
@@ -47,60 +47,60 @@ import __main__
 class Completer:
 
     def complete(self, text, state):
-       """Return the next possible completion for 'text'.
+        """Return the next possible completion for 'text'.
 
-       This is called successively with state == 0, 1, 2, ... until it
-       returns None.  The completion should begin with 'text'.
+        This is called successively with state == 0, 1, 2, ... until it
+        returns None.  The completion should begin with 'text'.
 
-       """
-       if state == 0:
-           if "." in text:
-               self.matches = self.attr_matches(text)
-           else:
-               self.matches = self.global_matches(text)
-       return self.matches[state]
+        """
+        if state == 0:
+            if "." in text:
+                self.matches = self.attr_matches(text)
+            else:
+                self.matches = self.global_matches(text)
+        return self.matches[state]
 
     def global_matches(self, text):
-       """Compute matches when text is a simple name.
-
-       Return a list of all keywords, built-in functions and names
-       currently defines in __main__ that match.
-
-       """
-       import keyword
-       matches = []
-       n = len(text)
-       for list in [keyword.kwlist,
-                    __builtin__.__dict__.keys(),
-                    __main__.__dict__.keys()]:
-           for word in list:
-               if word[:n] == text:
-                   matches.append(word)
-       return matches
+        """Compute matches when text is a simple name.
+
+        Return a list of all keywords, built-in functions and names
+        currently defined in __main__ that match.
+
+        """
+        import keyword
+        matches = []
+        n = len(text)
+        for list in [keyword.kwlist,
+                     __builtin__.__dict__.keys(),
+                     __main__.__dict__.keys()]:
+            for word in list:
+                if word[:n] == text:
+                    matches.append(word)
+        return matches
 
     def attr_matches(self, text):
-       """Compute matches when text contains a dot.
-
-       Assuming the text is of the form NAME.NAME....[NAME], and is
-       evaluabable in the globals of __main__, it will be evaluated
-       and its attributes (as revealed by dir()) are used as possible
-       completions.
-
-       WARNING: this can still invoke arbitrary C code, if an object
-       with a __getattr__ hook is evaluated.
-
-       """
-       import re
-       m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
-       if not m:
-           return
-       expr, attr = m.group(1, 3)
-       words = dir(eval(expr, __main__.__dict__))
-       matches = []
-       n = len(attr)
-       for word in words:
-           if word[:n] == attr:
-               matches.append("%s.%s" % (expr, word))
-       return matches
+        """Compute matches when text contains a dot.
+
+        Assuming the text is of the form NAME.NAME....[NAME], and is
+        evaluable in the globals of __main__, it will be evaluated
+        and its attributes (as revealed by dir()) are used as possible
+        completions.
+
+        WARNING: this can still invoke arbitrary C code, if an object
+        with a __getattr__ hook is evaluated.
+
+        """
+        import re
+        m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
+        if not m:
+            return
+        expr, attr = m.group(1, 3)
+        words = dir(eval(expr, __main__.__dict__))
+        matches = []
+        n = len(attr)
+        for word in words:
+            if word[:n] == attr:
+                matches.append("%s.%s" % (expr, word))
+        return matches
 
 readline.set_completer(Completer().complete)
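
A hand-driven sketch of the Completer above (illustrative; it assumes readline is
importable and that sys has been imported into __main__).  complete() raises IndexError
once the match list is exhausted, which is how readline detects the end of the
candidates.

    import sys
    c = Completer()
    state = 0
    while 1:
        try:
            print c.complete('im', state)   # 'import', plus any other matching names
        except IndexError:
            break
        state = state + 1
    print c.attr_matches('sys.ver')         # e.g. ['sys.version']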
index 8baf519e709420df0dc05add6fafdf19d6d1bdb2..035e891e5e0f73cdaebc1a410437061a213839ff 100644 (file)
@@ -16,9 +16,9 @@ import string
 
 interesting = re.compile('[&<]')
 incomplete = re.compile('&([a-zA-Z][a-zA-Z0-9]*|#[0-9]*)?|'
-                          '<([a-zA-Z][^<>]*|'
-                             '/([a-zA-Z][^<>]*)?|'
-                             '![^<>]*)?')
+                           '<([a-zA-Z][^<>]*|'
+                              '/([a-zA-Z][^<>]*)?|'
+                              '![^<>]*)?')
 
 entityref = re.compile('&([a-zA-Z][a-zA-Z0-9]*)[^a-zA-Z0-9]')
 charref = re.compile('&#([0-9]+)[^0-9]')
@@ -53,300 +53,300 @@ class SGMLParser:
 
     # Interface -- initialize and reset this instance
     def __init__(self, verbose=0):
-       self.verbose = verbose
-       self.reset()
+        self.verbose = verbose
+        self.reset()
 
     # Interface -- reset this instance.  Loses all unprocessed data
     def reset(self):
-       self.rawdata = ''
-       self.stack = []
-       self.lasttag = '???'
-       self.nomoretags = 0
-       self.literal = 0
+        self.rawdata = ''
+        self.stack = []
+        self.lasttag = '???'
+        self.nomoretags = 0
+        self.literal = 0
 
     # For derived classes only -- enter literal mode (CDATA) till EOF
     def setnomoretags(self):
-       self.nomoretags = self.literal = 1
+        self.nomoretags = self.literal = 1
 
     # For derived classes only -- enter literal mode (CDATA)
     def setliteral(self, *args):
-       self.literal = 1
+        self.literal = 1
 
     # Interface -- feed some data to the parser.  Call this as
     # often as you want, with as little or as much text as you
     # want (may include '\n').  (This just saves the text, all the
     # processing is done by goahead().)
     def feed(self, data):
-       self.rawdata = self.rawdata + data
-       self.goahead(0)
+        self.rawdata = self.rawdata + data
+        self.goahead(0)
 
     # Interface -- handle the remaining data
     def close(self):
-       self.goahead(1)
+        self.goahead(1)
 
     # Internal -- handle data as far as reasonable.  May leave state
     # and data to be processed by a subsequent call.  If 'end' is
     # true, force handling all data as if followed by EOF marker.
     def goahead(self, end):
-       rawdata = self.rawdata
-       i = 0
-       n = len(rawdata)
-       while i < n:
-           if self.nomoretags:
-               self.handle_data(rawdata[i:n])
-               i = n
-               break
-           match = interesting.search(rawdata, i)
-           if match: j = match.start(0)
-           else: j = n
-           if i < j: self.handle_data(rawdata[i:j])
-           i = j
-           if i == n: break
-           if rawdata[i] == '<':
-               if starttagopen.match(rawdata, i):
-                   if self.literal:
-                       self.handle_data(rawdata[i])
-                       i = i+1
-                       continue
-                   k = self.parse_starttag(i)
-                   if k < 0: break
-                   i = k
-                   continue
-               if endtagopen.match(rawdata, i):
-                   k = self.parse_endtag(i)
-                   if k < 0: break
-                   i =  k
-                   self.literal = 0
-                   continue
-               if commentopen.match(rawdata, i):
-                   if self.literal:
-                       self.handle_data(rawdata[i])
-                       i = i+1
-                       continue
-                   k = self.parse_comment(i)
-                   if k < 0: break
-                   i = i+k
-                   continue
-               match = special.match(rawdata, i)
-               if match:
-                   if self.literal:
-                       self.handle_data(rawdata[i])
-                       i = i+1
-                       continue
-                   i = match.end(0)
-                   continue
-           elif rawdata[i] == '&':
-               match = charref.match(rawdata, i)
-               if match:
-                   name = match.group(1)
-                   self.handle_charref(name)
-                   i = match.end(0)
-                   if rawdata[i-1] != ';': i = i-1
-                   continue
-               match = entityref.match(rawdata, i)
-               if match:
-                   name = match.group(1)
-                   self.handle_entityref(name)
-                   i = match.end(0)
-                   if rawdata[i-1] != ';': i = i-1
-                   continue
-           else:
-               raise RuntimeError, 'neither < nor & ??'
-           # We get here only if incomplete matches but
-           # nothing else
-           match = incomplete.match(rawdata, i)
-           if not match:
-               self.handle_data(rawdata[i])
-               i = i+1
-               continue
-           j = match.end(0)
-           if j == n:
-               break # Really incomplete
-           self.handle_data(rawdata[i:j])
-           i = j
-       # end while
-       if end and i < n:
-           self.handle_data(rawdata[i:n])
-           i = n
-       self.rawdata = rawdata[i:]
-       # XXX if end: check for empty stack
+        rawdata = self.rawdata
+        i = 0
+        n = len(rawdata)
+        while i < n:
+            if self.nomoretags:
+                self.handle_data(rawdata[i:n])
+                i = n
+                break
+            match = interesting.search(rawdata, i)
+            if match: j = match.start(0)
+            else: j = n
+            if i < j: self.handle_data(rawdata[i:j])
+            i = j
+            if i == n: break
+            if rawdata[i] == '<':
+                if starttagopen.match(rawdata, i):
+                    if self.literal:
+                        self.handle_data(rawdata[i])
+                        i = i+1
+                        continue
+                    k = self.parse_starttag(i)
+                    if k < 0: break
+                    i = k
+                    continue
+                if endtagopen.match(rawdata, i):
+                    k = self.parse_endtag(i)
+                    if k < 0: break
+                    i =  k
+                    self.literal = 0
+                    continue
+                if commentopen.match(rawdata, i):
+                    if self.literal:
+                        self.handle_data(rawdata[i])
+                        i = i+1
+                        continue
+                    k = self.parse_comment(i)
+                    if k < 0: break
+                    i = i+k
+                    continue
+                match = special.match(rawdata, i)
+                if match:
+                    if self.literal:
+                        self.handle_data(rawdata[i])
+                        i = i+1
+                        continue
+                    i = match.end(0)
+                    continue
+            elif rawdata[i] == '&':
+                match = charref.match(rawdata, i)
+                if match:
+                    name = match.group(1)
+                    self.handle_charref(name)
+                    i = match.end(0)
+                    if rawdata[i-1] != ';': i = i-1
+                    continue
+                match = entityref.match(rawdata, i)
+                if match:
+                    name = match.group(1)
+                    self.handle_entityref(name)
+                    i = match.end(0)
+                    if rawdata[i-1] != ';': i = i-1
+                    continue
+            else:
+                raise RuntimeError, 'neither < nor & ??'
+            # We get here only if incomplete matches but
+            # nothing else
+            match = incomplete.match(rawdata, i)
+            if not match:
+                self.handle_data(rawdata[i])
+                i = i+1
+                continue
+            j = match.end(0)
+            if j == n:
+                break # Really incomplete
+            self.handle_data(rawdata[i:j])
+            i = j
+        # end while
+        if end and i < n:
+            self.handle_data(rawdata[i:n])
+            i = n
+        self.rawdata = rawdata[i:]
+        # XXX if end: check for empty stack
 
     # Internal -- parse comment, return length or -1 if not terminated
     def parse_comment(self, i):
-       rawdata = self.rawdata
-       if rawdata[i:i+4] <> '<!--':
-           raise RuntimeError, 'unexpected call to handle_comment'
-       match = commentclose.search(rawdata, i+4)
-       if not match:
-           return -1
-       j = match.start(0)
-       self.handle_comment(rawdata[i+4: j])
-       j = match.end(0)
-       return j-i
+        rawdata = self.rawdata
+        if rawdata[i:i+4] <> '<!--':
+            raise RuntimeError, 'unexpected call to handle_comment'
+        match = commentclose.search(rawdata, i+4)
+        if not match:
+            return -1
+        j = match.start(0)
+        self.handle_comment(rawdata[i+4: j])
+        j = match.end(0)
+        return j-i
 
     # Internal -- handle starttag, return length or -1 if not terminated
     def parse_starttag(self, i):
-       rawdata = self.rawdata
-       if shorttagopen.match(rawdata, i):
-           # SGML shorthand: <tag/data/ == <tag>data</tag>
-           # XXX Can data contain &... (entity or char refs)?
-           # XXX Can data contain < or > (tag characters)?
-           # XXX Can there be whitespace before the first /?
-           match = shorttag.match(rawdata, i)
-           if not match:
-               return -1
-           tag, data = match.group(1, 2)
-           tag = string.lower(tag)
-           self.finish_shorttag(tag, data)
-           k = match.end(0)
-           return k
-       # XXX The following should skip matching quotes (' or ")
-       match = endbracket.search(rawdata, i+1)
-       if not match:
-           return -1
-       j = match.start(0)
-       # Now parse the data between i+1 and j into a tag and attrs
-       attrs = []
-       if rawdata[i:i+2] == '<>':
-           # SGML shorthand: <> == <last open tag seen>
-           k = j
-           tag = self.lasttag
-       else:
-           match = tagfind.match(rawdata, i+1)
-           if not match:
-               raise RuntimeError, 'unexpected call to parse_starttag'
-           k = match.end(0)
-           tag = string.lower(rawdata[i+1:k])
-           self.lasttag = tag
-       while k < j:
-           match = attrfind.match(rawdata, k)
-           if not match: break
-           attrname, rest, attrvalue = match.group(1, 2, 3)
-           if not rest:
-               attrvalue = attrname
-           elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
-                attrvalue[:1] == '"' == attrvalue[-1:]:
-               attrvalue = attrvalue[1:-1]
-           attrs.append((string.lower(attrname), attrvalue))
-           k = match.end(0)
-       if rawdata[j] == '>':
-           j = j+1
-       self.finish_starttag(tag, attrs)
-       return j
+        rawdata = self.rawdata
+        if shorttagopen.match(rawdata, i):
+            # SGML shorthand: <tag/data/ == <tag>data</tag>
+            # XXX Can data contain &... (entity or char refs)?
+            # XXX Can data contain < or > (tag characters)?
+            # XXX Can there be whitespace before the first /?
+            match = shorttag.match(rawdata, i)
+            if not match:
+                return -1
+            tag, data = match.group(1, 2)
+            tag = string.lower(tag)
+            self.finish_shorttag(tag, data)
+            k = match.end(0)
+            return k
+        # XXX The following should skip matching quotes (' or ")
+        match = endbracket.search(rawdata, i+1)
+        if not match:
+            return -1
+        j = match.start(0)
+        # Now parse the data between i+1 and j into a tag and attrs
+        attrs = []
+        if rawdata[i:i+2] == '<>':
+            # SGML shorthand: <> == <last open tag seen>
+            k = j
+            tag = self.lasttag
+        else:
+            match = tagfind.match(rawdata, i+1)
+            if not match:
+                raise RuntimeError, 'unexpected call to parse_starttag'
+            k = match.end(0)
+            tag = string.lower(rawdata[i+1:k])
+            self.lasttag = tag
+        while k < j:
+            match = attrfind.match(rawdata, k)
+            if not match: break
+            attrname, rest, attrvalue = match.group(1, 2, 3)
+            if not rest:
+                attrvalue = attrname
+            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
+                 attrvalue[:1] == '"' == attrvalue[-1:]:
+                attrvalue = attrvalue[1:-1]
+            attrs.append((string.lower(attrname), attrvalue))
+            k = match.end(0)
+        if rawdata[j] == '>':
+            j = j+1
+        self.finish_starttag(tag, attrs)
+        return j
 
     # Internal -- parse endtag
     def parse_endtag(self, i):
-       rawdata = self.rawdata
-       match = endbracket.search(rawdata, i+1)
-       if not match:
-           return -1
-       j = match.start(0)
-       tag = string.lower(string.strip(rawdata[i+2:j]))
-       if rawdata[j] == '>':
-           j = j+1
-       self.finish_endtag(tag)
-       return j
+        rawdata = self.rawdata
+        match = endbracket.search(rawdata, i+1)
+        if not match:
+            return -1
+        j = match.start(0)
+        tag = string.lower(string.strip(rawdata[i+2:j]))
+        if rawdata[j] == '>':
+            j = j+1
+        self.finish_endtag(tag)
+        return j
 
     # Internal -- finish parsing of <tag/data/ (same as <tag>data</tag>)
     def finish_shorttag(self, tag, data):
-       self.finish_starttag(tag, [])
-       self.handle_data(data)
-       self.finish_endtag(tag)
+        self.finish_starttag(tag, [])
+        self.handle_data(data)
+        self.finish_endtag(tag)
 
     # Internal -- finish processing of start tag
     # Return -1 for unknown tag, 0 for open-only tag, 1 for balanced tag
     def finish_starttag(self, tag, attrs):
-       try:
-           method = getattr(self, 'start_' + tag)
-       except AttributeError:
-           try:
-               method = getattr(self, 'do_' + tag)
-           except AttributeError:
-               self.unknown_starttag(tag, attrs)
-               return -1
-           else:
-               self.handle_starttag(tag, method, attrs)
-               return 0
-       else:
-           self.stack.append(tag)
-           self.handle_starttag(tag, method, attrs)
-           return 1
+        try:
+            method = getattr(self, 'start_' + tag)
+        except AttributeError:
+            try:
+                method = getattr(self, 'do_' + tag)
+            except AttributeError:
+                self.unknown_starttag(tag, attrs)
+                return -1
+            else:
+                self.handle_starttag(tag, method, attrs)
+                return 0
+        else:
+            self.stack.append(tag)
+            self.handle_starttag(tag, method, attrs)
+            return 1
 
     # Internal -- finish processing of end tag
     def finish_endtag(self, tag):
-       if not tag:
-           found = len(self.stack) - 1
-           if found < 0:
-               self.unknown_endtag(tag)
-               return
-       else:
-           if tag not in self.stack:
-               try:
-                   method = getattr(self, 'end_' + tag)
-               except AttributeError:
-                   self.unknown_endtag(tag)
-               return
-           found = len(self.stack)
-           for i in range(found):
-               if self.stack[i] == tag: found = i
-       while len(self.stack) > found:
-           tag = self.stack[-1]
-           try:
-               method = getattr(self, 'end_' + tag)
-           except AttributeError:
-               method = None
-           if method:
-               self.handle_endtag(tag, method)
-           else:
-               self.unknown_endtag(tag)
-           del self.stack[-1]
+        if not tag:
+            found = len(self.stack) - 1
+            if found < 0:
+                self.unknown_endtag(tag)
+                return
+        else:
+            if tag not in self.stack:
+                try:
+                    method = getattr(self, 'end_' + tag)
+                except AttributeError:
+                    self.unknown_endtag(tag)
+                return
+            found = len(self.stack)
+            for i in range(found):
+                if self.stack[i] == tag: found = i
+        while len(self.stack) > found:
+            tag = self.stack[-1]
+            try:
+                method = getattr(self, 'end_' + tag)
+            except AttributeError:
+                method = None
+            if method:
+                self.handle_endtag(tag, method)
+            else:
+                self.unknown_endtag(tag)
+            del self.stack[-1]
 
     # Overridable -- handle start tag
     def handle_starttag(self, tag, method, attrs):
-       method(attrs)
+        method(attrs)
 
     # Overridable -- handle end tag
     def handle_endtag(self, tag, method):
-       method()
+        method()
 
     # Example -- report an unbalanced </...> tag.
     def report_unbalanced(self, tag):
-       if self.verbose:
-           print '*** Unbalanced </' + tag + '>'
-           print '*** Stack:', self.stack
+        if self.verbose:
+            print '*** Unbalanced </' + tag + '>'
+            print '*** Stack:', self.stack
 
     # Example -- handle character reference, no need to override
     def handle_charref(self, name):
-       try:
-           n = string.atoi(name)
-       except string.atoi_error:
-           self.unknown_charref(name)
-           return
-       if not 0 <= n <= 255:
-           self.unknown_charref(name)
-           return
-       self.handle_data(chr(n))
+        try:
+            n = string.atoi(name)
+        except string.atoi_error:
+            self.unknown_charref(name)
+            return
+        if not 0 <= n <= 255:
+            self.unknown_charref(name)
+            return
+        self.handle_data(chr(n))
 
     # Definition of entities -- derived classes may override
     entitydefs = \
-           {'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}
+            {'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}
 
     # Example -- handle entity reference, no need to override
     def handle_entityref(self, name):
-       table = self.entitydefs
-       if table.has_key(name):
-           self.handle_data(table[name])
-       else:
-           self.unknown_entityref(name)
-           return
+        table = self.entitydefs
+        if table.has_key(name):
+            self.handle_data(table[name])
+        else:
+            self.unknown_entityref(name)
+            return
 
     # Example -- handle data, should be overridden
     def handle_data(self, data):
-       pass
+        pass
 
     # Example -- handle comment, could be overridden
     def handle_comment(self, data):
-       pass
+        pass
 
     # To be overridden -- handlers for unknown objects
     def unknown_starttag(self, tag, attrs): pass
@@ -358,87 +358,87 @@ class SGMLParser:
 class TestSGMLParser(SGMLParser):
 
     def __init__(self, verbose=0):
-       self.testdata = ""
-       SGMLParser.__init__(self, verbose)
+        self.testdata = ""
+        SGMLParser.__init__(self, verbose)
 
     def handle_data(self, data):
-       self.testdata = self.testdata + data
-       if len(`self.testdata`) >= 70:
-           self.flush()
+        self.testdata = self.testdata + data
+        if len(`self.testdata`) >= 70:
+            self.flush()
 
     def flush(self):
-       data = self.testdata
-       if data:
-           self.testdata = ""
-           print 'data:', `data`
+        data = self.testdata
+        if data:
+            self.testdata = ""
+            print 'data:', `data`
 
     def handle_comment(self, data):
-       self.flush()
-       r = `data`
-       if len(r) > 68:
-           r = r[:32] + '...' + r[-32:]
-       print 'comment:', r
+        self.flush()
+        r = `data`
+        if len(r) > 68:
+            r = r[:32] + '...' + r[-32:]
+        print 'comment:', r
 
     def unknown_starttag(self, tag, attrs):
-       self.flush()
-       if not attrs:
-           print 'start tag: <' + tag + '>'
-       else:
-           print 'start tag: <' + tag,
-           for name, value in attrs:
-               print name + '=' + '"' + value + '"',
-           print '>'
+        self.flush()
+        if not attrs:
+            print 'start tag: <' + tag + '>'
+        else:
+            print 'start tag: <' + tag,
+            for name, value in attrs:
+                print name + '=' + '"' + value + '"',
+            print '>'
 
     def unknown_endtag(self, tag):
-       self.flush()
-       print 'end tag: </' + tag + '>'
+        self.flush()
+        print 'end tag: </' + tag + '>'
 
     def unknown_entityref(self, ref):
-       self.flush()
-       print '*** unknown entity ref: &' + ref + ';'
+        self.flush()
+        print '*** unknown entity ref: &' + ref + ';'
 
     def unknown_charref(self, ref):
-       self.flush()
-       print '*** unknown char ref: &#' + ref + ';'
+        self.flush()
+        print '*** unknown char ref: &#' + ref + ';'
 
     def close(self):
-       SGMLParser.close(self)
-       self.flush()
+        SGMLParser.close(self)
+        self.flush()
 
 
 def test(args = None):
     import sys
 
     if not args:
-       args = sys.argv[1:]
+        args = sys.argv[1:]
 
     if args and args[0] == '-s':
-       args = args[1:]
-       klass = SGMLParser
+        args = args[1:]
+        klass = SGMLParser
     else:
-       klass = TestSGMLParser
+        klass = TestSGMLParser
 
     if args:
-       file = args[0]
+        file = args[0]
     else:
-       file = 'test.html'
+        file = 'test.html'
 
     if file == '-':
-       f = sys.stdin
+        f = sys.stdin
     else:
-       try:
-           f = open(file, 'r')
-       except IOError, msg:
-           print file, ":", msg
-           sys.exit(1)
+        try:
+            f = open(file, 'r')
+        except IOError, msg:
+            print file, ":", msg
+            sys.exit(1)
 
     data = f.read()
     if f is not sys.stdin:
-       f.close()
+        f.close()
 
     x = klass()
     for c in data:
-       x.feed(c)
+        x.feed(c)
     x.close()
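
A minimal subclass sketch (illustrative only) of the dispatch implemented by
finish_starttag()/finish_endtag() above: defining start_title()/end_title() is enough
for the parser to route the enclosed character data to the subclass.

    class TitleGrabber(SGMLParser):
        def __init__(self):
            SGMLParser.__init__(self)
            self.in_title = 0
            self.title = ''
        def start_title(self, attrs):       # <title ...> opens the capture window
            self.in_title = 1
        def end_title(self):                # </title> closes it
            self.in_title = 0
        def handle_data(self, data):
            if self.in_title:
                self.title = self.title + data

    g = TitleGrabber()
    g.feed('<html><head><title>An example page</title></head>')
    g.close()
    print g.title                            # An example page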
 
 
index 2bfe33137d8ca3429407e7fe1d2d2191b234912f..6d1857033c2241cbe705c61d8f2dee2153c43046 100644 (file)
@@ -13,18 +13,18 @@ def copyfile(src, dst):
     fsrc = None
     fdst = None
     try:
-       fsrc = open(src, 'rb')
-       fdst = open(dst, 'wb')
-       while 1:
-           buf = fsrc.read(16*1024)
-           if not buf:
-               break
-           fdst.write(buf)
+        fsrc = open(src, 'rb')
+        fdst = open(dst, 'wb')
+        while 1:
+            buf = fsrc.read(16*1024)
+            if not buf:
+                break
+            fdst.write(buf)
     finally:
-       if fdst:
-           fdst.close()
-       if fsrc:
-           fsrc.close()
+        if fdst:
+            fdst.close()
+        if fsrc:
+            fsrc.close()
 
 def copymode(src, dst):
     """Copy mode bits from src to dst"""
@@ -47,7 +47,7 @@ def copy(src, dst):
 
     """
     if os.path.isdir(dst):
-       dst = os.path.join(dst, os.path.basename(src))
+        dst = os.path.join(dst, os.path.basename(src))
     copyfile(src, dst)
     copymode(src, dst)
 
@@ -58,7 +58,7 @@ def copy2(src, dst):
 
     """
     if os.path.isdir(dst):
-       dst = os.path.join(dst, os.path.basename(src))
+        dst = os.path.join(dst, os.path.basename(src))
     copyfile(src, dst)
     copystat(src, dst)
 
@@ -80,19 +80,19 @@ def copytree(src, dst, symlinks=0):
     names = os.listdir(src)
     os.mkdir(dst)
     for name in names:
-       srcname = os.path.join(src, name)
-       dstname = os.path.join(dst, name)
-       try:
-           if symlinks and os.path.islink(srcname):
-               linkto = os.readlink(srcname)
-               os.symlink(linkto, dstname)
-           elif os.path.isdir(srcname):
-               copytree(srcname, dstname)
-           else:
-               copy2(srcname, dstname)
-           # XXX What about devices, sockets etc.?
-       except (IOError, os.error), why:
-           print "Can't copy %s to %s: %s" % (`srcname`, `dstname`, str(why))
+        srcname = os.path.join(src, name)
+        dstname = os.path.join(dst, name)
+        try:
+            if symlinks and os.path.islink(srcname):
+                linkto = os.readlink(srcname)
+                os.symlink(linkto, dstname)
+            elif os.path.isdir(srcname):
+                copytree(srcname, dstname)
+            else:
+                copy2(srcname, dstname)
+            # XXX What about devices, sockets etc.?
+        except (IOError, os.error), why:
+            print "Can't copy %s to %s: %s" % (`srcname`, `dstname`, str(why))
 
 def rmtree(path, ignore_errors=0, onerror=None):
     """Recursively delete a directory tree.
@@ -105,23 +105,23 @@ def rmtree(path, ignore_errors=0, onerror=None):
     cmdtuples = []
     _build_cmdtuple(path, cmdtuples)
     for cmd in cmdtuples:
-       try:
-           apply(cmd[0], (cmd[1],))
-       except:
-           exc = sys.exc_info()
-           if ignore_errors:
-               pass
-           elif onerror:
-               onerror(cmd[0], cmd[1], exc)
-           else:
-               raise exc[0], (exc[1][0], exc[1][1] + ' removing '+cmd[1])
+        try:
+            apply(cmd[0], (cmd[1],))
+        except:
+            exc = sys.exc_info()
+            if ignore_errors:
+                pass
+            elif onerror:
+                onerror(cmd[0], cmd[1], exc)
+            else:
+                raise exc[0], (exc[1][0], exc[1][1] + ' removing '+cmd[1])
 
 # Helper for rmtree()
 def _build_cmdtuple(path, cmdtuples):
     for f in os.listdir(path):
-       real_f = os.path.join(path,f)
-       if os.path.isdir(real_f) and not os.path.islink(real_f):
-           _build_cmdtuple(real_f, cmdtuples)
-       else:
-           cmdtuples.append(os.remove, real_f)
+        real_f = os.path.join(path,f)
+        if os.path.isdir(real_f) and not os.path.islink(real_f):
+            _build_cmdtuple(real_f, cmdtuples)
+        else:
+            cmdtuples.append((os.remove, real_f))
     cmdtuples.append((os.rmdir, path))
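
Usage sketch for the helpers above (the paths are illustrative only): mirror a
directory tree with copy2() semantics, then remove the copy, ignoring errors.

    copytree('/tmp/srcdir', '/tmp/dstdir')
    rmtree('/tmp/dstdir', ignore_errors=1)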
index 876c0d1c881aa52566fb28e6dca096922cfb1b75..4ef8cb8455cfa572f190b46f5b637a70a7bd459a 100644 (file)
@@ -61,61 +61,61 @@ import sys, os
 
 def addsitedir(sitedir):
     if sitedir not in sys.path:
-       sys.path.append(sitedir)        # Add path component
+        sys.path.append(sitedir)        # Add path component
     try:
-       names = os.listdir(sitedir)
+        names = os.listdir(sitedir)
     except os.error:
-       return
+        return
     names = map(os.path.normcase, names)
     names.sort()
     for name in names:
-       if name[-4:] == ".pth":
-           addpackage(sitedir, name)
+        if name[-4:] == ".pth":
+            addpackage(sitedir, name)
 
 def addpackage(sitedir, name):
     fullname = os.path.join(sitedir, name)
     try:
-       f = open(fullname)
+        f = open(fullname)
     except IOError:
-       return
+        return
     while 1:
-       dir = f.readline()
-       if not dir:
-           break
-       if dir[0] == '#':
-           continue
-       if dir[-1] == '\n':
-           dir = dir[:-1]
-       dir = os.path.join(sitedir, dir)
-       if dir not in sys.path and os.path.exists(dir):
-           sys.path.append(dir)
+        dir = f.readline()
+        if not dir:
+            break
+        if dir[0] == '#':
+            continue
+        if dir[-1] == '\n':
+            dir = dir[:-1]
+        dir = os.path.join(sitedir, dir)
+        if dir not in sys.path and os.path.exists(dir):
+            sys.path.append(dir)
 
 prefixes = [sys.prefix]
 if sys.exec_prefix != sys.prefix:
     prefixes.append(sys.exec_prefix)
 for prefix in prefixes:
     if prefix:
-       if os.sep == '/':
-           sitedirs = [os.path.join(prefix,
-                                    "lib",
-                                    "python" + sys.version[:3],
-                                    "site-packages"),
-                       os.path.join(prefix, "lib", "site-python")]
-       else:
-           sitedirs = [prefix]
-       for sitedir in sitedirs:
-           if os.path.isdir(sitedir):
-               addsitedir(sitedir)
+        if os.sep == '/':
+            sitedirs = [os.path.join(prefix,
+                                     "lib",
+                                     "python" + sys.version[:3],
+                                     "site-packages"),
+                        os.path.join(prefix, "lib", "site-python")]
+        else:
+            sitedirs = [prefix]
+        for sitedir in sitedirs:
+            if os.path.isdir(sitedir):
+                addsitedir(sitedir)
 
 try:
-    import sitecustomize               # Run arbitrary site specific code
+    import sitecustomize                # Run arbitrary site specific code
 except ImportError:
-    pass                               # No site customization module
+    pass                                # No site customization module
 
 def _test():
     print "sys.path = ["
     for dir in sys.path:
-       print "    %s," % `dir`
+        print "    %s," % `dir`
     print "]"
 
 if __name__ == '__main__':
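
For context (illustrative, not part of the change): a *.pth file dropped into one of
the site directories lists one extra directory per line, with '#' starting a comment;
addpackage() appends each line that names an existing directory to sys.path, and
addsitedir() can also be called directly.

    import sys
    addsitedir('/usr/local/lib/site-python')   # illustrative path; scanned for *.pth files
    print sys.path[-1]                         # the directory itself is appended first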
index a45d95a2988efa768bbf0919bc8848bdae3b44b2..24e46ad96cee3d789567f9b1961b7d35ef5cb809 100755 (executable)
@@ -53,7 +53,7 @@ class SMTP:
         """
         self.debuglevel = 0
         self.file = None
-       self.helo_resp = None
+        self.helo_resp = None
         if host: self.connect(host, port)
     
     def set_debuglevel(self, debuglevel):
@@ -83,17 +83,17 @@ class SMTP:
         self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         if self.debuglevel > 0: print 'connect:', (host, port)
         self.sock.connect(host, port)
-       (code,msg)=self.getreply()
-       if self.debuglevel >0 : print "connect:", msg
-       return msg
+        (code,msg)=self.getreply()
+        if self.debuglevel >0 : print "connect:", msg
+        return msg
     
     def send(self, str):
         """Send `str' to the server."""
         if self.debuglevel > 0: print 'send:', `str`
-       if self.sock:
+        if self.sock:
             self.sock.send(str)
         else:
-           raise SMTPServerDisconnected
+            raise SMTPServerDisconnected
  
     def putcmd(self, cmd, args=""):
         """Send a command to the server.
@@ -108,117 +108,117 @@ class SMTP:
         - server response code (e.g. '250', or such, if all goes well)
           Note: returns -1 if it can't read response code.
         - server response string corresponding to response code
-               (note : multiline responces converted to a single, 
+                (note: multiline responses converted to a single, 
                  multiline string)
         """
         resp=[]
-       self.file = self.sock.makefile('rb')
-       while 1:
+        self.file = self.sock.makefile('rb')
+        while 1:
             line = self.file.readline()
             if self.debuglevel > 0: print 'reply:', `line`
-           resp.append(string.strip(line[4:]))
-           code=line[:3]
-           #check if multiline resp
-           if line[3:4]!="-":
+            resp.append(string.strip(line[4:]))
+            code=line[:3]
+            #check if multiline resp
+            if line[3:4]!="-":
                 break
         try:
             errcode = string.atoi(code)
         except(ValueError):
-           errcode = -1
+            errcode = -1
 
-       errmsg = string.join(resp,"\n")
-       if self.debuglevel > 0: 
+        errmsg = string.join(resp,"\n")
+        if self.debuglevel > 0: 
             print 'reply: retcode (%s); Msg: %s' % (errcode,errmsg)
         return errcode, errmsg
     
     def docmd(self, cmd, args=""):
-       """ Send a command, and return it's responce code """
-       
-       self.putcmd(cmd,args)
-       (code,msg)=self.getreply()
-       return code
+        """ Send a command, and return it's responce code """
+        
+        self.putcmd(cmd,args)
+        (code,msg)=self.getreply()
+        return code
 # std smtp commands
 
     def helo(self, name=''):
         """ SMTP 'helo' command. Hostname to send for this command  
         defaults to the FQDN of the local host """
-       name=string.strip(name)
-       if len(name)==0:
-               name=socket.gethostbyaddr(socket.gethostname())[0]
-       self.putcmd("helo",name)
-       (code,msg)=self.getreply()
-       self.helo_resp=msg
-       return code
+        name=string.strip(name)
+        if len(name)==0:
+                name=socket.gethostbyaddr(socket.gethostname())[0]
+        self.putcmd("helo",name)
+        (code,msg)=self.getreply()
+        self.helo_resp=msg
+        return code
 
     def help(self):
-       """ SMTP 'help' command. Returns help text from server """
-       self.putcmd("help")
-       (code,msg)=self.getreply()
-       return msg
+        """ SMTP 'help' command. Returns help text from server """
+        self.putcmd("help")
+        (code,msg)=self.getreply()
+        return msg
 
     def rset(self):
         """ SMTP 'rset' command. Resets session. """
-       code=self.docmd("rset")
-       return code
+        code=self.docmd("rset")
+        return code
 
     def noop(self):
         """ SMTP 'noop' command. Dosen't do anything :> """
-       code=self.docmd("noop")
-       return code
+        code=self.docmd("noop")
+        return code
 
     def mail(self,sender):
         """ SMTP 'mail' command. Begins mail xfer session. """
         self.putcmd("mail","from: %s" % sender)
-       return self.getreply()
+        return self.getreply()
 
     def rcpt(self,recip):
         """ SMTP 'rcpt' command. Indicates 1 recipient for this mail. """
-       self.putcmd("rcpt","to: %s" % recip)
-       return self.getreply()
+        self.putcmd("rcpt","to: %s" % recip)
+        return self.getreply()
 
     def data(self,msg):
         """ SMTP 'DATA' command. Sends message data to server. 
             Automatically quotes lines beginning with a period per rfc821 """
-       #quote periods in msg according to RFC821
+        #quote periods in msg according to RFC821
         # ps, I don't know why I have to do it this way... doing: 
-       # quotepat=re.compile(r"^[.]",re.M)
-       # msg=re.sub(quotepat,"..",msg)
+        # quotepat=re.compile(r"^[.]",re.M)
+        # msg=re.sub(quotepat,"..",msg)
         # should work, but it doesn't (it doubles the number of any 
         # contiguous series of .'s at the beginning of a line, 
         #instead of just adding one. )
-       quotepat=re.compile(r"^[.]+",re.M)
+        quotepat=re.compile(r"^[.]+",re.M)
         def m(pat):
           return "."+pat.group(0)
-       msg=re.sub(quotepat,m,msg)
-       self.putcmd("data")
-       (code,repl)=self.getreply()
-       if self.debuglevel >0 : print "data:", (code,repl)
-       if code <> 354:
-           return -1
-       else:
-           self.send(msg)
-           self.send("\n.\n")
-           (code,msg)=self.getreply()
-           if self.debuglevel >0 : print "data:", (code,msg)
+        msg=re.sub(quotepat,m,msg)
+        self.putcmd("data")
+        (code,repl)=self.getreply()
+        if self.debuglevel >0 : print "data:", (code,repl)
+        if code <> 354:
+            return -1
+        else:
+            self.send(msg)
+            self.send("\n.\n")
+            (code,msg)=self.getreply()
+            if self.debuglevel >0 : print "data:", (code,msg)
             return code
 
 #some useful methods
     def sendmail(self,from_addr,to_addrs,msg):
         """ This command performs an entire mail transaction. 
-           The arguments are: 
+            The arguments are: 
                - from_addr : The address sending this mail.
                - to_addrs :  a list of addresses to send this mail to
                - msg : the message to send. 
 
-       This method will return normally if the mail is accepted for at least 
+        This method will return normally if the mail is accepted for at least 
         one recipient.
         Otherwise it will throw an exception (either SMTPSenderRefused,
           SMTPRecipientsRefused, or SMTPDataError)
 
-       That is, if this method does not throw an exception, then someone 
+        That is, if this method does not throw an exception, then someone 
         should get your mail.
 
-       It returns a dictionary , with one entry for each recipient that was 
+        It returns a dictionary, with one entry for each recipient that was 
         refused. 
 
         example:
@@ -241,27 +241,27 @@ class SMTP:
          will return an empty dictionary.  
          """
 
-       if not self.helo_resp:
-           self.helo()
+        if not self.helo_resp:
+            self.helo()
         (code,resp)=self.mail(from_addr)
         if code <>250:
-           self.rset()
-           raise SMTPSenderRefused
-       senderrs={}
+            self.rset()
+            raise SMTPSenderRefused
+        senderrs={}
         for each in to_addrs:
-           (code,resp)=self.rcpt(each)
-           if (code <> 250) and (code <> 251):
+            (code,resp)=self.rcpt(each)
+            if (code <> 250) and (code <> 251):
                 senderrs[each]=(code,resp)
         if len(senderrs)==len(to_addrs):
-           #th' server refused all our recipients
+            #th' server refused all our recipients
             self.rset()
             raise SMTPRecipientsRefused
         code=self.data(msg)
-       if code <>250 :
+        if code <>250 :
             self.rset()
-           raise SMTPDataError
+            raise SMTPDataError
         #if we got here then somebody got our mail
-       return senderrs         
+        return senderrs         
 
 
     def close(self):
@@ -275,5 +275,5 @@ class SMTP:
 
 
     def quit(self):
-       self.docmd("quit")
-       self.close()
+        self.docmd("quit")
+        self.close()
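
An end-to-end sketch of the class above (host name and addresses are illustrative):
connect, send one message, inspect the per-recipient refusal dictionary that sendmail()
returns, and quit.

    s = SMTP('localhost')
    s.set_debuglevel(1)
    refused = s.sendmail('me@example.org',
                         ['you@example.org'],
                         'From: me@example.org\nTo: you@example.org\n\nHello.\n')
    print refused        # {} when every recipient was accepted
    s.quit()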
index 439d4c8da113a7ef4ec271dfa531865b3bc9fc3a..73b33e0f00c3f7bcf8803f36accda36175dd10a0 100755 (executable)
@@ -72,14 +72,14 @@ argument = 312
 sym_name = {}
 for _name, _value in globals().items():
     if type(_value) is type(0):
-       sym_name[_value] = _name
+        sym_name[_value] = _name
 
 
 def main():
     import sys
     import token
     if len(sys.argv) == 1:
-       sys.argv = sys.argv + ["Include/graminit.h", "Lib/symbol.py"]
+        sys.argv = sys.argv + ["Include/graminit.h", "Lib/symbol.py"]
     token.main()
 
 if __name__ == "__main__":
index 2012e16d41e13b742cffdb4d8bf2abd7920c3076..8cf372e3f9d4d8b97e65b640582708c445975e35 100644 (file)
@@ -76,7 +76,7 @@ class Telnet:
 
     read_until(expected, [timeout])
         Read until the expected string has been seen, or a timeout is
-       hit (default is no timeout); may block.
+        hit (default is no timeout); may block.
 
     read_all()
         Read all data until EOF; may block.
@@ -86,362 +86,362 @@ class Telnet:
 
     read_very_eager()
         Read all data available already queued or on the socket,
-       without blocking.
+        without blocking.
 
     read_eager()
         Read either data already queued or some data available on the
-       socket, without blocking.
+        socket, without blocking.
 
     read_lazy()
         Read all data in the raw queue (processing it first), without
-       doing any socket I/O.
+        doing any socket I/O.
 
     read_very_lazy()
         Reads all data in the cooked queue, without doing any socket
-       I/O.
+        I/O.
 
     """
 
     def __init__(self, host=None, port=0):
-       """Constructor.
-
-       When called without arguments, create an unconnected instance.
-       With a hostname argument, it connects the instance; a port
-       number is optional.
-
-       """
-       self.debuglevel = DEBUGLEVEL
-       self.host = host
-       self.port = port
-       self.sock = None
-       self.rawq = ''
-       self.irawq = 0
-       self.cookedq = ''
-       self.eof = 0
-       if host:
-           self.open(host, port)
+        """Constructor.
+
+        When called without arguments, create an unconnected instance.
+        With a hostname argument, it connects the instance; a port
+        number is optional.
+
+        """
+        self.debuglevel = DEBUGLEVEL
+        self.host = host
+        self.port = port
+        self.sock = None
+        self.rawq = ''
+        self.irawq = 0
+        self.cookedq = ''
+        self.eof = 0
+        if host:
+            self.open(host, port)
 
     def open(self, host, port=0):
-       """Connect to a host.
+        """Connect to a host.
 
-       The optional second argument is the port number, which
-       defaults to the standard telnet port (23).
+        The optional second argument is the port number, which
+        defaults to the standard telnet port (23).
 
-       Don't try to reopen an already connected instance.
+        Don't try to reopen an already connected instance.
 
-       """
-       self.eof = 0
-       if not port:
-           port = TELNET_PORT
-       self.host = host
-       self.port = port
-       self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-       self.sock.connect((self.host, self.port))
+        """
+        self.eof = 0
+        if not port:
+            port = TELNET_PORT
+        self.host = host
+        self.port = port
+        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.sock.connect((self.host, self.port))
 
     def __del__(self):
-       """Destructor -- close the connection."""
-       self.close()
+        """Destructor -- close the connection."""
+        self.close()
 
     def msg(self, msg, *args):
-       """Print a debug message, when the debug level is > 0.
+        """Print a debug message, when the debug level is > 0.
 
-       If extra arguments are present, they are substituted in the
-       message using the standard string formatting operator.
+        If extra arguments are present, they are substituted in the
+        message using the standard string formatting operator.
 
-       """
-       if self.debuglevel > 0:
-           print 'Telnet(%s,%d):' % (self.host, self.port),
-           if args:
-               print msg % args
-           else:
-               print msg
+        """
+        if self.debuglevel > 0:
+            print 'Telnet(%s,%d):' % (self.host, self.port),
+            if args:
+                print msg % args
+            else:
+                print msg
 
     def set_debuglevel(self, debuglevel):
-       """Set the debug level.
+        """Set the debug level.
 
-       The higher it is, the more debug output you get (on sys.stdout).
+        The higher it is, the more debug output you get (on sys.stdout).
 
-       """
-       self.debuglevel = debuglevel
+        """
+        self.debuglevel = debuglevel
 
     def close(self):
-       """Close the connection."""
-       if self.sock:
-           self.sock.close()
-       self.sock = 0
-       self.eof = 1
+        """Close the connection."""
+        if self.sock:
+            self.sock.close()
+        self.sock = 0
+        self.eof = 1
 
     def get_socket(self):
-       """Return the socket object used internally."""
-       return self.sock
+        """Return the socket object used internally."""
+        return self.sock
 
     def fileno(self):
-       """Return the fileno() of the socket object used internally."""
-       return self.sock.fileno()
+        """Return the fileno() of the socket object used internally."""
+        return self.sock.fileno()
 
     def write(self, buffer):
-       """Write a string to the socket, doubling any IAC characters.
+        """Write a string to the socket, doubling any IAC characters.
 
-       Can block if the connection is blocked.  May raise
-       socket.error if the connection is closed.
+        Can block if the connection is blocked.  May raise
+        socket.error if the connection is closed.
 
-       """
-       if IAC in buffer:
-           buffer = string.replace(buffer, IAC, IAC+IAC)
-       self.msg("send %s", `buffer`)
-       self.sock.send(buffer)
+        """
+        if IAC in buffer:
+            buffer = string.replace(buffer, IAC, IAC+IAC)
+        self.msg("send %s", `buffer`)
+        self.sock.send(buffer)
 
     def read_until(self, match, timeout=None):
-       """Read until a given string is encountered or until timeout.
-
-       When no match is found, return whatever is available instead,
-       possibly the empty string.  Raise EOFError if the connection
-       is closed and no cooked data is available.
-
-       """
-       n = len(match)
-       self.process_rawq()
-       i = string.find(self.cookedq, match)
-       if i >= 0:
-           i = i+n
-           buf = self.cookedq[:i]
-           self.cookedq = self.cookedq[i:]
-           return buf
-       s_reply = ([self], [], [])
-       s_args = s_reply
-       if timeout is not None:
-           s_args = s_args + (timeout,)
-       while not self.eof and apply(select.select, s_args) == s_reply:
-           i = max(0, len(self.cookedq)-n)
-           self.fill_rawq()
-           self.process_rawq()
-           i = string.find(self.cookedq, match, i)
-           if i >= 0:
-               i = i+n
-               buf = self.cookedq[:i]
-               self.cookedq = self.cookedq[i:]
-               return buf
-       return self.read_very_lazy()
+        """Read until a given string is encountered or until timeout.
+
+        When no match is found, return whatever is available instead,
+        possibly the empty string.  Raise EOFError if the connection
+        is closed and no cooked data is available.
+
+        """
+        n = len(match)
+        self.process_rawq()
+        i = string.find(self.cookedq, match)
+        if i >= 0:
+            i = i+n
+            buf = self.cookedq[:i]
+            self.cookedq = self.cookedq[i:]
+            return buf
+        s_reply = ([self], [], [])
+        s_args = s_reply
+        if timeout is not None:
+            s_args = s_args + (timeout,)
+        while not self.eof and apply(select.select, s_args) == s_reply:
+            i = max(0, len(self.cookedq)-n)
+            self.fill_rawq()
+            self.process_rawq()
+            i = string.find(self.cookedq, match, i)
+            if i >= 0:
+                i = i+n
+                buf = self.cookedq[:i]
+                self.cookedq = self.cookedq[i:]
+                return buf
+        return self.read_very_lazy()
 
     def read_all(self):
-       """Read all data until EOF; block until connection closed."""
-       self.process_rawq()
-       while not self.eof:
-           self.fill_rawq()
-           self.process_rawq()
-       buf = self.cookedq
-       self.cookedq = ''
-       return buf
+        """Read all data until EOF; block until connection closed."""
+        self.process_rawq()
+        while not self.eof:
+            self.fill_rawq()
+            self.process_rawq()
+        buf = self.cookedq
+        self.cookedq = ''
+        return buf
 
     def read_some(self):
-       """Read at least one byte of cooked data unless EOF is hit.
+        """Read at least one byte of cooked data unless EOF is hit.
 
-       Return '' if EOF is hit.  Block if no data is immediately
-       available.
+        Return '' if EOF is hit.  Block if no data is immediately
+        available.
 
-       """
-       self.process_rawq()
-       while not self.cookedq and not self.eof:
-           self.fill_rawq()
-           self.process_rawq()
-       buf = self.cookedq
-       self.cookedq = ''
-       return buf
+        """
+        self.process_rawq()
+        while not self.cookedq and not self.eof:
+            self.fill_rawq()
+            self.process_rawq()
+        buf = self.cookedq
+        self.cookedq = ''
+        return buf
 
     def read_very_eager(self):
-       """Read everything that's possible without blocking in I/O (eager).
-       
-       Raise EOFError if connection closed and no cooked data
-       available.  Return '' if no cooked data available otherwise.
-       Don't block unless in the midst of an IAC sequence.
-
-       """
-       self.process_rawq()
-       while not self.eof and self.sock_avail():
-           self.fill_rawq()
-           self.process_rawq()
-       return self.read_very_lazy()
+        """Read everything that's possible without blocking in I/O (eager).
+        
+        Raise EOFError if connection closed and no cooked data
+        available.  Return '' if no cooked data available otherwise.
+        Don't block unless in the midst of an IAC sequence.
+
+        """
+        self.process_rawq()
+        while not self.eof and self.sock_avail():
+            self.fill_rawq()
+            self.process_rawq()
+        return self.read_very_lazy()
 
     def read_eager(self):
-       """Read readily available data.
+        """Read readily available data.
 
-       Raise EOFError if connection closed and no cooked data
-       available.  Return '' if no cooked data available otherwise.
-       Don't block unless in the midst of an IAC sequence.
+        Raise EOFError if connection closed and no cooked data
+        available.  Return '' if no cooked data available otherwise.
+        Don't block unless in the midst of an IAC sequence.
 
-       """
-       self.process_rawq()
-       while not self.cookedq and not self.eof and self.sock_avail():
-           self.fill_rawq()
-           self.process_rawq()
-       return self.read_very_lazy()
+        """
+        self.process_rawq()
+        while not self.cookedq and not self.eof and self.sock_avail():
+            self.fill_rawq()
+            self.process_rawq()
+        return self.read_very_lazy()
 
     def read_lazy(self):
-       """Process and return data that's already in the queues (lazy).
-       
-       Raise EOFError if connection closed and no data available.
-       Return '' if no cooked data available otherwise.  Don't block
-       unless in the midst of an IAC sequence.
+        """Process and return data that's already in the queues (lazy).
+        
+        Raise EOFError if connection closed and no data available.
+        Return '' if no cooked data available otherwise.  Don't block
+        unless in the midst of an IAC sequence.
 
-       """
-       self.process_rawq()
-       return self.read_very_lazy()
+        """
+        self.process_rawq()
+        return self.read_very_lazy()
 
     def read_very_lazy(self):
-       """Return any data available in the cooked queue (very lazy).
+        """Return any data available in the cooked queue (very lazy).
 
-       Raise EOFError if connection closed and no data available.
-       Return '' if no cooked data available otherwise.  Don't block.
+        Raise EOFError if connection closed and no data available.
+        Return '' if no cooked data available otherwise.  Don't block.
 
-       """
-       buf = self.cookedq
-       self.cookedq = ''
-       if not buf and self.eof and not self.rawq:
-           raise EOFError, 'telnet connection closed'
-       return buf
+        """
+        buf = self.cookedq
+        self.cookedq = ''
+        if not buf and self.eof and not self.rawq:
+            raise EOFError, 'telnet connection closed'
+        return buf
 
     def process_rawq(self):
-       """Transfer from raw queue to cooked queue.
-
-       Set self.eof when connection is closed.  Don't block unless in
-       the midst of an IAC sequence.
-
-       """
-       buf = ''
-       try:
-           while self.rawq:
-               c = self.rawq_getchar()
-               if c == theNULL:
-                   continue
-               if c == "\021":
-                   continue
-               if c != IAC:
-                   buf = buf + c
-                   continue
-               c = self.rawq_getchar()
-               if c == IAC:
-                   buf = buf + c
-               elif c in (DO, DONT):
-                   opt = self.rawq_getchar()
-                   self.msg('IAC %s %d', c == DO and 'DO' or 'DONT', ord(c))
-                   self.sock.send(IAC + WONT + opt)
-               elif c in (WILL, WONT):
-                   opt = self.rawq_getchar()
-                   self.msg('IAC %s %d',
-                            c == WILL and 'WILL' or 'WONT', ord(c))
-               else:
-                   self.msg('IAC %s not recognized' % `c`)
-       except EOFError: # raised by self.rawq_getchar()
-           pass
-       self.cookedq = self.cookedq + buf
+        """Transfer from raw queue to cooked queue.
+
+        Set self.eof when connection is closed.  Don't block unless in
+        the midst of an IAC sequence.
+
+        """
+        buf = ''
+        try:
+            while self.rawq:
+                c = self.rawq_getchar()
+                if c == theNULL:
+                    continue
+                if c == "\021":
+                    continue
+                if c != IAC:
+                    buf = buf + c
+                    continue
+                c = self.rawq_getchar()
+                if c == IAC:
+                    buf = buf + c
+                elif c in (DO, DONT):
+                    opt = self.rawq_getchar()
+                    self.msg('IAC %s %d', c == DO and 'DO' or 'DONT', ord(c))
+                    self.sock.send(IAC + WONT + opt)
+                elif c in (WILL, WONT):
+                    opt = self.rawq_getchar()
+                    self.msg('IAC %s %d',
+                             c == WILL and 'WILL' or 'WONT', ord(c))
+                else:
+                    self.msg('IAC %s not recognized' % `c`)
+        except EOFError: # raised by self.rawq_getchar()
+            pass
+        self.cookedq = self.cookedq + buf
 
     def rawq_getchar(self):
-       """Get next char from raw queue.
-
-       Block if no data is immediately available.  Raise EOFError
-       when connection is closed.
-
-       """
-       if not self.rawq:
-           self.fill_rawq()
-           if self.eof:
-               raise EOFError
-       c = self.rawq[self.irawq]
-       self.irawq = self.irawq + 1
-       if self.irawq >= len(self.rawq):
-           self.rawq = ''
-           self.irawq = 0
-       return c
+        """Get next char from raw queue.
+
+        Block if no data is immediately available.  Raise EOFError
+        when connection is closed.
+
+        """
+        if not self.rawq:
+            self.fill_rawq()
+            if self.eof:
+                raise EOFError
+        c = self.rawq[self.irawq]
+        self.irawq = self.irawq + 1
+        if self.irawq >= len(self.rawq):
+            self.rawq = ''
+            self.irawq = 0
+        return c
 
     def fill_rawq(self):
-       """Fill raw queue from exactly one recv() system call.
-
-       Block if no data is immediately available.  Set self.eof when
-       connection is closed.
-
-       """
-       if self.irawq >= len(self.rawq):
-           self.rawq = ''
-           self.irawq = 0
-       # The buffer size should be fairly small so as to avoid quadratic
-       # behavior in process_rawq() above
-       buf = self.sock.recv(50)
-       self.msg("recv %s", `buf`)
-       self.eof = (not buf)
-       self.rawq = self.rawq + buf
+        """Fill raw queue from exactly one recv() system call.
+
+        Block if no data is immediately available.  Set self.eof when
+        connection is closed.
+
+        """
+        if self.irawq >= len(self.rawq):
+            self.rawq = ''
+            self.irawq = 0
+        # The buffer size should be fairly small so as to avoid quadratic
+        # behavior in process_rawq() above
+        buf = self.sock.recv(50)
+        self.msg("recv %s", `buf`)
+        self.eof = (not buf)
+        self.rawq = self.rawq + buf
 
     def sock_avail(self):
-       """Test whether data is available on the socket."""
-       return select.select([self], [], [], 0) == ([self], [], [])
+        """Test whether data is available on the socket."""
+        return select.select([self], [], [], 0) == ([self], [], [])
 
     def interact(self):
-       """Interaction function, emulates a very dumb telnet client."""
-       while 1:
-           rfd, wfd, xfd = select.select([self, sys.stdin], [], [])
-           if self in rfd:
-               try:
-                   text = self.read_eager()
-               except EOFError:
-                   print '*** Connection closed by remote host ***'
-                   break
-               if text:
-                   sys.stdout.write(text)
-                   sys.stdout.flush()
-           if sys.stdin in rfd:
-               line = sys.stdin.readline()
-               if not line:
-                   break
-               self.write(line)
+        """Interaction function, emulates a very dumb telnet client."""
+        while 1:
+            rfd, wfd, xfd = select.select([self, sys.stdin], [], [])
+            if self in rfd:
+                try:
+                    text = self.read_eager()
+                except EOFError:
+                    print '*** Connection closed by remote host ***'
+                    break
+                if text:
+                    sys.stdout.write(text)
+                    sys.stdout.flush()
+            if sys.stdin in rfd:
+                line = sys.stdin.readline()
+                if not line:
+                    break
+                self.write(line)
 
     def expect(self, list, timeout=None):
-       """Read until one from a list of a regular expressions matches.
-
-       The first argument is a list of regular expressions, either
-       compiled (re.RegexObject instances) or uncompiled (strings).
-       The optional second argument is a timeout, in seconds; default
-       is no timeout.
-
-       Return a tuple of three items: the index in the list of the
-       first regular expression that matches; the match object
-       returned; and the text read up till and including the match.
-
-       If EOF is read and no text was read, raise EOFError.
-       Otherwise, when nothing matches, return (-1, None, text) where
-       text is the text received so far (may be the empty string if a
-       timeout happened).
-
-       If a regular expression ends with a greedy match (e.g. '.*')
-       or if more than one expression can match the same input, the
-       results are undeterministic, and may depend on the I/O timing.
-
-       """
-       re = None
-       list = list[:]
-       indices = range(len(list))
-       for i in indices:
-           if not hasattr(list[i], "search"):
-               if not re: import re
-               list[i] = re.compile(list[i])
-       while 1:
-           self.process_rawq()
-           for i in indices:
-               m = list[i].search(self.cookedq)
-               if m:
-                   e = m.end()
-                   text = self.cookedq[:e]
-                   self.cookedq = self.cookedq[e:]
-                   return (i, m, text)
-           if self.eof:
-               break
-           if timeout is not None:
-               r, w, x = select.select([self.fileno()], [], [], timeout)
-               if not r:
-                   break
-           self.fill_rawq()
-       text = self.read_very_lazy()
-       if not text and self.eof:
-           raise EOFError
-       return (-1, None, text)
+        """Read until one from a list of a regular expressions matches.
+
+        The first argument is a list of regular expressions, either
+        compiled (re.RegexObject instances) or uncompiled (strings).
+        The optional second argument is a timeout, in seconds; default
+        is no timeout.
+
+        Return a tuple of three items: the index in the list of the
+        first regular expression that matches; the match object
+        returned; and the text read up till and including the match.
+
+        If EOF is read and no text was read, raise EOFError.
+        Otherwise, when nothing matches, return (-1, None, text) where
+        text is the text received so far (may be the empty string if a
+        timeout happened).
+
+        If a regular expression ends with a greedy match (e.g. '.*')
+        or if more than one expression can match the same input, the
+        results are undeterministic, and may depend on the I/O timing.
+
+        """
+        re = None
+        list = list[:]
+        indices = range(len(list))
+        for i in indices:
+            if not hasattr(list[i], "search"):
+                if not re: import re
+                list[i] = re.compile(list[i])
+        while 1:
+            self.process_rawq()
+            for i in indices:
+                m = list[i].search(self.cookedq)
+                if m:
+                    e = m.end()
+                    text = self.cookedq[:e]
+                    self.cookedq = self.cookedq[e:]
+                    return (i, m, text)
+            if self.eof:
+                break
+            if timeout is not None:
+                r, w, x = select.select([self.fileno()], [], [], timeout)
+                if not r:
+                    break
+            self.fill_rawq()
+        text = self.read_very_lazy()
+        if not text and self.eof:
+            raise EOFError
+        return (-1, None, text)
 
 
 def test():
@@ -454,18 +454,18 @@ def test():
     """
     debuglevel = 0
     while sys.argv[1:] and sys.argv[1] == '-d':
-       debuglevel = debuglevel+1
-       del sys.argv[1]
+        debuglevel = debuglevel+1
+        del sys.argv[1]
     host = 'localhost'
     if sys.argv[1:]:
-       host = sys.argv[1]
+        host = sys.argv[1]
     port = 0
     if sys.argv[2:]:
-       portstr = sys.argv[2]
-       try:
-           port = int(portstr)
-       except ValueError:
-           port = socket.getservbyname(portstr, 'tcp')
+        portstr = sys.argv[2]
+        try:
+            port = int(portstr)
+        except ValueError:
+            port = socket.getservbyname(portstr, 'tcp')
     tn = Telnet()
     tn.set_debuglevel(debuglevel)
     tn.open(host, port)
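The retabbed Telnet class above exposes open(), set_debuglevel(), read_until(), write() and read_all(), as its docstrings describe. A minimal usage sketch built only from those methods; the host, port, prompt and reply strings are placeholders, not anything taken from the commit:

    import telnetlib

    tn = telnetlib.Telnet()
    tn.set_debuglevel(0)
    tn.open('localhost', 23)                 # placeholder host and port
    banner = tn.read_until('login: ', 10)    # wait at most 10 seconds for the prompt
    tn.write('guest\n')
    print tn.read_all()                      # blocks until the server closes the connection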
index 5b4e388894a22a163502a790fc7122a1f746b61d..bd0ba606b33acc818b670f9ed6405edc5c58a372 100644 (file)
@@ -19,55 +19,55 @@ template = None
 def gettempdir():
     global tempdir
     if tempdir is not None:
-       return tempdir
+        return tempdir
     attempdirs = ['/usr/tmp', '/tmp', os.getcwd(), os.curdir]
     if os.name == 'nt':
-       attempdirs.insert(0, 'C:\\TEMP')
-       attempdirs.insert(0, '\\TEMP')
+        attempdirs.insert(0, 'C:\\TEMP')
+        attempdirs.insert(0, '\\TEMP')
     elif os.name == 'mac':
-       import macfs, MACFS
-       try:
-            refnum, dirid = macfs.FindFolder(MACFS.kOnSystemDisk,
-                                             MACFS.kTemporaryFolderType, 0)
-            dirname = macfs.FSSpec((refnum, dirid, '')).as_pathname()
-            attempdirs.insert(0, dirname)
-       except macfs.error:
-           pass
+        import macfs, MACFS
+        try:
+             refnum, dirid = macfs.FindFolder(MACFS.kOnSystemDisk,
+                                              MACFS.kTemporaryFolderType, 0)
+             dirname = macfs.FSSpec((refnum, dirid, '')).as_pathname()
+             attempdirs.insert(0, dirname)
+        except macfs.error:
+            pass
     for envname in 'TMPDIR', 'TEMP', 'TMP':
-       if os.environ.has_key(envname):
-           attempdirs.insert(0, os.environ[envname])
+        if os.environ.has_key(envname):
+            attempdirs.insert(0, os.environ[envname])
     testfile = gettempprefix() + 'test'
     for dir in attempdirs:
-       try:
-           filename = os.path.join(dir, testfile)
-           fp = open(filename, 'w')
-           fp.write('blat')
-           fp.close()
-           os.unlink(filename)
-           tempdir = dir
-           break
-       except IOError:
-           pass
+        try:
+            filename = os.path.join(dir, testfile)
+            fp = open(filename, 'w')
+            fp.write('blat')
+            fp.close()
+            os.unlink(filename)
+            tempdir = dir
+            break
+        except IOError:
+            pass
     if tempdir is None:
-       msg = "Can't find a usable temporary directory amongst " + `attempdirs`
-       raise IOError, msg
+        msg = "Can't find a usable temporary directory amongst " + `attempdirs`
+        raise IOError, msg
     return tempdir
 
 
 # Function to calculate a prefix of the filename to use
 
 def gettempprefix():
-       global template
-       if template == None:
-               if os.name == 'posix':
-                       template = '@' + `os.getpid()` + '.'
-               elif os.name == 'nt':
-                       template = '~' + `os.getpid()` + '-'
-               elif os.name == 'mac':
-                       template = 'Python-Tmp-'
-               else:
-                       template = 'tmp' # XXX might choose a better one
-       return template
+    global template
+    if template == None:
+        if os.name == 'posix':
+            template = '@' + `os.getpid()` + '.'
+        elif os.name == 'nt':
+            template = '~' + `os.getpid()` + '-'
+        elif os.name == 'mac':
+            template = 'Python-Tmp-'
+        else:
+            template = 'tmp' # XXX might choose a better one
+    return template
 
 
 # Counter for generating unique names
@@ -78,14 +78,14 @@ counter = 0
 # User-callable function to return a unique temporary file name
 
 def mktemp(suffix=""):
-       global counter
-       dir = gettempdir()
-       pre = gettempprefix()
-       while 1:
-               counter = counter + 1
-               file = os.path.join(dir, pre + `counter` + suffix)
-               if not os.path.exists(file):
-                       return file
+    global counter
+    dir = gettempdir()
+    pre = gettempprefix()
+    while 1:
+        counter = counter + 1
+        file = os.path.join(dir, pre + `counter` + suffix)
+        if not os.path.exists(file):
+            return file
 
 
 class TemporaryFileWrapper:
@@ -96,31 +96,31 @@ class TemporaryFileWrapper:
     no longer needed.
     """
     def __init__(self, file, path):
-       self.file = file
-       self.path = path
+        self.file = file
+        self.path = path
 
     def close(self):
-       self.file.close()
-       os.unlink(self.path)
+        self.file.close()
+        os.unlink(self.path)
 
     def __del__(self):
-       try: self.close()
-       except: pass
+        try: self.close()
+        except: pass
 
     def __getattr__(self, name):
-       file = self.__dict__['file']
-       a = getattr(file, name)
-       setattr(self, name, a)
-       return a
+        file = self.__dict__['file']
+        a = getattr(file, name)
+        setattr(self, name, a)
+        return a
 
 
 def TemporaryFile(mode='w+b', bufsize=-1, suffix=""):
     name = mktemp(suffix)
     file = open(name, mode, bufsize)
     try:
-       os.unlink(name)
+        os.unlink(name)
     except os.error:
-       # Non-unix -- can't unlink file that's still open, use wrapper
-       return TemporaryFileWrapper(file, name)
+        # Non-unix -- can't unlink file that's still open, use wrapper
+        return TemporaryFileWrapper(file, name)
     else:
-       return file
+        return file
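tempfile.py above provides gettempdir(), gettempprefix(), mktemp() and TemporaryFile(). A short sketch of the two usual entry points, assuming nothing beyond the signatures shown in the hunks (the file contents written are arbitrary):

    import tempfile, os

    name = tempfile.mktemp('.txt')       # unique pathname under gettempdir()
    fp = open(name, 'w')
    fp.write('scratch data\n')
    fp.close()
    os.unlink(name)

    tf = tempfile.TemporaryFile()        # unlinked up front, or wrapped on non-Unix
    tf.write('more scratch data\n')
    tf.seek(0)
    print tf.read()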
index 888bb41a0931d0a7576f70e2ec997e2c6ba51ab3..69941dc031cd0d9936b301f67a59991cf3ae36e4 100755 (executable)
@@ -56,7 +56,7 @@ NT_OFFSET = 256
 tok_name = {}
 for _name, _value in globals().items():
     if type(_value) is type(0):
-       tok_name[_value] = _name
+        tok_name[_value] = _name
 
 
 def ISTERMINAL(x):
@@ -77,49 +77,49 @@ def main():
     inFileName = args and args[0] or "Include/token.h"
     outFileName = "Lib/token.py"
     if len(args) > 1:
-       outFileName = args[1]
+        outFileName = args[1]
     try:
-       fp = open(inFileName)
+        fp = open(inFileName)
     except IOError, err:
-       sys.stdout.write("I/O error: %s\n" % str(err))
-       sys.exit(1)
+        sys.stdout.write("I/O error: %s\n" % str(err))
+        sys.exit(1)
     lines = string.splitfields(fp.read(), "\n")
     fp.close()
     prog = re.compile(
-       "#define[ \t][ \t]*([A-Z][A-Z_]*)[ \t][ \t]*([0-9][0-9]*)",
-       re.IGNORECASE)
+        "#define[ \t][ \t]*([A-Z][A-Z_]*)[ \t][ \t]*([0-9][0-9]*)",
+        re.IGNORECASE)
     tokens = {}
     for line in lines:
-       match = prog.match(line)
-       if match:
-           name, val = match.group(1, 2)
-           val = string.atoi(val)
-           tokens[val] = name          # reverse so we can sort them...
+        match = prog.match(line)
+        if match:
+            name, val = match.group(1, 2)
+            val = string.atoi(val)
+            tokens[val] = name          # reverse so we can sort them...
     keys = tokens.keys()
     keys.sort()
     # load the output skeleton from the target:
     try:
-       fp = open(outFileName)
+        fp = open(outFileName)
     except IOError, err:
-       sys.stderr.write("I/O error: %s\n" % str(err))
-       sys.exit(2)
+        sys.stderr.write("I/O error: %s\n" % str(err))
+        sys.exit(2)
     format = string.splitfields(fp.read(), "\n")
     fp.close()
     try:
-       start = format.index("#--start constants--") + 1
-       end = format.index("#--end constants--")
+        start = format.index("#--start constants--") + 1
+        end = format.index("#--end constants--")
     except ValueError:
-       sys.stderr.write("target does not contain format markers")
-       sys.exit(3)
+        sys.stderr.write("target does not contain format markers")
+        sys.exit(3)
     lines = []
     for val in keys:
-       lines.append("%s = %d" % (tokens[val], val))
+        lines.append("%s = %d" % (tokens[val], val))
     format[start:end] = lines
     try:
-       fp = open(outFileName, 'w')
+        fp = open(outFileName, 'w')
     except IOError, err:
-       sys.stderr.write("I/O error: %s\n" % str(err))
-       sys.exit(4)
+        sys.stderr.write("I/O error: %s\n" % str(err))
+        sys.exit(4)
     fp.write(string.joinfields(format, "\n"))
     fp.close()
 
index aeac304d6d90f3fd488b2affa4d76a0e18b263fe..c440a5cdf7709f98d9d9bd9375f68bf8fa0abd7c 100644 (file)
@@ -22,7 +22,7 @@ DictType = DictionaryType = type({})
 
 def _f(): pass
 FunctionType = type(_f)
-LambdaType = type(lambda: None)                # Same as FunctionType
+LambdaType = type(lambda: None)         # Same as FunctionType
 try:
     CodeType = type(_f.func_code)
 except:
@@ -31,18 +31,18 @@ except:
 class _C:
     def _m(self): pass
 ClassType = type(_C)
-UnboundMethodType = type(_C._m)                # Same as MethodType
+UnboundMethodType = type(_C._m)         # Same as MethodType
 _x = _C()
 InstanceType = type(_x)
 MethodType = type(_x._m)
 
 BuiltinFunctionType = type(len)
-BuiltinMethodType = type([].append)    # Same as BuiltinFunctionType
+BuiltinMethodType = type([].append)     # Same as BuiltinFunctionType
 
 ModuleType = type(sys)
 
 try:
-    FileType = type(sys.stdin)         # XXX what if it was assigned to?
+    FileType = type(sys.stdin)          # XXX what if it was assigned to?
 except:
     pass
 XRangeType = type(xrange(0))
@@ -51,14 +51,14 @@ try:
     raise TypeError
 except TypeError:
     try:
-       tb = sys.exc_info()[2]
-       TracebackType = type(tb)
-       FrameType = type(tb.tb_frame)
+        tb = sys.exc_info()[2]
+        TracebackType = type(tb)
+        FrameType = type(tb.tb_frame)
     except:
-       pass
+        pass
     tb = None; del tb
 
 SliceType = type(slice(0))
 EllipsisType = type(Ellipsis)
 
-del sys, _f, _C, _x                    # Not for export
+del sys, _f, _C, _x                     # Not for export
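types.py above defines its constants simply by taking type() of sample objects; the usual consumer pattern is a direct identity comparison against those constants. A small illustrative sketch (the describe() helper is hypothetical, not part of the module):

    import types

    def describe(obj):
        # Dispatch on the constants defined by Lib/types.py.
        if type(obj) is types.FunctionType:
            return 'function'
        elif type(obj) is types.ListType:
            return 'list'
        return 'something else'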
index 125a7a267e8e2f8ffb64e5cee50d8e7300a678ad..1374c11f931c46a1370a7d8d641b90b1f1944077 100644 (file)
@@ -23,15 +23,15 @@ wishes to do different things depending on the Python version.
 
 import os
 
-home = os.curdir                       # Default
+home = os.curdir                        # Default
 if os.environ.has_key('HOME'):
     home = os.environ['HOME']
-elif os.name == 'nt':                  # Contributed by Jeff Bauer
+elif os.name == 'nt':                   # Contributed by Jeff Bauer
     if os.environ.has_key('HOMEPATH'):
-       if os.environ.has_key('HOMEDRIVE'):
-           home = os.environ['HOMEDRIVE'] + os.environ['HOMEPATH']
-       else:
-           home = os.environ['HOMEPATH']
+        if os.environ.has_key('HOMEDRIVE'):
+            home = os.environ['HOMEDRIVE'] + os.environ['HOMEPATH']
+        else:
+            home = os.environ['HOMEPATH']
 
 pythonrc = os.path.join(home, ".pythonrc.py")
 try:
index 145b5aa2a464c50b3d71b1f88bb1f898472f9cdd..a94291d4a056c08b9e2d1c0c02aec03e4328fec1 100755 (executable)
--- a/Lib/uu.py
+++ b/Lib/uu.py
@@ -41,38 +41,38 @@ def encode(in_file, out_file, name=None, mode=None):
     # If in_file is a pathname open it and change defaults
     #
     if in_file == '-':
-       in_file = sys.stdin
+        in_file = sys.stdin
     elif type(in_file) == type(''):
-       if name == None:
-           name = os.path.basename(in_file)
-       if mode == None:
-           try:
-               mode = os.stat(in_file)[0]
-           except AttributeError:
-               pass
-       in_file = open(in_file, 'rb')
+        if name == None:
+            name = os.path.basename(in_file)
+        if mode == None:
+            try:
+                mode = os.stat(in_file)[0]
+            except AttributeError:
+                pass
+        in_file = open(in_file, 'rb')
     #
     # Open out_file if it is a pathname
     #
     if out_file == '-':
-       out_file = sys.stdout
+        out_file = sys.stdout
     elif type(out_file) == type(''):
-       out_file = open(out_file, 'w')
+        out_file = open(out_file, 'w')
     #
     # Set defaults for name and mode
     #
     if name == None:
-       name = '-'
+        name = '-'
     if mode == None:
-       mode = 0666
+        mode = 0666
     #
     # Write the data
     #
     out_file.write('begin %o %s\n' % ((mode&0777),name))
     str = in_file.read(45)
     while len(str) > 0:
-       out_file.write(binascii.b2a_uu(str))
-       str = in_file.read(45)
+        out_file.write(binascii.b2a_uu(str))
+        str = in_file.read(45)
     out_file.write(' \nend\n')
 
 
@@ -82,50 +82,50 @@ def decode(in_file, out_file=None, mode=None):
     # Open the input file, if needed.
     #
     if in_file == '-':
-       in_file = sys.stdin
+        in_file = sys.stdin
     elif type(in_file) == type(''):
-       in_file = open(in_file)
+        in_file = open(in_file)
     #
     # Read until a begin is encountered or we've exhausted the file
     #
     while 1:
-       hdr = in_file.readline()
-       if not hdr:
-           raise Error, 'No valid begin line found in input file'
-       if hdr[:5] != 'begin':
-           continue
-       hdrfields = string.split(hdr)
-       if len(hdrfields) == 3 and hdrfields[0] == 'begin':
-           try:
-               string.atoi(hdrfields[1], 8)
-               break
-           except ValueError:
-               pass
+        hdr = in_file.readline()
+        if not hdr:
+            raise Error, 'No valid begin line found in input file'
+        if hdr[:5] != 'begin':
+            continue
+        hdrfields = string.split(hdr)
+        if len(hdrfields) == 3 and hdrfields[0] == 'begin':
+            try:
+                string.atoi(hdrfields[1], 8)
+                break
+            except ValueError:
+                pass
     if out_file == None:
-       out_file = hdrfields[2]
+        out_file = hdrfields[2]
     if mode == None:
-       mode = string.atoi(hdrfields[1], 8)
+        mode = string.atoi(hdrfields[1], 8)
     #
     # Open the output file
     #
     if out_file == '-':
-       out_file = sys.stdout
+        out_file = sys.stdout
     elif type(out_file) == type(''):
-       fp = open(out_file, 'wb')
-       try:
-           os.path.chmod(out_file, mode)
-       except AttributeError:
-           pass
-       out_file = fp
+        fp = open(out_file, 'wb')
+        try:
+            os.path.chmod(out_file, mode)
+        except AttributeError:
+            pass
+        out_file = fp
     #
     # Main decoding loop
     #
     str = in_file.readline()
     while str and str != 'end\n':
-       out_file.write(binascii.a2b_uu(str))
-       str = in_file.readline()
+        out_file.write(binascii.a2b_uu(str))
+        str = in_file.readline()
     if not str:
-       raise Error, 'Truncated input file'
+        raise Error, 'Truncated input file'
 
 def test():
     """uuencode/uudecode main program"""
@@ -138,40 +138,40 @@ def test():
     output = sys.stdout
     ok = 1
     try:
-       optlist, args = getopt.getopt(sys.argv[1:], 'dt')
+        optlist, args = getopt.getopt(sys.argv[1:], 'dt')
     except getopt.error:
-       ok = 0
+        ok = 0
     if not ok or len(args) > 2:
-       print 'Usage:', sys.argv[0], '[-d] [-t] [input [output]]'
-       print ' -d: Decode (in stead of encode)'
-       print ' -t: data is text, encoded format unix-compatible text'
-       sys.exit(1)
-       
+        print 'Usage:', sys.argv[0], '[-d] [-t] [input [output]]'
+        print ' -d: Decode (in stead of encode)'
+        print ' -t: data is text, encoded format unix-compatible text'
+        sys.exit(1)
+        
     for o, a in optlist:
-       if o == '-d': dopt = 1
-       if o == '-t': topt = 1
+        if o == '-d': dopt = 1
+        if o == '-t': topt = 1
 
     if len(args) > 0:
-       input = args[0]
+        input = args[0]
     if len(args) > 1:
-       output = args[1]
+        output = args[1]
 
     if dopt:
-       if topt:
-           if type(output) == type(''):
-               output = open(output, 'w')
-           else:
-               print sys.argv[0], ': cannot do -t to stdout'
-               sys.exit(1)
-       decode(input, output)
+        if topt:
+            if type(output) == type(''):
+                output = open(output, 'w')
+            else:
+                print sys.argv[0], ': cannot do -t to stdout'
+                sys.exit(1)
+        decode(input, output)
     else:
-       if topt:
-           if type(input) == type(''):
-               input = open(input, 'r')
-           else:
-               print sys.argv[0], ': cannot do -t from stdin'
-               sys.exit(1)
-       encode(input, output)
+        if topt:
+            if type(input) == type(''):
+                input = open(input, 'r')
+            else:
+                print sys.argv[0], ': cannot do -t from stdin'
+                sys.exit(1)
+        encode(input, output)
 
 if __name__ == '__main__':
     test()
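uu.encode() and uu.decode() above accept either open file objects or pathnames, with '-' standing for stdin/stdout, and decode() falls back to the name recorded on the begin line when no output is given. A minimal round trip under that interface; the filenames are placeholders:

    import uu

    uu.encode('photo.gif', 'photo.uu')   # pathname in, pathname out
    uu.decode('photo.uu')                # output name taken from the begin line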
index 8985062d5d97875dd517bdfd15bd025d9431f0bf..3186edf1a78dfa707a0171d37dc6a26eef738f40 100644 (file)
@@ -17,19 +17,19 @@ def whichdb(filename):
 
     # Check for dbm first -- this has a .pag and a .dir file
     try:
-       f = open(filename + ".pag", "rb")
-       f.close()
-       f = open(filename + ".dir", "rb")
-       f.close()
-       return "dbm"
+        f = open(filename + ".pag", "rb")
+        f.close()
+        f = open(filename + ".dir", "rb")
+        f.close()
+        return "dbm"
     except IOError:
-       pass
+        pass
 
     # See if the file exists, return None if not
     try:
-       f = open(filename, "rb")
+        f = open(filename, "rb")
     except IOError:
-       return None
+        return None
 
     # Read the first 4 bytes of the file -- the magic number
     s = f.read(4)
@@ -37,21 +37,21 @@ def whichdb(filename):
 
     # Return "" if not at least 4 bytes
     if len(s) != 4:
-       return ""
+        return ""
 
     # Convert to 4-byte int in native byte order -- return "" if impossible
     try:
-       (magic,) = struct.unpack("=l", s)
+        (magic,) = struct.unpack("=l", s)
     except struct.error:
-       return ""
+        return ""
 
     # Check for GNU dbm
     if magic == 0x13579ace:
-       return "gdbm"
+        return "gdbm"
 
     # Check for BSD hash
     if magic == 0x061561:
-       return "dbhash"
+        return "dbhash"
 
     # Unknown
     return ""
index 50a9eea3c40ec82fae74c48872e5397ca362b9e4..290d92ee48538c463907d6f33ef0c02354694d4e 100644 (file)
@@ -18,11 +18,11 @@ class Error:
 
     """
     def __init__(self, msg):
-       self.msg = msg
+        self.msg = msg
     def __repr__(self):
-       return repr(self.msg)
+        return repr(self.msg)
     def __str__(self):
-       return str(self.msg)
+        return str(self.msg)
 
 
 class ConversionError(Error):
@@ -34,76 +34,76 @@ class Packer:
     """Pack various data representations into a buffer."""
 
     def __init__(self):
-       self.reset()
+        self.reset()
 
     def reset(self):
-       self.__buf = ''
+        self.__buf = ''
 
     def get_buffer(self):
-       return self.__buf
+        return self.__buf
     # backwards compatibility
     get_buf = get_buffer
 
     def pack_uint(self, x):
-       self.__buf = self.__buf + struct.pack('>L', x)
+        self.__buf = self.__buf + struct.pack('>L', x)
 
     pack_int = pack_uint
     pack_enum = pack_int
 
     def pack_bool(self, x):
-       if x: self.__buf = self.__buf + '\0\0\0\1'
-       else: self.__buf = self.__buf + '\0\0\0\0'
+        if x: self.__buf = self.__buf + '\0\0\0\1'
+        else: self.__buf = self.__buf + '\0\0\0\0'
 
     def pack_uhyper(self, x):
-       self.pack_uint(x>>32 & 0xffffffffL)
-       self.pack_uint(x & 0xffffffffL)
+        self.pack_uint(x>>32 & 0xffffffffL)
+        self.pack_uint(x & 0xffffffffL)
 
     pack_hyper = pack_uhyper
 
     def pack_float(self, x):
-       try: self.__buf = self.__buf + struct.pack('>f', x)
-       except struct.error, msg:
-           raise ConversionError, msg
+        try: self.__buf = self.__buf + struct.pack('>f', x)
+        except struct.error, msg:
+            raise ConversionError, msg
 
     def pack_double(self, x):
-       try: self.__buf = self.__buf + struct.pack('>d', x)
-       except struct.error, msg:
-           raise ConversionError, msg
+        try: self.__buf = self.__buf + struct.pack('>d', x)
+        except struct.error, msg:
+            raise ConversionError, msg
 
     def pack_fstring(self, n, s):
-       if n < 0:
-           raise ValueError, 'fstring size must be nonnegative'
-       n = ((n+3)/4)*4
-       data = s[:n]
-       data = data + (n - len(data)) * '\0'
-       self.__buf = self.__buf + data
+        if n < 0:
+            raise ValueError, 'fstring size must be nonnegative'
+        n = ((n+3)/4)*4
+        data = s[:n]
+        data = data + (n - len(data)) * '\0'
+        self.__buf = self.__buf + data
 
     pack_fopaque = pack_fstring
 
     def pack_string(self, s):
-       n = len(s)
-       self.pack_uint(n)
-       self.pack_fstring(n, s)
+        n = len(s)
+        self.pack_uint(n)
+        self.pack_fstring(n, s)
 
     pack_opaque = pack_string
     pack_bytes = pack_string
 
     def pack_list(self, list, pack_item):
-       for item in list:
-           self.pack_uint(1)
-           pack_item(item)
-       self.pack_uint(0)
+        for item in list:
+            self.pack_uint(1)
+            pack_item(item)
+        self.pack_uint(0)
 
     def pack_farray(self, n, list, pack_item):
-       if len(list) <> n:
-           raise ValueError, 'wrong array size'
-       for item in list:
-           pack_item(item)
+        if len(list) <> n:
+            raise ValueError, 'wrong array size'
+        for item in list:
+            pack_item(item)
 
     def pack_array(self, list, pack_item):
-       n = len(list)
-       self.pack_uint(n)
-       self.pack_farray(n, list, pack_item)
+        n = len(list)
+        self.pack_uint(n)
+        self.pack_farray(n, list, pack_item)
 
 
 \f
@@ -111,168 +111,168 @@ class Unpacker:
     """Unpacks various data representations from the given buffer."""
 
     def __init__(self, data):
-       self.reset(data)
+        self.reset(data)
 
     def reset(self, data):
-       self.__buf = data
-       self.__pos = 0
+        self.__buf = data
+        self.__pos = 0
 
     def get_position(self):
-       return self.__pos
+        return self.__pos
 
     def set_position(self, position):
-       self.__pos = position
+        self.__pos = position
 
     def get_buffer(self):
-       return self.__buf
+        return self.__buf
 
     def done(self):
-       if self.__pos < len(self.__buf):
-           raise Error('unextracted data remains')
+        if self.__pos < len(self.__buf):
+            raise Error('unextracted data remains')
 
     def unpack_uint(self):
-       i = self.__pos
-       self.__pos = j = i+4
-       data = self.__buf[i:j]
-       if len(data) < 4:
-           raise EOFError
-       x = struct.unpack('>L', data)[0]
-       try:
-           return int(x)
-       except OverflowError:
-           return x
+        i = self.__pos
+        self.__pos = j = i+4
+        data = self.__buf[i:j]
+        if len(data) < 4:
+            raise EOFError
+        x = struct.unpack('>L', data)[0]
+        try:
+            return int(x)
+        except OverflowError:
+            return x
 
     def unpack_int(self):
-       i = self.__pos
-       self.__pos = j = i+4
-       data = self.__buf[i:j]
-       if len(data) < 4:
-           raise EOFError
-       return struct.unpack('>l', data)[0]
+        i = self.__pos
+        self.__pos = j = i+4
+        data = self.__buf[i:j]
+        if len(data) < 4:
+            raise EOFError
+        return struct.unpack('>l', data)[0]
 
     unpack_enum = unpack_int
     unpack_bool = unpack_int
 
     def unpack_uhyper(self):
-       hi = self.unpack_uint()
-       lo = self.unpack_uint()
-       return long(hi)<<32 | lo
+        hi = self.unpack_uint()
+        lo = self.unpack_uint()
+        return long(hi)<<32 | lo
 
     def unpack_hyper(self):
-       x = self.unpack_uhyper()
-       if x >= 0x8000000000000000L:
-           x = x - 0x10000000000000000L
-       return x
+        x = self.unpack_uhyper()
+        if x >= 0x8000000000000000L:
+            x = x - 0x10000000000000000L
+        return x
 
     def unpack_float(self):
-       i = self.__pos
-       self.__pos = j = i+4
-       data = self.__buf[i:j]
-       if len(data) < 4:
-           raise EOFError
-       return struct.unpack('>f', data)[0]
+        i = self.__pos
+        self.__pos = j = i+4
+        data = self.__buf[i:j]
+        if len(data) < 4:
+            raise EOFError
+        return struct.unpack('>f', data)[0]
 
     def unpack_double(self):
-       i = self.__pos
-       self.__pos = j = i+8
-       data = self.__buf[i:j]
-       if len(data) < 8:
-           raise EOFError
-       return struct.unpack('>d', data)[0]
+        i = self.__pos
+        self.__pos = j = i+8
+        data = self.__buf[i:j]
+        if len(data) < 8:
+            raise EOFError
+        return struct.unpack('>d', data)[0]
 
     def unpack_fstring(self, n):
-       if n < 0:
-           raise ValueError, 'fstring size must be nonnegative'
-       i = self.__pos
-       j = i + (n+3)/4*4
-       if j > len(self.__buf):
-           raise EOFError
-       self.__pos = j
-       return self.__buf[i:i+n]
+        if n < 0:
+            raise ValueError, 'fstring size must be nonnegative'
+        i = self.__pos
+        j = i + (n+3)/4*4
+        if j > len(self.__buf):
+            raise EOFError
+        self.__pos = j
+        return self.__buf[i:i+n]
 
     unpack_fopaque = unpack_fstring
 
     def unpack_string(self):
-       n = self.unpack_uint()
-       return self.unpack_fstring(n)
+        n = self.unpack_uint()
+        return self.unpack_fstring(n)
 
     unpack_opaque = unpack_string
     unpack_bytes = unpack_string
 
     def unpack_list(self, unpack_item):
-       list = []
-       while 1:
-           x = self.unpack_uint()
-           if x == 0: break
-           if x <> 1:
-               raise ConversionError, '0 or 1 expected, got ' + `x`
-           item = unpack_item()
-           list.append(item)
-       return list
+        list = []
+        while 1:
+            x = self.unpack_uint()
+            if x == 0: break
+            if x <> 1:
+                raise ConversionError, '0 or 1 expected, got ' + `x`
+            item = unpack_item()
+            list.append(item)
+        return list
 
     def unpack_farray(self, n, unpack_item):
-       list = []
-       for i in range(n):
-           list.append(unpack_item())
-       return list
+        list = []
+        for i in range(n):
+            list.append(unpack_item())
+        return list
 
     def unpack_array(self, unpack_item):
-       n = self.unpack_uint()
-       return self.unpack_farray(n, unpack_item)
+        n = self.unpack_uint()
+        return self.unpack_farray(n, unpack_item)
 
 \f
 # test suite
 def _test():
     p = Packer()
     packtest = [
-       (p.pack_uint,    (9,)),
-       (p.pack_bool,    (None,)),
-       (p.pack_bool,    ('hello',)),
-       (p.pack_uhyper,  (45L,)),
-       (p.pack_float,   (1.9,)),
-       (p.pack_double,  (1.9,)),
-       (p.pack_string,  ('hello world',)),
-       (p.pack_list,    (range(5), p.pack_uint)),
-       (p.pack_array,   (['what', 'is', 'hapnin', 'doctor'], p.pack_string)),
-       ]
+        (p.pack_uint,    (9,)),
+        (p.pack_bool,    (None,)),
+        (p.pack_bool,    ('hello',)),
+        (p.pack_uhyper,  (45L,)),
+        (p.pack_float,   (1.9,)),
+        (p.pack_double,  (1.9,)),
+        (p.pack_string,  ('hello world',)),
+        (p.pack_list,    (range(5), p.pack_uint)),
+        (p.pack_array,   (['what', 'is', 'hapnin', 'doctor'], p.pack_string)),
+        ]
     succeedlist = [1] * len(packtest)
     count = 0
     for method, args in packtest:
-       print 'pack test', count,
-       try:
-           apply(method, args)
-           print 'succeeded'
-       except ConversionError, var:
-           print 'ConversionError:', var.msg
-           succeedlist[count] = 0
-       count = count + 1
+        print 'pack test', count,
+        try:
+            apply(method, args)
+            print 'succeeded'
+        except ConversionError, var:
+            print 'ConversionError:', var.msg
+            succeedlist[count] = 0
+        count = count + 1
     data = p.get_buffer()
     # now verify
     up = Unpacker(data)
     unpacktest = [
-       (up.unpack_uint,   (), lambda x: x == 9),
-       (up.unpack_bool,   (), lambda x: not x),
-       (up.unpack_bool,   (), lambda x: x),
-       (up.unpack_uhyper, (), lambda x: x == 45L),
-       (up.unpack_float,  (), lambda x: 1.89 < x < 1.91),
-       (up.unpack_double, (), lambda x: 1.89 < x < 1.91),
-       (up.unpack_string, (), lambda x: x == 'hello world'),
-       (up.unpack_list,   (up.unpack_uint,), lambda x: x == range(5)),
-       (up.unpack_array,  (up.unpack_string,),
-        lambda x: x == ['what', 'is', 'hapnin', 'doctor']),
-       ]
+        (up.unpack_uint,   (), lambda x: x == 9),
+        (up.unpack_bool,   (), lambda x: not x),
+        (up.unpack_bool,   (), lambda x: x),
+        (up.unpack_uhyper, (), lambda x: x == 45L),
+        (up.unpack_float,  (), lambda x: 1.89 < x < 1.91),
+        (up.unpack_double, (), lambda x: 1.89 < x < 1.91),
+        (up.unpack_string, (), lambda x: x == 'hello world'),
+        (up.unpack_list,   (up.unpack_uint,), lambda x: x == range(5)),
+        (up.unpack_array,  (up.unpack_string,),
+         lambda x: x == ['what', 'is', 'hapnin', 'doctor']),
+        ]
     count = 0
     for method, args, pred in unpacktest:
-       print 'unpack test', count,
-       try:
-           if succeedlist[count]:
-               x = apply(method, args)
-               print pred(x) and 'succeeded' or 'failed', ':', x
-           else:
-               print 'skipping'
-       except ConversionError, var:
-           print 'ConversionError:', var.msg
-       count = count + 1
+        print 'unpack test', count,
+        try:
+            if succeedlist[count]:
+                x = apply(method, args)
+                print pred(x) and 'succeeded' or 'failed', ':', x
+            else:
+                print 'skipping'
+        except ConversionError, var:
+            print 'ConversionError:', var.msg
+        count = count + 1
 
 \f
 if __name__ == '__main__':
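The Packer/Unpacker pair above serializes values to and from XDR byte strings, much as the module's own _test() routine exercises them. A minimal round trip using only methods visible in the diff:

    import xdrlib

    p = xdrlib.Packer()
    p.pack_uint(7)
    p.pack_string('hello world')
    data = p.get_buffer()

    u = xdrlib.Unpacker(data)
    print u.unpack_uint()        # 7
    print u.unpack_string()      # 'hello world'
    u.done()                     # raises Error if unextracted data remains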
index 9f6e23ecca3da43cc026b894d744b014b9906ace..6d7f1d1bd9c7651a8b09e96ca16a9ad7614b5975 100644 (file)
@@ -12,10 +12,10 @@ _opS = '[ \t\r\n]*'
 _Name = '[a-zA-Z_:][-a-zA-Z0-9._:]*'
 interesting = re.compile('[&<]')
 incomplete = re.compile('&(' + _Name + '|#[0-9]*|#x[0-9a-fA-F]*)?|'
-                          '<([a-zA-Z_:][^<>]*|'
-                             '/([a-zA-Z_:][^<>]*)?|'
-                             '![^<>]*|'
-                             r'\?[^<>]*)?')
+                           '<([a-zA-Z_:][^<>]*|'
+                              '/([a-zA-Z_:][^<>]*)?|'
+                              '![^<>]*|'
+                              r'\?[^<>]*)?')
 
 ref = re.compile('&(' + _Name + '|#[0-9]+|#x[0-9a-fA-F]+);?')
 entityref = re.compile('&(?P<name>' + _Name + ')[^-a-zA-Z0-9._:]')
@@ -57,497 +57,497 @@ class XMLParser:
 
     # Interface -- initialize and reset this instance
     def __init__(self, verbose=0):
-       self.verbose = verbose
-       self.reset()
+        self.verbose = verbose
+        self.reset()
 
     # Interface -- reset this instance.  Loses all unprocessed data
     def reset(self):
-       self.rawdata = ''
-       self.stack = []
-       self.nomoretags = 0
-       self.literal = 0
-       self.lineno = 1
-       self.__at_start = 1
-       self.__seen_doctype = None
-       self.__seen_starttag = 0
+        self.rawdata = ''
+        self.stack = []
+        self.nomoretags = 0
+        self.literal = 0
+        self.lineno = 1
+        self.__at_start = 1
+        self.__seen_doctype = None
+        self.__seen_starttag = 0
 
     # For derived classes only -- enter literal mode (CDATA) till EOF
     def setnomoretags(self):
-       self.nomoretags = self.literal = 1
+        self.nomoretags = self.literal = 1
 
     # For derived classes only -- enter literal mode (CDATA)
     def setliteral(self, *args):
-       self.literal = 1
+        self.literal = 1
 
     # Interface -- feed some data to the parser.  Call this as
     # often as you want, with as little or as much text as you
     # want (may include '\n').  (This just saves the text, all the
     # processing is done by goahead().)
     def feed(self, data):
-       self.rawdata = self.rawdata + data
-       self.goahead(0)
+        self.rawdata = self.rawdata + data
+        self.goahead(0)
 
     # Interface -- handle the remaining data
     def close(self):
-       self.goahead(1)
+        self.goahead(1)
 
     # Interface -- translate references
     def translate_references(self, data):
-       newdata = []
-       i = 0
-       while 1:
-           res = ref.search(data, i)
-           if res is None:
-               newdata.append(data[i:])
-               return string.join(newdata, '')
-           if data[res.end(0) - 1] != ';':
-               self.syntax_error("`;' missing after entity/char reference")
-           newdata.append(data[i:res.start(0)])
-           str = res.group(1)
-           if str[0] == '#':
-               if str[1] == 'x':
-                   newdata.append(chr(string.atoi(str[2:], 16)))
-               else:
-                   newdata.append(chr(string.atoi(str[1:])))
-           else:
-               try:
-                   newdata.append(self.entitydefs[str])
-               except KeyError:
-                   # can't do it, so keep the entity ref in
-                   newdata.append('&' + str + ';')
-           i = res.end(0)
+        newdata = []
+        i = 0
+        while 1:
+            res = ref.search(data, i)
+            if res is None:
+                newdata.append(data[i:])
+                return string.join(newdata, '')
+            if data[res.end(0) - 1] != ';':
+                self.syntax_error("`;' missing after entity/char reference")
+            newdata.append(data[i:res.start(0)])
+            str = res.group(1)
+            if str[0] == '#':
+                if str[1] == 'x':
+                    newdata.append(chr(string.atoi(str[2:], 16)))
+                else:
+                    newdata.append(chr(string.atoi(str[1:])))
+            else:
+                try:
+                    newdata.append(self.entitydefs[str])
+                except KeyError:
+                    # can't do it, so keep the entity ref in
+                    newdata.append('&' + str + ';')
+            i = res.end(0)
 
     # Internal -- handle data as far as reasonable.  May leave state
     # and data to be processed by a subsequent call.  If 'end' is
     # true, force handling all data as if followed by EOF marker.
     def goahead(self, end):
-       rawdata = self.rawdata
-       i = 0
-       n = len(rawdata)
-       while i < n:
-           if i > 0:
-               self.__at_start = 0
-           if self.nomoretags:
-               data = rawdata[i:n]
-               self.handle_data(data)
-               self.lineno = self.lineno + string.count(data, '\n')
-               i = n
-               break
-           res = interesting.search(rawdata, i)
-           if res:
-                   j = res.start(0)
-           else:
-                   j = n
-           if i < j:
-               self.__at_start = 0
-               data = rawdata[i:j]
-               self.handle_data(data)
-               self.lineno = self.lineno + string.count(data, '\n')
-           i = j
-           if i == n: break
-           if rawdata[i] == '<':
-               if starttagopen.match(rawdata, i):
-                   if self.literal:
-                       data = rawdata[i]
-                       self.handle_data(data)
-                       self.lineno = self.lineno + string.count(data, '\n')
-                       i = i+1
-                       continue
-                   k = self.parse_starttag(i)
-                   if k < 0: break
-                   self.__seen_starttag = 1
-                   self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
-                   i = k
-                   continue
-               if endtagopen.match(rawdata, i):
-                   k = self.parse_endtag(i)
-                   if k < 0: break
-                   self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
-                   i =  k
-                   self.literal = 0
-                   continue
-               if commentopen.match(rawdata, i):
-                   if self.literal:
-                       data = rawdata[i]
-                       self.handle_data(data)
-                       self.lineno = self.lineno + string.count(data, '\n')
-                       i = i+1
-                       continue
-                   k = self.parse_comment(i)
-                   if k < 0: break
-                   self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
-                   i = k
-                   continue
-               if cdataopen.match(rawdata, i):
-                   k = self.parse_cdata(i)
-                   if k < 0: break
-                   self.lineno = self.lineno + string.count(rawdata[i:i], '\n')
-                   i = k
-                   continue
-               res = procopen.match(rawdata, i)
-               if res:
-                   k = self.parse_proc(i)
-                   if k < 0: break
-                   self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
-                   i = k
-                   continue
-               res = doctype.match(rawdata, i)
-               if res:
-                   if self.literal:
-                       data = rawdata[i]
-                       self.handle_data(data)
-                       self.lineno = self.lineno + string.count(data, '\n')
-                       i = i+1
-                       continue
-                   if self.__seen_doctype:
-                       self.syntax_error('multiple DOCTYPE elements')
-                   if self.__seen_starttag:
-                       self.syntax_error('DOCTYPE not at beginning of document')
-                   k = self.parse_doctype(res)
-                   if k < 0: break
-                   self.__seen_doctype = res.group('name')
-                   self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
-                   i = k
-                   continue
-               res = special.match(rawdata, i)
-               if res:
-                   if self.literal:
-                       data = rawdata[i]
-                       self.handle_data(data)
-                       self.lineno = self.lineno + string.count(data, '\n')
-                       i = i+1
-                       continue
-                   self.handle_special(res.group('special'))
-                   self.lineno = self.lineno + string.count(res.group(0), '\n')
-                   i = res.end(0)
-                   continue
-           elif rawdata[i] == '&':
-               res = charref.match(rawdata, i)
-               if res is not None:
-                   i = res.end(0)
-                   if rawdata[i-1] != ';':
-                       self.syntax_error("`;' missing in charref")
-                       i = i-1
-                   self.handle_charref(res.group('char')[:-1])
-                   self.lineno = self.lineno + string.count(res.group(0), '\n')
-                   continue
-               res = entityref.match(rawdata, i)
-               if res is not None:
-                   i = res.end(0)
-                   if rawdata[i-1] != ';':
-                       self.syntax_error("`;' missing in entityref")
-                       i = i-1
-                   self.handle_entityref(res.group('name'))
-                   self.lineno = self.lineno + string.count(res.group(0), '\n')
-                   continue
-           else:
-               raise RuntimeError, 'neither < nor & ??'
-           # We get here only if incomplete matches but
-           # nothing else
-           res = incomplete.match(rawdata, i)
-           if not res:
-               data = rawdata[i]
-               self.handle_data(data)
-               self.lineno = self.lineno + string.count(data, '\n')
-               i = i+1
-               continue
-           j = res.end(0)
-           if j == n:
-               break # Really incomplete
-           self.syntax_error("bogus `<' or `&'")
-           data = res.group(0)
-           self.handle_data(data)
-           self.lineno = self.lineno + string.count(data, '\n')
-           i = j
-       # end while
-       if end and i < n:
-           data = rawdata[i:n]
-           self.handle_data(data)
-           self.lineno = self.lineno + string.count(data, '\n')
-           i = n
-       self.rawdata = rawdata[i:]
-       if end:
-           if self.stack:
-               self.syntax_error('missing end tags')
-               while self.stack:
-                   self.finish_endtag(self.stack[-1])
+        rawdata = self.rawdata
+        i = 0
+        n = len(rawdata)
+        while i < n:
+            if i > 0:
+                self.__at_start = 0
+            if self.nomoretags:
+                data = rawdata[i:n]
+                self.handle_data(data)
+                self.lineno = self.lineno + string.count(data, '\n')
+                i = n
+                break
+            res = interesting.search(rawdata, i)
+            if res:
+                    j = res.start(0)
+            else:
+                    j = n
+            if i < j:
+                self.__at_start = 0
+                data = rawdata[i:j]
+                self.handle_data(data)
+                self.lineno = self.lineno + string.count(data, '\n')
+            i = j
+            if i == n: break
+            if rawdata[i] == '<':
+                if starttagopen.match(rawdata, i):
+                    if self.literal:
+                        data = rawdata[i]
+                        self.handle_data(data)
+                        self.lineno = self.lineno + string.count(data, '\n')
+                        i = i+1
+                        continue
+                    k = self.parse_starttag(i)
+                    if k < 0: break
+                    self.__seen_starttag = 1
+                    self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
+                    i = k
+                    continue
+                if endtagopen.match(rawdata, i):
+                    k = self.parse_endtag(i)
+                    if k < 0: break
+                    self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
+                    i =  k
+                    self.literal = 0
+                    continue
+                if commentopen.match(rawdata, i):
+                    if self.literal:
+                        data = rawdata[i]
+                        self.handle_data(data)
+                        self.lineno = self.lineno + string.count(data, '\n')
+                        i = i+1
+                        continue
+                    k = self.parse_comment(i)
+                    if k < 0: break
+                    self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
+                    i = k
+                    continue
+                if cdataopen.match(rawdata, i):
+                    k = self.parse_cdata(i)
+                    if k < 0: break
+                    self.lineno = self.lineno + string.count(rawdata[i:i], '\n')
+                    i = k
+                    continue
+                res = procopen.match(rawdata, i)
+                if res:
+                    k = self.parse_proc(i)
+                    if k < 0: break
+                    self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
+                    i = k
+                    continue
+                res = doctype.match(rawdata, i)
+                if res:
+                    if self.literal:
+                        data = rawdata[i]
+                        self.handle_data(data)
+                        self.lineno = self.lineno + string.count(data, '\n')
+                        i = i+1
+                        continue
+                    if self.__seen_doctype:
+                        self.syntax_error('multiple DOCTYPE elements')
+                    if self.__seen_starttag:
+                        self.syntax_error('DOCTYPE not at beginning of document')
+                    k = self.parse_doctype(res)
+                    if k < 0: break
+                    self.__seen_doctype = res.group('name')
+                    self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
+                    i = k
+                    continue
+                res = special.match(rawdata, i)
+                if res:
+                    if self.literal:
+                        data = rawdata[i]
+                        self.handle_data(data)
+                        self.lineno = self.lineno + string.count(data, '\n')
+                        i = i+1
+                        continue
+                    self.handle_special(res.group('special'))
+                    self.lineno = self.lineno + string.count(res.group(0), '\n')
+                    i = res.end(0)
+                    continue
+            elif rawdata[i] == '&':
+                res = charref.match(rawdata, i)
+                if res is not None:
+                    i = res.end(0)
+                    if rawdata[i-1] != ';':
+                        self.syntax_error("`;' missing in charref")
+                        i = i-1
+                    self.handle_charref(res.group('char')[:-1])
+                    self.lineno = self.lineno + string.count(res.group(0), '\n')
+                    continue
+                res = entityref.match(rawdata, i)
+                if res is not None:
+                    i = res.end(0)
+                    if rawdata[i-1] != ';':
+                        self.syntax_error("`;' missing in entityref")
+                        i = i-1
+                    self.handle_entityref(res.group('name'))
+                    self.lineno = self.lineno + string.count(res.group(0), '\n')
+                    continue
+            else:
+                raise RuntimeError, 'neither < nor & ??'
+            # We get here only if incomplete matches but
+            # nothing else
+            res = incomplete.match(rawdata, i)
+            if not res:
+                data = rawdata[i]
+                self.handle_data(data)
+                self.lineno = self.lineno + string.count(data, '\n')
+                i = i+1
+                continue
+            j = res.end(0)
+            if j == n:
+                break # Really incomplete
+            self.syntax_error("bogus `<' or `&'")
+            data = res.group(0)
+            self.handle_data(data)
+            self.lineno = self.lineno + string.count(data, '\n')
+            i = j
+        # end while
+        if end and i < n:
+            data = rawdata[i:n]
+            self.handle_data(data)
+            self.lineno = self.lineno + string.count(data, '\n')
+            i = n
+        self.rawdata = rawdata[i:]
+        if end:
+            if self.stack:
+                self.syntax_error('missing end tags')
+                while self.stack:
+                    self.finish_endtag(self.stack[-1])
 
     # Internal -- parse comment, return length or -1 if not terminated
     def parse_comment(self, i):
-       rawdata = self.rawdata
-       if rawdata[i:i+4] <> '<!--':
-           raise RuntimeError, 'unexpected call to handle_comment'
-       res = commentclose.search(rawdata, i+4)
-       if not res:
-           return -1
-       # doubledash search will succeed because it's a subset of commentclose
-       if doubledash.search(rawdata, i+4).start(0) < res.start(0):
-           self.syntax_error("`--' inside comment")
-       self.handle_comment(rawdata[i+4: res.start(0)])
-       return res.end(0)
+        rawdata = self.rawdata
+        if rawdata[i:i+4] <> '<!--':
+            raise RuntimeError, 'unexpected call to handle_comment'
+        res = commentclose.search(rawdata, i+4)
+        if not res:
+            return -1
+        # doubledash search will succeed because it's a subset of commentclose
+        if doubledash.search(rawdata, i+4).start(0) < res.start(0):
+            self.syntax_error("`--' inside comment")
+        self.handle_comment(rawdata[i+4: res.start(0)])
+        return res.end(0)
 
     # Internal -- handle DOCTYPE tag, return length or -1 if not terminated
     def parse_doctype(self, res):
-       rawdata = self.rawdata
-       n = len(rawdata)
-       name = res.group('name')
-       j = k = res.end(0)
-       level = 0
-       while k < n:
-           c = rawdata[k]
-           if c == '<':
-               level = level + 1
-           elif c == '>':
-               if level == 0:
-                   self.handle_doctype(name, rawdata[j:k])
-                   return k+1
-               level = level - 1
-           k = k+1
-       return -1
+        rawdata = self.rawdata
+        n = len(rawdata)
+        name = res.group('name')
+        j = k = res.end(0)
+        level = 0
+        while k < n:
+            c = rawdata[k]
+            if c == '<':
+                level = level + 1
+            elif c == '>':
+                if level == 0:
+                    self.handle_doctype(name, rawdata[j:k])
+                    return k+1
+                level = level - 1
+            k = k+1
+        return -1
 
     # Internal -- handle CDATA tag, return length or -1 if not terminated
     def parse_cdata(self, i):
-       rawdata = self.rawdata
-       if rawdata[i:i+9] <> '<![CDATA[':
-           raise RuntimeError, 'unexpected call to handle_cdata'
-       res = cdataclose.search(rawdata, i+9)
-       if not res:
-           return -1
-       self.handle_cdata(rawdata[i+9:res.start(0)])
-       return res.end(0)
+        rawdata = self.rawdata
+        if rawdata[i:i+9] <> '<![CDATA[':
+            raise RuntimeError, 'unexpected call to handle_cdata'
+        res = cdataclose.search(rawdata, i+9)
+        if not res:
+            return -1
+        self.handle_cdata(rawdata[i+9:res.start(0)])
+        return res.end(0)
 
     __xml_attributes = {'version': '1.0', 'standalone': 'no', 'encoding': None}
     # Internal -- handle a processing instruction tag
     def parse_proc(self, i):
-       rawdata = self.rawdata
-       end = procclose.search(rawdata, i)
-       if not end:
-           return -1
-       j = end.start(0)
-       res = tagfind.match(rawdata, i+2)
-       if not res:
-           raise RuntimeError, 'unexpected call to parse_proc'
-       k = res.end(0)
-       name = res.group(0)
-       if name == 'xml':
-           if self.__at_start:
-               attrdict, k = self.parse_attributes('xml', k, j,
-                                                   self.__xml_attributes)
-               if k != j:
-                   self.syntax_error('garbage at end of <?xml?>')
-               if attrdict['version'] != '1.0':
-                   self.syntax_error('only XML version 1.0 supported')
-               self.handle_xml(attrdict.get('encoding', None),
-                               attrdict['standalone'])
-               return end.end(0)
-           else:
-               self.syntax_error("<?xml?> tag not at start of document")
-       self.handle_proc(name, rawdata[k:j])
-       return end.end(0)
+        rawdata = self.rawdata
+        end = procclose.search(rawdata, i)
+        if not end:
+            return -1
+        j = end.start(0)
+        res = tagfind.match(rawdata, i+2)
+        if not res:
+            raise RuntimeError, 'unexpected call to parse_proc'
+        k = res.end(0)
+        name = res.group(0)
+        if name == 'xml':
+            if self.__at_start:
+                attrdict, k = self.parse_attributes('xml', k, j,
+                                                    self.__xml_attributes)
+                if k != j:
+                    self.syntax_error('garbage at end of <?xml?>')
+                if attrdict['version'] != '1.0':
+                    self.syntax_error('only XML version 1.0 supported')
+                self.handle_xml(attrdict.get('encoding', None),
+                                attrdict['standalone'])
+                return end.end(0)
+            else:
+                self.syntax_error("<?xml?> tag not at start of document")
+        self.handle_proc(name, rawdata[k:j])
+        return end.end(0)
 
     # Internal -- parse attributes between i and j
     def parse_attributes(self, tag, k, j, attributes = None):
-       rawdata = self.rawdata
-       # Now parse the data between k and j into a tag and attrs
-       attrdict = {}
-       try:
-           # convert attributes list to dictionary
-           d = {}
-           for a in attributes:
-               d[a] = None
-           attributes = d
-       except TypeError:
-           pass
-       while k < j:
-           res = attrfind.match(rawdata, k)
-           if not res: break
-           attrname, attrvalue = res.group('name', 'value')
-           if attrvalue is None:
-               self.syntax_error('no attribute value specified')
-               attrvalue = attrname
-           elif attrvalue[:1] == "'" == attrvalue[-1:] or \
-                attrvalue[:1] == '"' == attrvalue[-1:]:
-               attrvalue = attrvalue[1:-1]
-           else:
-               self.syntax_error('attribute value not quoted')
-           if attributes is not None and not attributes.has_key(attrname):
-               self.syntax_error('unknown attribute %s of element %s' %
-                                 (attrname, tag))
-           if attrdict.has_key(attrname):
-               self.syntax_error('attribute specified twice')
-           attrdict[attrname] = self.translate_references(attrvalue)
-           k = res.end(0)
-       if attributes is not None:
-           # fill in with default attributes
-           for key, val in attributes.items():
-               if val is not None and not attrdict.has_key(key):
-                   attrdict[key] = val
-       return attrdict, k
+        rawdata = self.rawdata
+        # Now parse the data between k and j into a tag and attrs
+        attrdict = {}
+        try:
+            # convert attributes list to dictionary
+            d = {}
+            for a in attributes:
+                d[a] = None
+            attributes = d
+        except TypeError:
+            pass
+        while k < j:
+            res = attrfind.match(rawdata, k)
+            if not res: break
+            attrname, attrvalue = res.group('name', 'value')
+            if attrvalue is None:
+                self.syntax_error('no attribute value specified')
+                attrvalue = attrname
+            elif attrvalue[:1] == "'" == attrvalue[-1:] or \
+                 attrvalue[:1] == '"' == attrvalue[-1:]:
+                attrvalue = attrvalue[1:-1]
+            else:
+                self.syntax_error('attribute value not quoted')
+            if attributes is not None and not attributes.has_key(attrname):
+                self.syntax_error('unknown attribute %s of element %s' %
+                                  (attrname, tag))
+            if attrdict.has_key(attrname):
+                self.syntax_error('attribute specified twice')
+            attrdict[attrname] = self.translate_references(attrvalue)
+            k = res.end(0)
+        if attributes is not None:
+            # fill in with default attributes
+            for key, val in attributes.items():
+                if val is not None and not attrdict.has_key(key):
+                    attrdict[key] = val
+        return attrdict, k
 
     # Internal -- handle starttag, return length or -1 if not terminated
     def parse_starttag(self, i):
-       rawdata = self.rawdata
-       # i points to start of tag
-       end = endbracket.search(rawdata, i+1)
-       if not end:
-           return -1
-       j = end.start(0)
-       res = tagfind.match(rawdata, i+1)
-       if not res:
-           raise RuntimeError, 'unexpected call to parse_starttag'
-       k = res.end(0)
-       tag = res.group(0)
-       if not self.__seen_starttag and self.__seen_doctype:
-           if tag != self.__seen_doctype:
-               self.syntax_error('starttag does not match DOCTYPE')
-       if hasattr(self, tag + '_attributes'):
-           attributes = getattr(self, tag + '_attributes')
-       else:
-           attributes = None
-       attrdict, k = self.parse_attributes(tag, k, j, attributes)
-       res = starttagend.match(rawdata, k)
-       if not res:
-           self.syntax_error('garbage in start tag')
-       self.finish_starttag(tag, attrdict)
-       if res and res.group('slash') == '/':
-           self.finish_endtag(tag)
-       return end.end(0)
+        rawdata = self.rawdata
+        # i points to start of tag
+        end = endbracket.search(rawdata, i+1)
+        if not end:
+            return -1
+        j = end.start(0)
+        res = tagfind.match(rawdata, i+1)
+        if not res:
+            raise RuntimeError, 'unexpected call to parse_starttag'
+        k = res.end(0)
+        tag = res.group(0)
+        if not self.__seen_starttag and self.__seen_doctype:
+            if tag != self.__seen_doctype:
+                self.syntax_error('starttag does not match DOCTYPE')
+        if hasattr(self, tag + '_attributes'):
+            attributes = getattr(self, tag + '_attributes')
+        else:
+            attributes = None
+        attrdict, k = self.parse_attributes(tag, k, j, attributes)
+        res = starttagend.match(rawdata, k)
+        if not res:
+            self.syntax_error('garbage in start tag')
+        self.finish_starttag(tag, attrdict)
+        if res and res.group('slash') == '/':
+            self.finish_endtag(tag)
+        return end.end(0)
 
     # Internal -- parse endtag
     def parse_endtag(self, i):
-       rawdata = self.rawdata
-       end = endbracket.search(rawdata, i+1)
-       if not end:
-           return -1
-       res = tagfind.match(rawdata, i+2)
-       if not res:
-           self.syntax_error('no name specified in end tag')
-           tag = ''
-           k = i+2
-       else:
-           tag = res.group(0)
-           k = res.end(0)
-       if k != end.start(0):
-           # check that there is only white space at end of tag
-           res = space.match(rawdata, k)
-           if res is None or res.end(0) != end.start(0):
-               self.syntax_error('garbage in end tag')
-       self.finish_endtag(tag)
-       return end.end(0)
+        rawdata = self.rawdata
+        end = endbracket.search(rawdata, i+1)
+        if not end:
+            return -1
+        res = tagfind.match(rawdata, i+2)
+        if not res:
+            self.syntax_error('no name specified in end tag')
+            tag = ''
+            k = i+2
+        else:
+            tag = res.group(0)
+            k = res.end(0)
+        if k != end.start(0):
+            # check that there is only white space at end of tag
+            res = space.match(rawdata, k)
+            if res is None or res.end(0) != end.start(0):
+                self.syntax_error('garbage in end tag')
+        self.finish_endtag(tag)
+        return end.end(0)
 
     # Internal -- finish processing of start tag
     # Return -1 for unknown tag, 1 for balanced tag
     def finish_starttag(self, tag, attrs):
-       self.stack.append(tag)
-       try:
-           method = getattr(self, 'start_' + tag)
-       except AttributeError:
-           self.unknown_starttag(tag, attrs)
-           return -1
-       else:
-           self.handle_starttag(tag, method, attrs)
-           return 1
+        self.stack.append(tag)
+        try:
+            method = getattr(self, 'start_' + tag)
+        except AttributeError:
+            self.unknown_starttag(tag, attrs)
+            return -1
+        else:
+            self.handle_starttag(tag, method, attrs)
+            return 1
 
     # Internal -- finish processing of end tag
     def finish_endtag(self, tag):
-       if not tag:
-           self.syntax_error('name-less end tag')
-           found = len(self.stack) - 1
-           if found < 0:
-               self.unknown_endtag(tag)
-               return
-       else:
-           if tag not in self.stack:
-               self.syntax_error('unopened end tag')
-               try:
-                   method = getattr(self, 'end_' + tag)
-               except AttributeError:
-                   self.unknown_endtag(tag)
-               return
-           found = len(self.stack)
-           for i in range(found):
-               if self.stack[i] == tag:
-                   found = i
-       while len(self.stack) > found:
-           if found < len(self.stack) - 1:
-               self.syntax_error('missing close tag for %s' % self.stack[-1])
-           tag = self.stack[-1]
-           try:
-               method = getattr(self, 'end_' + tag)
-           except AttributeError:
-               method = None
-           if method:
-               self.handle_endtag(tag, method)
-           else:
-               self.unknown_endtag(tag)
-           del self.stack[-1]
+        if not tag:
+            self.syntax_error('name-less end tag')
+            found = len(self.stack) - 1
+            if found < 0:
+                self.unknown_endtag(tag)
+                return
+        else:
+            if tag not in self.stack:
+                self.syntax_error('unopened end tag')
+                try:
+                    method = getattr(self, 'end_' + tag)
+                except AttributeError:
+                    self.unknown_endtag(tag)
+                return
+            found = len(self.stack)
+            for i in range(found):
+                if self.stack[i] == tag:
+                    found = i
+        while len(self.stack) > found:
+            if found < len(self.stack) - 1:
+                self.syntax_error('missing close tag for %s' % self.stack[-1])
+            tag = self.stack[-1]
+            try:
+                method = getattr(self, 'end_' + tag)
+            except AttributeError:
+                method = None
+            if method:
+                self.handle_endtag(tag, method)
+            else:
+                self.unknown_endtag(tag)
+            del self.stack[-1]
 
     # Overridable -- handle xml processing instruction
     def handle_xml(self, encoding, standalone):
-       pass
+        pass
 
     # Overridable -- handle DOCTYPE
     def handle_doctype(self, tag, data):
-       pass
+        pass
 
     # Overridable -- handle start tag
     def handle_starttag(self, tag, method, attrs):
-       method(attrs)
+        method(attrs)
 
     # Overridable -- handle end tag
     def handle_endtag(self, tag, method):
-       method()
+        method()
 
     # Example -- handle character reference, no need to override
     def handle_charref(self, name):
-       try:
-           if name[0] == 'x':
-               n = string.atoi(name[1:], 16)
-           else:
-               n = string.atoi(name)
-       except string.atoi_error:
-           self.unknown_charref(name)
-           return
-       if not 0 <= n <= 255:
-           self.unknown_charref(name)
-           return
-       self.handle_data(chr(n))
+        try:
+            if name[0] == 'x':
+                n = string.atoi(name[1:], 16)
+            else:
+                n = string.atoi(name)
+        except string.atoi_error:
+            self.unknown_charref(name)
+            return
+        if not 0 <= n <= 255:
+            self.unknown_charref(name)
+            return
+        self.handle_data(chr(n))
 
     # Definition of entities -- derived classes may override
     entitydefs = {'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': "'"}
 
     # Example -- handle entity reference, no need to override
     def handle_entityref(self, name):
-       table = self.entitydefs
-       if table.has_key(name):
-           self.handle_data(table[name])
-       else:
-           self.unknown_entityref(name)
-           return
+        table = self.entitydefs
+        if table.has_key(name):
+            self.handle_data(table[name])
+        else:
+            self.unknown_entityref(name)
+            return
 
     # Example -- handle data, should be overridden
     def handle_data(self, data):
-       pass
+        pass
 
     # Example -- handle cdata, could be overridden
     def handle_cdata(self, data):
-       pass
+        pass
 
     # Example -- handle comment, could be overridden
     def handle_comment(self, data):
-       pass
+        pass
 
     # Example -- handle processing instructions, could be overridden
     def handle_proc(self, name, data):
-       pass
+        pass
 
     # Example -- handle special instructions, could be overridden
     def handle_special(self, data):
-       pass
+        pass
 
     # Example -- handle relatively harmless syntax errors, could be overridden
     def syntax_error(self, message):
-       raise RuntimeError, 'Syntax error at line %d: %s' % (self.lineno, message)
+        raise RuntimeError, 'Syntax error at line %d: %s' % (self.lineno, message)
 
     # To be overridden -- handlers for unknown objects
     def unknown_starttag(self, tag, attrs): pass
@@ -559,109 +559,109 @@ class XMLParser:
 class TestXMLParser(XMLParser):
 
     def __init__(self, verbose=0):
-       self.testdata = ""
-       XMLParser.__init__(self, verbose)
+        self.testdata = ""
+        XMLParser.__init__(self, verbose)
 
     def handle_xml(self, encoding, standalone):
-       self.flush()
-       print 'xml: encoding =',encoding,'standalone =',standalone
+        self.flush()
+        print 'xml: encoding =',encoding,'standalone =',standalone
 
     def handle_doctype(self, tag, data):
-       self.flush()
-       print 'DOCTYPE:',tag, `data`
+        self.flush()
+        print 'DOCTYPE:',tag, `data`
 
     def handle_data(self, data):
-       self.testdata = self.testdata + data
-       if len(`self.testdata`) >= 70:
-           self.flush()
+        self.testdata = self.testdata + data
+        if len(`self.testdata`) >= 70:
+            self.flush()
 
     def flush(self):
-       data = self.testdata
-       if data:
-           self.testdata = ""
-           print 'data:', `data`
+        data = self.testdata
+        if data:
+            self.testdata = ""
+            print 'data:', `data`
 
     def handle_cdata(self, data):
-       self.flush()
-       print 'cdata:', `data`
+        self.flush()
+        print 'cdata:', `data`
 
     def handle_proc(self, name, data):
-       self.flush()
-       print 'processing:',name,`data`
+        self.flush()
+        print 'processing:',name,`data`
 
     def handle_special(self, data):
-       self.flush()
-       print 'special:',`data`
+        self.flush()
+        print 'special:',`data`
 
     def handle_comment(self, data):
-       self.flush()
-       r = `data`
-       if len(r) > 68:
-           r = r[:32] + '...' + r[-32:]
-       print 'comment:', r
+        self.flush()
+        r = `data`
+        if len(r) > 68:
+            r = r[:32] + '...' + r[-32:]
+        print 'comment:', r
 
     def syntax_error(self, message):
-       print 'error at line %d:' % self.lineno, message
+        print 'error at line %d:' % self.lineno, message
 
     def unknown_starttag(self, tag, attrs):
-       self.flush()
-       if not attrs:
-           print 'start tag: <' + tag + '>'
-       else:
-           print 'start tag: <' + tag,
-           for name, value in attrs.items():
-               print name + '=' + '"' + value + '"',
-           print '>'
+        self.flush()
+        if not attrs:
+            print 'start tag: <' + tag + '>'
+        else:
+            print 'start tag: <' + tag,
+            for name, value in attrs.items():
+                print name + '=' + '"' + value + '"',
+            print '>'
 
     def unknown_endtag(self, tag):
-       self.flush()
-       print 'end tag: </' + tag + '>'
+        self.flush()
+        print 'end tag: </' + tag + '>'
 
     def unknown_entityref(self, ref):
-       self.flush()
-       print '*** unknown entity ref: &' + ref + ';'
+        self.flush()
+        print '*** unknown entity ref: &' + ref + ';'
 
     def unknown_charref(self, ref):
-       self.flush()
-       print '*** unknown char ref: &#' + ref + ';'
+        self.flush()
+        print '*** unknown char ref: &#' + ref + ';'
 
     def close(self):
-       XMLParser.close(self)
-       self.flush()
+        XMLParser.close(self)
+        self.flush()
 
 def test(args = None):
     import sys
 
     if not args:
-       args = sys.argv[1:]
+        args = sys.argv[1:]
 
     if args and args[0] == '-s':
-       args = args[1:]
-       klass = XMLParser
+        args = args[1:]
+        klass = XMLParser
     else:
-       klass = TestXMLParser
+        klass = TestXMLParser
 
     if args:
-       file = args[0]
+        file = args[0]
     else:
-       file = 'test.xml'
+        file = 'test.xml'
 
     if file == '-':
-       f = sys.stdin
+        f = sys.stdin
     else:
-       try:
-           f = open(file, 'r')
-       except IOError, msg:
-           print file, ":", msg
-           sys.exit(1)
+        try:
+            f = open(file, 'r')
+        except IOError, msg:
+            print file, ":", msg
+            sys.exit(1)
 
     data = f.read()
     if f is not sys.stdin:
-       f.close()
+        f.close()
 
     x = klass()
     for c in data:
-       x.feed(c)
+        x.feed(c)
     x.close()
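
For context only (not part of this commit), a minimal usage sketch of the reindented Lib/xmllib.py interface; the MyParser name and the sample document string are illustrative assumptions. A subclass supplies start_<tag>/end_<tag> methods and overrides handle_data, and XMLParser dispatches to them from finish_starttag/finish_endtag, just as TestXMLParser above does for its verbose output.

    import xmllib

    class MyParser(xmllib.XMLParser):
        # Collect the text content of every <title> element.
        def __init__(self):
            xmllib.XMLParser.__init__(self)
            self.titles = []
            self.__in_title = 0

        def start_title(self, attrs):
            # Called via finish_starttag() -> handle_starttag() for <title ...>
            self.__in_title = 1
            self.__data = ''

        def handle_data(self, data):
            # Character data (and expanded entity references such as &amp;)
            if self.__in_title:
                self.__data = self.__data + data

        def end_title(self):
            # Called via finish_endtag() -> handle_endtag() for </title>
            self.titles.append(self.__data)
            self.__in_title = 0

    p = MyParser()
    p.feed('<doc><title>Hello &amp; welcome</title></doc>')
    p.close()
    print p.titles          # ['Hello & welcome']

Feeding one character at a time, as test() does above, exercises the incremental goahead() path; feeding the whole string in a single feed() call is equivalent.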