2) PSF license for Python 2.2
The robots.txt Exclusion Protocol is implemented as specified in
- http://info.webcrawler.com/mak/projects/robots/norobots-rfc.html
+ http://www.robotstxt.org/norobots-rfc.txt
+
"""
import urlparse
import urllib
self.errcode = opener.errcode
if self.errcode in (401, 403):
self.disallow_all = True
- elif self.errcode >= 400:
+ elif self.errcode >= 400 and self.errcode < 500:
self.allow_all = True
elif self.errcode == 200 and lines:
self.parse(lines)
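
For context, a minimal sketch (not part of the patch) of how the disallow_all and allow_all flags set above drive can_fetch(); the URL and user-agent strings are made up for illustration:

    import robotparser

    rp = robotparser.RobotFileParser()
    rp.set_url("http://example.com/robots.txt")   # hypothetical URL

    # 401/403 responses set disallow_all, so every fetch is refused.
    rp.disallow_all = True
    print rp.can_fetch("MyCrawler/1.0", "http://example.com/page.html")   # False

    # Any other 4xx response (now excluding 5xx) sets allow_all instead.
    rp.disallow_all = False
    rp.allow_all = True
    print rp.can_fetch("MyCrawler/1.0", "http://example.com/page.html")   # True
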
linenumber = 0
entry = Entry()
+ self.modified()
for line in lines:
linenumber += 1
if not line:
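
A small sketch, assuming made-up rules, of what the new self.modified() call buys: after parse() the mtime() timestamp is non-zero, so the guard added to can_fetch() below lets parsed rules be consulted normally:

    import robotparser

    rp = robotparser.RobotFileParser()
    rp.parse([
        "User-agent: *",        # hypothetical robots.txt rules
        "Disallow: /private/",
    ])
    print rp.mtime() > 0   # True: parse() now records when the rules were read
    print rp.can_fetch("MyCrawler/1.0", "http://example.com/private/a")    # False
    print rp.can_fetch("MyCrawler/1.0", "http://example.com/index.html")   # True
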
return False
if self.allow_all:
return True
+
+ # Until the robots.txt file has been read or found not
+ # to exist, we must assume that no url is allowable.
+ # This prevents false positives when a user erroneously
+ # calls can_fetch() before calling read().
+ if not self.last_checked:
+ return False
+
# search for given user agent matches
# the first match counts
parsed_url = urlparse.urlparse(urllib.unquote(url))
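
A short usage sketch of the new guard, with a hypothetical URL: until read() or parse() has run, mtime() is still 0 and can_fetch() now errs on the side of refusing access instead of returning True:

    import robotparser

    rp = robotparser.RobotFileParser()
    rp.set_url("http://example.com/robots.txt")   # hypothetical URL

    # read() has not been called, so nothing is known about the site yet;
    # with this change the answer is a conservative False rather than True.
    print rp.can_fetch("MyCrawler/1.0", "http://example.com/index.html")   # False
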
- Issue #21306: Backport hmac.compare_digest from Python 3. This is part of PEP
466.
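
A brief illustration of the backported API, using made-up key and message values; compare_digest() takes time independent of where two digests first differ, so it does not leak the mismatch position through timing:

    import hmac
    import hashlib

    key = b"not-a-real-key"                     # hypothetical shared secret
    expected = hmac.new(key, b"payload", hashlib.sha256).hexdigest()
    received = hmac.new(key, b"payload", hashlib.sha256).hexdigest()

    # Constant-time comparison of the two hex digests.
    print hmac.compare_digest(expected, received)   # True
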
+- Issue #21469: Reduced the risk of false positives in robotparser by
+ checking to make sure that robots.txt has been read or does not exist
+ prior to returning True in can_fetch().
+
- Issue #21321: itertools.islice() now releases the reference to the source
iterator when the slice is exhausted. Patch by Anton Afanasyev.
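
A small sketch of the effect, using a throwaway generator and a weak reference: once the slice is exhausted, islice() no longer keeps the source iterator alive:

    import itertools
    import weakref
    import gc

    def numbers():
        for i in range(10):
            yield i

    gen = numbers()
    ref = weakref.ref(gen)
    sliced = itertools.islice(gen, 3)

    list(sliced)          # exhaust the slice; islice drops its source reference
    del gen               # no strong references to the generator remain
    gc.collect()
    print ref() is None   # True once the source iterator has been collected
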