From a5413c499702a74fdc50e4bc8e7e6a480856a1f9 Mon Sep 17 00:00:00 2001
From: Raymond Hettinger
Date: Mon, 12 May 2014 22:18:50 -0700
Subject: [PATCH] Issue 21469: Mitigate risk of false positives with
 robotparser.

* Repair the broken link to norobots-rfc.txt.

* HTTP response codes >= 500 are now treated as a failed read rather
  than as "not found".  "Not found" means that we can assume the entire
  site is allowed; a 5xx server error tells us nothing.

* A successful read() or parse() updates the mtime (which is defined to
  be "the time the robots.txt file was last fetched").

* The can_fetch() method returns False unless we've had a read() with a
  2xx or 4xx response.  This avoids false positives in the case where a
  user calls can_fetch() before calling read().

* I don't see any easy way to test this patch without hitting internet
  resources that might change or without using mock objects that
  wouldn't provide much reassurance.
---
 Lib/robotparser.py | 14 ++++++++++++--
 Misc/NEWS          |  4 ++++
 2 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/Lib/robotparser.py b/Lib/robotparser.py
index ad3be94711..b46b753008 100644
--- a/Lib/robotparser.py
+++ b/Lib/robotparser.py
@@ -7,7 +7,8 @@
     2) PSF license for Python 2.2
 
     The robots.txt Exclusion Protocol is implemented as specified in
-    http://info.webcrawler.com/mak/projects/robots/norobots-rfc.html
+    http://www.robotstxt.org/norobots-rfc.txt
+
 """
 import urlparse
 import urllib
@@ -60,7 +61,7 @@ class RobotFileParser:
         self.errcode = opener.errcode
         if self.errcode in (401, 403):
             self.disallow_all = True
-        elif self.errcode >= 400:
+        elif self.errcode >= 400 and self.errcode < 500:
             self.allow_all = True
         elif self.errcode == 200 and lines:
             self.parse(lines)
@@ -86,6 +87,7 @@ class RobotFileParser:
         linenumber = 0
         entry = Entry()
 
+        self.modified()
         for line in lines:
             linenumber += 1
             if not line:
@@ -131,6 +133,14 @@ class RobotFileParser:
             return False
         if self.allow_all:
             return True
+
+        # Until the robots.txt file has been read or found not
+        # to exist, we must assume that no url is allowable.
+        # This prevents false positives when a user erroneously
+        # calls can_fetch() before calling read().
+        if not self.last_checked:
+            return False
+
         # search for given user agent matches
         # the first match counts
         parsed_url = urlparse.urlparse(urllib.unquote(url))
diff --git a/Misc/NEWS b/Misc/NEWS
index 5d3209d955..2bda7260c0 100644
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -52,6 +52,10 @@ Library
 - Issue #21306: Backport hmac.compare_digest from Python 3. This is part of
   PEP 466.
 
+- Issue #21469: Reduced the risk of false positives in robotparser by
+  checking to make sure that robots.txt has been read or does not exist
+  prior to returning True in can_fetch().
+
 - Issue #21321: itertools.islice() now releases the reference to the source
   iterator when the slice is exhausted.  Patch by Anton Afanasyev.
-- 
2.50.1
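
For reference, a minimal sketch of the behavior this patch produces
(Python 2; the example.com URL is a placeholder, not part of the
commit):

    import robotparser

    rp = robotparser.RobotFileParser()
    rp.set_url('http://www.example.com/robots.txt')

    # Before read(), last_checked is still 0, so the new guard in
    # can_fetch() returns False rather than a false positive.
    print rp.can_fetch('*', 'http://www.example.com/')    # False

    # read() fetches robots.txt: a 2xx response parses the rules, a 4xx
    # response sets allow_all, and a 5xx response now leaves the parser
    # knowing nothing, so can_fetch() keeps returning False.
    rp.read()
    print rp.can_fetch('*', 'http://www.example.com/')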