From 2612679ddc202ff684ea525142d2802c962b4f64 Mon Sep 17 00:00:00 2001
From: Ezio Melotti
Date: Mon, 25 Nov 2013 05:14:51 +0200
Subject: [PATCH] #19620: Fix typo in docstring (noticed by Christopher Welborn).

---
 Lib/lib2to3/pgen2/tokenize.py | 4 ++--
 Lib/tokenize.py               | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/Lib/lib2to3/pgen2/tokenize.py b/Lib/lib2to3/pgen2/tokenize.py
index f6e0284c2f..a2ba96d78e 100644
--- a/Lib/lib2to3/pgen2/tokenize.py
+++ b/Lib/lib2to3/pgen2/tokenize.py
@@ -252,7 +252,7 @@ def _get_normal_name(orig_enc):
 def detect_encoding(readline):
     """
     The detect_encoding() function is used to detect the encoding that should
-    be used to decode a Python source file. It requires one argment, readline,
+    be used to decode a Python source file. It requires one argument, readline,
     in the same way as the tokenize() generator.
 
     It will call readline a maximum of twice, and return the encoding used
@@ -343,7 +343,7 @@ def untokenize(iterable):
 
 def generate_tokens(readline):
     """
-    The generate_tokens() generator requires one argment, readline, which
+    The generate_tokens() generator requires one argument, readline, which
     must be a callable object which provides the same interface as the
     readline() method of built-in file objects. Each call to the function
     should return one line of input as a string. Alternately, readline
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index ca7b07493c..db02134ab5 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -263,7 +263,7 @@ def untokenize(iterable):
 
 def generate_tokens(readline):
     """
-    The generate_tokens() generator requires one argment, readline, which
+    The generate_tokens() generator requires one argument, readline, which
     must be a callable object which provides the same interface as the
     readline() method of built-in file objects. Each call to the function
     should return one line of input as a string. Alternately, readline
-- 
2.50.1
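
For context (not part of the commit): below is a minimal usage sketch of the readline-callable contract that the corrected docstrings describe, written against the standard-library tokenize module. The sample source string and variable names are invented for illustration.

import io
import tokenize

source = "x = 1  # a comment\n"

# generate_tokens() expects a callable that returns one line of source
# text (a str) per call, e.g. the readline method of a StringIO object.
for token in tokenize.generate_tokens(io.StringIO(source).readline):
    print(token)

# detect_encoding() expects a readline that returns bytes; it calls it at
# most twice and returns the detected encoding plus the lines it consumed.
encoding, consumed = tokenize.detect_encoding(io.BytesIO(source.encode()).readline)
print(encoding, consumed)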