mypy: Type simple generators as Iterator, not Iterable.

A generator that yields values without receiving or returning them is
an Iterator.  Although every Iterator happens to be iterable, Iterable
is a confusing annotation for generators because a generator is only
iterable once.
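
For example, a simple generator annotated as Iterator type-checks under
mypy, and the "only iterable once" behavior is easy to demonstrate (a
minimal sketch; count_up is a hypothetical function, not code from this
series):

    from typing import Iterator

    def count_up(n: int) -> Iterator[int]:
        # Yields values, never receives or returns any, so mypy
        # accepts Iterator[int] as the return annotation.
        for i in range(n):
            yield i

    gen = count_up(3)
    assert list(gen) == [0, 1, 2]
    assert list(gen) == []  # exhausted: a generator is only iterable once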

Signed-off-by: Anders Kaseorg <anders@zulip.com>
Author: Anders Kaseorg
Date: 2020-06-22 23:03:47 -07:00
Committed by: Tim Abbott
Parent: c242d176ba
Commit: 3ffed617a2

5 changed files with 27 additions and 15 deletions

@@ -1,6 +1,6 @@
 import json
 import re
-from typing import Callable, Iterable, List, Optional, Union
+from typing import Callable, Iterator, List, Optional, Union

 import scrapy
 from scrapy.http import Request, Response
@@ -105,8 +105,8 @@ class BaseDocumentationSpider(scrapy.Spider):

         return callback

-    def _make_requests(self, url: str) -> Iterable[Request]:
-        callback: Callable[[Response], Optional[Iterable[Request]]] = self.parse
+    def _make_requests(self, url: str) -> Iterator[Request]:
+        callback: Callable[[Response], Optional[Iterator[Request]]] = self.parse
         dont_filter = False
         method = 'GET'
         if self._is_external_url(url):
@@ -120,11 +120,11 @@ class BaseDocumentationSpider(scrapy.Spider):

         yield Request(url, method=method, callback=callback, dont_filter=dont_filter,
                      errback=self.error_callback)

-    def start_requests(self) -> Iterable[Request]:
+    def start_requests(self) -> Iterator[Request]:
         for url in self.start_urls:
             yield from self._make_requests(url)

-    def parse(self, response: Response) -> Iterable[Request]:
+    def parse(self, response: Response) -> Iterator[Request]:
         self.log(response)
         if getattr(self, 'validate_html', False):
@@ -142,7 +142,7 @@ class BaseDocumentationSpider(scrapy.Spider):
                                      canonicalize=False).extract_links(response):
             yield from self._make_requests(link.url)

-    def retry_request_with_get(self, request: Request) -> Iterable[Request]:
+    def retry_request_with_get(self, request: Request) -> Iterator[Request]:
         request.method = 'GET'
         request.dont_filter = True
         yield request
@@ -150,7 +150,7 @@ class BaseDocumentationSpider(scrapy.Spider):
     def exclude_error(self, url: str) -> bool:
         return url in EXCLUDED_URLS

-    def error_callback(self, failure: Failure) -> Optional[Union[Failure, Iterable[Request]]]:
+    def error_callback(self, failure: Failure) -> Optional[Union[Failure, Iterator[Request]]]:
         if failure.check(HttpError):
             response = failure.value.response
             if self.exclude_error(response.url):
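
Since every Iterator is also an Iterable, narrowing these return types
does not break call sites that merely iterate over the result (a sketch
with hypothetical names, not code from this diff):

    from typing import Iterable, Iterator

    def generate() -> Iterator[str]:
        yield "a"
        yield "b"

    def consume(items: Iterable[str]) -> str:
        return ", ".join(items)

    # An Iterator[str] is accepted wherever an Iterable[str] is expected,
    # so the narrower annotation stays compatible with existing callers.
    print(consume(generate()))  # prints: a, b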