python: Sort imports with isort.

Fixes #2665.

Regenerated by tabbott with `lint --fix` after a rebase and change in
parameters.

Note from tabbott: In a few cases, this converts technical debt in the
form of unsorted imports into different technical debt in the form of
our largest files having very long, ugly import sequences at the start.
I expect this change will increase pressure for us to split those files,
which isn't a bad thing.

Signed-off-by: Anders Kaseorg <anders@zulip.com>
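For context, the reordering below is purely mechanical. The sketch that follows reproduces it with isort's public Python API; it is illustrative only, since Zulip's `lint --fix` wrapper runs isort across the whole tree rather than calling it on a string like this, and the `unsorted` sample is copied from the first hunk of this diff.

```python
# A minimal sketch of the reordering this commit applies, using isort's
# Python API (isort >= 5). Illustrative only; not how Zulip's lint wrapper
# actually invokes isort.
import isort

unsorted = """\
import optparse
from scrapy.crawler import Crawler
from scrapy.commands import crawl
from typing import List, Union
"""

# isort.code() returns the source with imports grouped into sections
# (standard library, then third party, then local) and alphabetized
# within each group, separated by blank lines.
print(isort.code(unsorted))
```

With isort's default settings this should print the same ordering the first hunk arrives at: `optparse` and `typing` in the standard-library group, a blank line, then the two `scrapy` imports sorted alphabetically.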
Committed by: Tim Abbott
Parent: b666aef2d3
Commit: 365fe0b3d5
@@ -1,8 +1,9 @@
 import optparse
-from scrapy.crawler import Crawler
-from scrapy.commands import crawl
 from typing import List, Union
+
+from scrapy.commands import crawl
+from scrapy.crawler import Crawler
 
 
 class Command(crawl.Command):
     def run(self, args: List[str], opts: optparse.Values) -> None:

@@ -1,6 +1,5 @@
 import os
 import pathlib
-
 from typing import List
 
 from .common.spiders import BaseDocumentationSpider

@@ -1,12 +1,10 @@
 import os
-
 from posixpath import basename
+from typing import Any, List, Set
 from urllib.parse import urlparse
 
 from .common.spiders import BaseDocumentationSpider
 
-from typing import Any, List, Set
-
 
 def get_images_dir(images_path: str) -> str:
     # Get index html file as start url and convert it to file uri

@@ -1,7 +1,8 @@
 import json
 import re
-import scrapy
+from typing import Callable, Iterable, List, Optional, Union
 
+import scrapy
 from scrapy.http import Request, Response
 from scrapy.linkextractors import IGNORED_EXTENSIONS
 from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
@@ -9,8 +10,6 @@ from scrapy.spidermiddlewares.httperror import HttpError
 from scrapy.utils.url import url_has_any_extension
 from twisted.python.failure import Failure
 
-from typing import Callable, Iterable, List, Optional, Union
-
 EXCLUDED_URLS = [
     # Google calendar returns 404s on HEAD requests unconditionally
     'https://calendar.google.com/calendar/embed?src=ktiduof4eoh47lmgcl2qunnc0o@group.calendar.google.com',