requirements: Upgrade Python requirements.

Signed-off-by: Anders Kaseorg <anders@zulip.com>
Anders Kaseorg authored 2024-11-18 16:49:34 -08:00; committed by Tim Abbott
parent a3eae0b6f0
commit 532aee926c
15 changed files with 1312 additions and 1335 deletions


@@ -77,7 +77,6 @@ module = [
"re2.*",
"requests_oauthlib.*", # https://github.com/requests/requests-oauthlib/issues/428
"scim2_filter_parser.attr_paths",
"scrapy.*", # https://github.com/scrapy/scrapy/issues/4041
"social_core.*",
"social_django.*",
"talon_core.*",


@@ -160,6 +160,7 @@ requests-oauthlib
 # For OpenAPI schema validation.
 openapi-core
+werkzeug<3.1.2 # https://github.com/python-openapi/openapi-core/issues/938
 # For reporting errors to sentry.io
 sentry-sdk

File diff suppressed because it is too large.


@@ -228,9 +228,9 @@ myst-parser==4.0.0 \
     --hash=sha256:851c9dfb44e36e56d15d05e72f02b80da21a9e0d07cba96baf5e2d476bb91531 \
     --hash=sha256:b9317997552424448c6096c2558872fdb6f81d3ecb3a40ce84a7518798f3f28d
     # via -r requirements/docs.in
-packaging==24.1 \
-    --hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \
-    --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124
+packaging==24.2 \
+    --hash=sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759 \
+    --hash=sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f
     # via sphinx
 pygments==2.18.0 \
     --hash=sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199 \
@@ -312,9 +312,9 @@ sphinx-design==0.6.1 \
     --hash=sha256:b11f37db1a802a183d61b159d9a202314d4d2fe29c163437001324fe2f19549c \
     --hash=sha256:b44eea3719386d04d765c1a8257caca2b3e6f8421d7b3a5e742c0fd45f84e632
     # via -r requirements/docs.in
-sphinx-rtd-theme==3.0.1 \
-    --hash=sha256:921c0ece75e90633ee876bd7b148cfaad136b481907ad154ac3669b6fc957916 \
-    --hash=sha256:a4c5745d1b06dfcb80b7704fe532eb765b44065a8fad9851e4258c8804140703
+sphinx-rtd-theme==3.0.2 \
+    --hash=sha256:422ccc750c3a3a311de4ae327e82affdaf59eb695ba4936538552f3b00f4ee13 \
+    --hash=sha256:b7457bc25dda723b20b086a670b9953c859eab60a2a03ee8eb2bb23e176e5f85
     # via -r requirements/docs.in
 sphinxcontrib-applehelp==2.0.0 \
     --hash=sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1 \


@@ -7,9 +7,9 @@
 #
 # For details, see requirements/README.md .
 #
-wheel==0.44.0 \
-    --hash=sha256:2376a90c98cc337d18623527a97c31797bd02bad0033d41547043a1cbfbe448f \
-    --hash=sha256:a29c3f2817e95ab89aa4660681ad547c0e9547f20e75b0562fe7723c9a2a9d49
+wheel==0.45.0 \
+    --hash=sha256:52f0baa5e6522155090a09c6bd95718cc46956d1b51d537ea5454249edb671c7 \
+    --hash=sha256:a57353941a3183b3d5365346b567a260a0602a0f8a635926a7dede41b94c674a
     # via -r requirements/pip.in
 # The following packages are considered to be unsafe in a requirements file:

File diff suppressed because it is too large.


@@ -475,7 +475,7 @@ def get_tzdata_zi() -> IO[str]:
     for path in zoneinfo.TZPATH:
         filename = os.path.join(path, "tzdata.zi")
         if os.path.exists(filename):
-            return open(filename)  # noqa: SIM115
+            return open(filename)
     raise RuntimeError("Missing time zone data (tzdata.zi)")
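For reference, a standalone sketch of the TZPATH search shown in this hunk (not Zulip code): it probes each zoneinfo directory for the compiled tz source file.

    import os
    import zoneinfo

    # Look for tzdata.zi in each directory on the interpreter's zoneinfo search path.
    for path in zoneinfo.TZPATH:
        candidate = os.path.join(path, "tzdata.zi")
        if os.path.exists(candidate):
            print("found", candidate)
            break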


@@ -119,8 +119,8 @@ def parser() -> argparse.ArgumentParser:
 def maybe_gzip(logfile_name: str) -> TextIO:
     if logfile_name.endswith(".gz"):
-        return gzip.open(logfile_name, "rt")  # noqa: SIM115
-    return open(logfile_name)  # noqa: SIM115
+        return gzip.open(logfile_name, "rt")
+    return open(logfile_name)
 NGINX_LOG_LINE_RE = re.compile(


@@ -1,20 +1,26 @@
-import optparse
+import argparse
 from scrapy.commands import crawl
 from scrapy.crawler import Crawler
+from scrapy.spiders import Spider
+from typing_extensions import override
 class Command(crawl.Command):
-    def run(self, args: list[str], opts: optparse.Values) -> None:
+    @override
+    def run(self, args: list[str], opts: argparse.Namespace) -> None:
         crawlers = []
+        assert self.crawler_process is not None
         real_create_crawler = self.crawler_process.create_crawler
-        def create_crawler(crawler_or_spidercls: Crawler | str) -> Crawler:
+        def create_crawler(crawler_or_spidercls: type[Spider] | Crawler | str) -> Crawler:
             crawler = real_create_crawler(crawler_or_spidercls)
             crawlers.append(crawler)
             return crawler
-        self.crawler_process.create_crawler = create_crawler
+        self.crawler_process.create_crawler = create_crawler  # type: ignore[method-assign]  # monkey patching
         super().run(args, opts)
-        if any(crawler.stats.get_value("log_count/ERROR") for crawler in crawlers):
-            self.exitcode = 1
+        for crawler in crawlers:
+            assert crawler.stats is not None
+            if crawler.stats.get_value("log_count/ERROR"):
+                self.exitcode = 1
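The "# type: ignore[method-assign]" added above silences mypy's error about reassigning a method on an instance. A minimal standalone sketch of that monkey-patching pattern (illustrative names, not Zulip code):

    class Factory:
        def create(self, name: str) -> str:
            return f"object:{name}"

    factory = Factory()
    created: list[str] = []
    real_create = factory.create  # keep a reference to the original bound method

    def create_and_record(name: str) -> str:
        # Delegate to the original method, recording everything it produces.
        obj = real_create(name)
        created.append(obj)
        return obj

    factory.create = create_and_record  # type: ignore[method-assign]  # monkey patching
    factory.create("spider")
    assert created == ["object:spider"]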


@@ -95,5 +95,4 @@ ROBOTSTXT_OBEY = False
 # HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
 # Set settings whose default value is deprecated to a future-proof value
-REQUEST_FINGERPRINTER_IMPLEMENTATION = "2.7"
 TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"


@@ -5,12 +5,15 @@ from collections.abc import Callable, Iterator
 from urllib.parse import urlsplit
 import scrapy
-from scrapy.http import Request, Response
+from scrapy.http.request import Request
+from scrapy.http.response import Response
+from scrapy.http.response.text import TextResponse
 from scrapy.linkextractors import IGNORED_EXTENSIONS
 from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
 from scrapy.spidermiddlewares.httperror import HttpError
 from scrapy.utils.url import url_has_any_extension
 from twisted.python.failure import Failure
+from typing_extensions import override
 EXCLUDED_DOMAINS = [
     # Returns 429 rate-limiting errors
@@ -58,7 +61,6 @@ ZULIP_SERVER_GITHUB_DIRECTORY_PATH_PREFIX = "/zulip/zulip/tree/main"
 class BaseDocumentationSpider(scrapy.Spider):
-    name: str | None = None
     # Exclude domain address.
     deny_domains: list[str] = []
     start_urls: list[str] = []
@@ -113,6 +115,8 @@ class BaseDocumentationSpider(scrapy.Spider):
     def check_fragment(self, response: Response) -> None:
         self.log(response)
         xpath_template = "//*[@id='{fragment}' or @name='{fragment}']"
+        assert isinstance(response, TextResponse)
+        assert response.request is not None
         fragment = urlsplit(response.request.url).fragment
         # Check fragment existing on response page.
         if not response.selector.xpath(xpath_template.format(fragment=fragment)):
@@ -201,10 +205,12 @@ class BaseDocumentationSpider(scrapy.Spider):
             errback=self.error_callback,
         )
+    @override
     def start_requests(self) -> Iterator[Request]:
         for url in self.start_urls:
             yield from self._make_requests(url)
+    @override
     def parse(self, response: Response) -> Iterator[Request]:
         self.log(response)
@@ -218,6 +224,7 @@ class BaseDocumentationSpider(scrapy.Spider):
                 errback=self.error_callback,
             )
+        assert isinstance(response, TextResponse)
         for link in LxmlLinkExtractor(
             deny_domains=self.deny_domains,
             deny_extensions=["doc"],
@@ -240,6 +247,7 @@ class BaseDocumentationSpider(scrapy.Spider):
             # likely due to a redirect.
             if urlsplit(response.url).netloc == "idmsa.apple.com":
                 return None
+            assert response.request is not None
             if response.status == 405 and response.request.method == "HEAD":
                 # Method 'HEAD' not allowed, repeat request with 'GET'
                 return self.retry_request_with_get(response.request)
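The asserts added in this file follow the usual assert-based narrowing idiom: each assert refines the static type (Response to TextResponse, "Request | None" to Request) so the attribute access after it passes mypy. A small generic sketch of the pattern (not Zulip code):

    def fragment_of(url: str | None) -> str:
        # The assert narrows "str | None" to "str" for the type checker,
        # mirroring the "assert response.request is not None" lines above.
        assert url is not None
        return url.rsplit("#", 1)[-1]

    print(fragment_of("https://example.com/page#installation"))  # -> installation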


@@ -19,6 +19,10 @@ PYTHONWARNINGS+=',ignore:The '\''strip_cdata'\'' option of HTMLParser() has neve
# https://github.com/fabfuel/circuitbreaker/pull/63
PYTHONWARNINGS+=',ignore:datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version.:DeprecationWarning:circuitbreaker'
# https://github.com/mahmoud/glom/pull/258
-PYTHONWARNINGS+=',ignore:invalid escape sequence '\'\\' '\'':DeprecationWarning'
+PYTHONWARNINGS+=',ignore:invalid escape sequence '\'\\' '\'':SyntaxWarning'
# This gets triggered due to our do_patch_activate_script
PYTHONWARNINGS+=',default:Attempting to work in a virtualenv.:UserWarning:IPython.core.interactiveshell'
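Each comma-separated PYTHONWARNINGS entry uses the standard "action:message:category:module" filter syntax. As a rough in-process equivalent of the circuitbreaker entry above (sketch only; filterwarnings takes a regex rather than a literal message prefix):

    import warnings

    warnings.filterwarnings(
        "ignore",
        message=r"datetime\.datetime\.utcnow\(\) is deprecated",
        category=DeprecationWarning,
        module="circuitbreaker",
    )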


@@ -51,4 +51,4 @@ API_FEATURE_LEVEL = (
 # historical commits sharing the same major version, in which case a
 # minor version bump suffices.
-PROVISION_VERSION = (301, 3)  # bumped 2024-11-13 for @giphy/js-types
+PROVISION_VERSION = (302, 0)  # bumped 2024-11-18 to upgrade Python requirements
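The comment above refers to the (major, minor) scheme in which a minor bump suffices for compatible changes. A hypothetical sketch of how such a pair could be compared (not the actual Zulip check):

    def needs_reprovision(expected: tuple[int, int], provisioned: tuple[int, int]) -> bool:
        # A different major version, or an older minor version, means the
        # development environment must be provisioned again.
        if provisioned[0] != expected[0]:
            return True
        return provisioned[1] < expected[1]

    assert needs_reprovision((302, 0), (301, 3))
    assert not needs_reprovision((302, 0), (302, 0))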


@@ -264,7 +264,7 @@ def reset_email_visibility_to_everyone_in_zulip_realm() -> None:
 def get_test_image_file(filename: str) -> IO[bytes]:
     test_avatar_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../tests/images"))
-    return open(os.path.join(test_avatar_dir, filename), "rb")  # noqa: SIM115
+    return open(os.path.join(test_avatar_dir, filename), "rb")
 def read_test_image_file(filename: str) -> bytes:


@@ -264,7 +264,7 @@ def serve_file(
         # We cannot use X-Accel-Redirect to offload the serving of
         # this image to nginx, because it does not preserve the status
         # code of this response, nor the Vary: header.
-        return FileResponse(open(static_path(image_path), "rb"), status=status)  # noqa: SIM115
+        return FileResponse(open(static_path(image_path), "rb"), status=status)
     if attachment is None:
         if preferred_accept(request, ["text/html", "image/png"]) == "image/png":