mirror of
https://github.com/zulip/zulip.git
synced 2025-11-09 16:37:23 +00:00
Fix Tornado memory leak of handler objects.
In 2ea0daab19, handlers were moved to
being tracked via the handlers_by_id dict, but nothing cleared this
dict, resulting in every handler object being leaked. Since a Tornado
process uses a different handler object for every request, this
resulted in a significant memory leak. We fix this by clearing the
handlers_by_id dict in the two code paths that would result in a
Tornado handler being de-allocated: the exception codepath and the
handler disconnect codepath.
Fixes #463.
This commit is contained in:
@@ -20,7 +20,8 @@ from zerver.decorator import RespondAsynchronously, JsonableError
 from zerver.lib.cache import cache_get_many, message_cache_key, \
     user_profile_by_id_cache_key, cache_save_user_profile
 from zerver.lib.cache_helpers import cache_with_key
-from zerver.lib.handlers import get_handler_by_id, finish_handler
+from zerver.lib.handlers import clear_handler_by_id, get_handler_by_id, \
+    finish_handler
 from zerver.lib.utils import statsd
 from zerver.middleware import async_request_restart
 from zerver.lib.narrow import build_narrow_filter
@@ -160,7 +161,7 @@ class ClientDescriptor(object):

     def disconnect_handler(self, client_closed=False):
         if self.current_handler_id:
-            delete_descriptor_by_handler_id(self.current_handler_id, None)
+            clear_descriptor_by_handler_id(self.current_handler_id, None)
             if client_closed:
                 logging.info("Client disconnected for queue %s (%s via %s)" %
                              (self.event_queue.id, self.user_profile_email,
@@ -184,7 +185,7 @@ def get_descriptor_by_handler_id(handler_id):
 def set_descriptor_by_handler_id(handler_id, client_descriptor):
     descriptors_by_handler_id[handler_id] = client_descriptor

-def delete_descriptor_by_handler_id(handler_id, client_descriptor):
+def clear_descriptor_by_handler_id(handler_id, client_descriptor):
     del descriptors_by_handler_id[handler_id]

 def compute_full_event_type(event):
@@ -14,6 +14,9 @@ def allocate_handler_id(handler):
     current_handler_id += 1
     return handler.handler_id

+def clear_handler_by_id(handler_id):
+    del handlers[handler_id]
+
 def finish_handler(handler_id, event_queue_id, contents, apply_markdown):
     err_msg = "Got error finishing handler for queue %s" % (event_queue_id,)
     try:
@@ -22,7 +22,7 @@ from zerver.lib.debug import interactive_debug_listen
 from zerver.lib.response import json_response
 from zerver.lib.event_queue import process_notification, missedmessage_hook
 from zerver.lib.event_queue import setup_event_queue, add_client_gc_hook, \
-    get_descriptor_by_handler_id
+    get_descriptor_by_handler_id, clear_handler_by_id
 from zerver.lib.handlers import allocate_handler_id
 from zerver.lib.queue import setup_tornado_rabbitmq
 from zerver.lib.socket import get_sockjs_router, respond_send_message
@@ -151,6 +151,8 @@ class AsyncDjangoHandler(tornado.web.RequestHandler, base.BaseHandler):
         self.initLock.release()
         self._auto_finish = False
         self.client_descriptor = None
+        # Handler IDs are allocated here, and the handler ID map must
+        # be cleared when the handler finishes its response
         allocate_handler_id(self)

     def get(self, *args, **kwargs):
@@ -243,6 +245,7 @@ class AsyncDjangoHandler(tornado.web.RequestHandler, base.BaseHandler):
                 async_request_stop(request)
                 return None
         except Exception as e:
+            clear_handler_by_id(self.current_handler_id)
             # If the view raised an exception, run it through exception
             # middleware, and if the exception middleware returns a
             # response, use that. Otherwise, reraise the exception.
Reference in New Issue
Block a user