digest: Eliminate unneeded queries for hot convos.

We can easily keep track of messages by conversation bucket during the
original loop through the messages, instead of re-querying them per hot
conversation later.
Author: Steve Howell
Date: 2018-11-11 17:46:21 +00:00
Committed by: Tim Abbott
Parent: 3091412806
Commit: cc33e4cd0c
3 changed files with 7 additions and 8 deletions
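
The commit message above describes a single-pass bucketing approach: while
iterating over the user's stream messages once, also group them by their
(stream_id, topic) conversation key, so that the later hot-conversation
rendering step can read its teaser messages from an in-memory dict instead of
issuing one extra database query per conversation. A minimal, self-contained
sketch of that idea, assuming nothing from Zulip itself (SimpleMessage and its
fields are illustrative stand-ins, not the real zerver.models.Message API):

from collections import defaultdict
from typing import DefaultDict, Dict, List, NamedTuple, Tuple

class SimpleMessage(NamedTuple):
    # Illustrative stand-in for a message row; the real code works with
    # zerver.models.Message objects and message.topic_name().
    stream_id: int
    topic: str
    content: str

def bucket_by_conversation(
        messages: List[SimpleMessage],
) -> Dict[Tuple[int, str], List[SimpleMessage]]:
    # One pass over the already-fetched messages; each (stream_id, topic)
    # conversation key maps to the messages that belong to it.
    conversation_messages: DefaultDict[Tuple[int, str], List[SimpleMessage]] = defaultdict(list)
    for message in messages:
        key = (message.stream_id, message.topic)
        conversation_messages[key].append(message)
    return conversation_messages

# Picking the first two teaser messages for a hot conversation is then just a
# dict lookup plus a slice, with no extra query:
#     first_few_messages = conversation_messages[key][:2]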


@@ -40,7 +40,6 @@ FILES_WITH_LEGACY_SUBJECT = {
     # TRY TO FIX THESE! If you can't fix them, try to
     # add comments here and/or in the file itself about
     # why sweeping subject is tricky.
-    'zerver/lib/digest.py',
     'zerver/lib/onboarding.py',
     'zerver/lib/stream_topic.py',


@@ -16,7 +16,7 @@ from zerver.lib.send_email import send_future_email, FromAddress
 from zerver.lib.url_encoding import encode_stream
 from zerver.models import UserProfile, UserMessage, Recipient, Stream, \
     Subscription, UserActivity, get_active_streams, get_user_profile_by_id, \
-    Realm
+    Realm, Message
 from zerver.context_processors import common_context
 from zerver.lib.queue import queue_json_publish
 from zerver.lib.logging_util import log_to_file
@@ -94,11 +94,14 @@ def gather_hot_conversations(user_profile: UserProfile, stream_ums: QuerySet) ->
     messages = [um.message for um in stream_ums]

     conversation_length = defaultdict(int)  # type: Dict[Tuple[int, str], int]
+    conversation_messages = defaultdict(list)  # type: Dict[Tuple[int, str], List[Message]]
     conversation_diversity = defaultdict(set)  # type: Dict[Tuple[int, str], Set[str]]
     for message in messages:
         key = (message.recipient.type_id,
                message.topic_name())
+        conversation_messages[key].append(message)
+
         if not message.sent_by_human():
             # Don't include automated messages in the count.
             continue
@@ -131,15 +134,12 @@ def gather_hot_conversations(user_profile: UserProfile, stream_ums: QuerySet) ->
     hot_conversation_render_payloads = []
     for h in hot_conversations:
-        stream_id, subject = h
         users = list(conversation_diversity[h])
         count = conversation_length[h]
+        messages = conversation_messages[h]

         # We'll display up to 2 messages from the conversation.
-        first_few_messages = [user_message.message for user_message in
-                              stream_ums.filter(
-                                  message__recipient__type_id=stream_id,
-                                  message__subject=subject)[:2]]
+        first_few_messages = messages[:2]

         teaser_data = {"participants": users,
                        "count": count - len(first_few_messages),

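Roughly, the rendering loop after this change behaves like the simplified,
standalone sketch below: the per-conversation stream_ums.filter(...) query from
the left-hand side of the diff is gone, replaced by a lookup into the bucket
built during the first pass. (Note, from the hunk above, that messages are
appended to the bucket before the sent_by_human() check, so the bucket also
contains automated messages, which are excluded from the length count.) The
names and payload fields here are trimmed to what the diff shows; the real
function also renders the teaser messages themselves.

from typing import Any, Dict, List, Set, Tuple

ConversationKey = Tuple[int, str]  # (stream_id, topic)

def render_hot_conversations(
        hot_conversations: List[ConversationKey],
        conversation_messages: Dict[ConversationKey, List[Any]],
        conversation_length: Dict[ConversationKey, int],
        conversation_diversity: Dict[ConversationKey, Set[str]],
) -> List[Dict[str, Any]]:
    payloads = []
    for h in hot_conversations:
        users = list(conversation_diversity[h])
        count = conversation_length[h]
        messages = conversation_messages[h]

        # Up to 2 teaser messages, taken from memory rather than from a
        # per-conversation database query.
        first_few_messages = messages[:2]

        payloads.append({
            "participants": users,
            "count": count - len(first_few_messages),
        })
    return payloads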

@@ -121,7 +121,7 @@ class TestDigestEmailMessages(ZulipTestCase):
         with queries_captured() as queries:
             handle_digest_email(othello.id, cutoff)

-        self.assertTrue(34 <= len(queries) <= 35)
+        self.assertTrue(29 <= len(queries) <= 30)

         self.assertEqual(mock_send_future_email.call_count, 1)
         kwargs = mock_send_future_email.call_args[1]
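
The test change above just updates the expected query count: with the
per-conversation filter gone, building one digest now takes 29-30 queries
instead of 34-35, and the assertion allows a small range rather than an exact
count. For readers outside the Zulip test suite, the same style of check can be
written with Django's stock CaptureQueriesContext; a small self-contained
sketch (the model queried here is arbitrary and merely stands in for the digest
code path):

from django.contrib.auth.models import User
from django.db import connection
from django.test import TestCase
from django.test.utils import CaptureQueriesContext

class QueryCountSketch(TestCase):
    def test_query_count(self) -> None:
        # Capture every SQL statement issued inside the block, the same
        # basic idea as the queries_captured() helper used above.
        with CaptureQueriesContext(connection) as ctx:
            list(User.objects.all())  # stand-in for handle_digest_email(...)
        # A single SELECT is expected for this stand-in query.
        self.assertEqual(len(ctx.captured_queries), 1)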