Mirror of https://github.com/zulip/zulip.git
Retrieve message objects from memcached in a bulk request.
On my laptop, this saves about 80 milliseconds per 1000 messages requested via get_old_messages queries. Since we only have one memcached process and it does not run with special priority, this might have a significant impact on load during server restarts.

(imported from commit 06ad13f32f4a6d87a0664c96297ef9843f410ac5)
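The saving comes from replacing one memcached round trip per message with a single bulk request for all of the keys. Below is a minimal sketch of that pattern, not Zulip's actual code: the FakeCache stub, fetch_message_dicts(), and to_dict_cache_key_id() are hypothetical names, and a real deployment would use a memcached client's get_multi()/set_multi() (or Django's cache.get_many()/set_many()) rather than an in-memory stand-in.

# Sketch only: illustrates the bulk cache lookup the commit message describes.
class FakeCache:
    """Stand-in for a memcached client exposing get_many/set_many."""
    def __init__(self):
        self._data = {}

    def get_many(self, keys):
        # One "round trip": return only the keys that are present.
        return {key: self._data[key] for key in keys if key in self._data}

    def set_many(self, mapping, timeout=None):
        self._data.update(mapping)

cache = FakeCache()

def to_dict_cache_key_id(message_id, apply_markdown):
    # Same key format the decorator uses, computed from an id alone so
    # keys for many messages can be built without loading them first.
    return 'message_dict:%d:%d' % (message_id, apply_markdown)

def fetch_message_dicts(message_ids, apply_markdown, load_from_db, render):
    """Return message dicts for message_ids, hitting the cache in bulk.

    load_from_db(ids) is assumed to return {id: db_row}; render(row) is
    assumed to produce the cached dict for one row.
    """
    keys = [to_dict_cache_key_id(mid, apply_markdown) for mid in message_ids]
    cached = cache.get_many(keys)          # single bulk request

    # Figure out which messages were cache misses and fill them in.
    missed_ids = [mid for mid, key in zip(message_ids, keys)
                  if key not in cached]
    fresh = {}
    if missed_ids:
        fresh = {mid: render(row)
                 for mid, row in load_from_db(missed_ids).items()}
        cache.set_many({to_dict_cache_key_id(mid, apply_markdown): d
                        for mid, d in fresh.items()}, timeout=3600 * 24)

    # Preserve the requested order, mixing cache hits and fresh rows.
    return [cached.get(key, fresh.get(mid))
            for mid, key in zip(message_ids, keys)]

The commit's own wiring into the get_old_messages code path presumably follows the same shape: compute every key, issue one bulk fetch, and fall back to the database only for the misses.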
@@ -232,6 +232,9 @@ def get_recipient(type, type_id):
 def linebreak(string):
     return string.replace('\n\n', '<p/>').replace('\n', '<br/>')
 
+def to_dict_cache_key(message, apply_markdown, rendered_content=None):
+    return 'message_dict:%d:%d' % (message.id, apply_markdown)
+
 class Message(models.Model):
     sender = models.ForeignKey(UserProfile)
     recipient = models.ForeignKey(Recipient)
@@ -248,8 +251,7 @@ class Message(models.Model):
     def __str__(self):
         return self.__repr__()
 
-    @cache_with_key(lambda self, apply_markdown, rendered_content=None: 'message_dict:%d:%d' % (self.id, apply_markdown),
-                    timeout=3600*24)
+    @cache_with_key(to_dict_cache_key, timeout=3600*24)
     def to_dict(self, apply_markdown, rendered_content=None):
         display_recipient = get_display_recipient(self.recipient)
         if self.recipient.type == Recipient.STREAM:
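The point of the second hunk is that the cache key no longer lives in a lambda visible only to the decorator: to_dict_cache_key() is an ordinary module-level function, so the same key format can be computed for a whole batch of messages up front. A hedged illustration (the `messages` iterable and the apply_markdown flag here are assumptions, not code from this commit):

# Hypothetical caller-side use of the shared key function; assumes
# `messages` is an iterable of Message rows already loaded from the DB
# and that markdown rendering was requested.
keys = [to_dict_cache_key(message, apply_markdown=True) for message in messages]
# All of these keys can now go to memcached in one bulk call
# (get_multi()/get_many()) instead of one round trip per message.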