mirror of https://github.com/zulip/zulip.git
Compare commits: shared-0.0 ... shared-0.0 (15 commits)
Commits:

- b52e83281b
- 6e336ef6f6
- 66da42bbae
- a1aba2869b
- 3a9730b7c6
- 3ea1311f56
- 8dd3579885
- 2e480b0b09
- 8810203a4c
- 521ec5885b
- 42f84a8cc7
- e05a0dcf98
- df201bd132
- 951dc68f3a
- 167b891d4e
@@ -14,7 +14,6 @@ const noop = () => {};
 const realm_icon = mock_esm("../../static/js/realm_icon");

 const channel = mock_esm("../../static/js/channel");
-const overlays = mock_esm("../../static/js/overlays");

 mock_esm("../../static/js/csrf", {csrf_token: "token-stub"});
 mock_esm("../../static/js/list_widget", {
@@ -309,8 +308,6 @@ function test_extract_property_name() {
 }

 function test_sync_realm_settings() {
-    overlays.settings_open = () => true;
-
     {
         /* Test invalid settings property sync */
         const $property_elem = $("#id_realm_invalid_settings_property");
@@ -29,12 +29,10 @@ module Puppet::Parser::Functions
   newfunction(:zulipconf_nagios_hosts, :type => :rvalue, :arity => 0) do |args|
     section = "nagios"
     prefix = "hosts_"
-    ignore_key = "hosts_fullstack"
     zulip_conf_path = lookupvar("zulip_conf_path")
     keys = `/usr/bin/crudini --get #{zulip_conf_path} #{section} 2>&1`; result = $?.success?
     if result
-      keys = keys.lines.map { |l| l.strip }
-      filtered_keys = keys.select { |l| l.start_with?(prefix) }.reject { |k| k == ignore_key }
+      filtered_keys = keys.lines.map { |l| l.strip }.select { |l| l.start_with?(prefix) }
       all_values = []
       filtered_keys.each do |key|
         values = `/usr/bin/crudini --get #{zulip_conf_path} #{section} #{key} 2>&1`; result = $?.success?
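For orientation, a minimal Python sketch of the lookup this function now performs: list the keys of the `[nagios]` section and keep those prefixed `hosts_` (the `hosts_fullstack` exclusion above is dropped). The conf path and the use of configparser in place of crudini are assumptions for illustration only:

```python
import configparser

def nagios_host_keys(conf_path="/etc/zulip/zulip.conf"):
    parser = configparser.ConfigParser()
    parser.read(conf_path)
    if not parser.has_section("nagios"):
        return []  # mirrors the function yielding nothing when crudini fails
    # `crudini --get <file> nagios` lists the section's keys; options() is
    # the configparser equivalent. Keep only the hosts_* keys, as above.
    return [key for key in parser.options("nagios") if key.startswith("hosts_")]
```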
puppet/zulip_ops/files/nagios_ssh_config (new file, 6 lines)
@@ -0,0 +1,6 @@
+Host *
+  ControlMaster no
+  ControlPath /tmp/ssh-%C
+
+  ServerAliveInterval 30
+  ServerAliveCountMax 3
@@ -140,6 +140,14 @@ class zulip_ops::profile::nagios {
     require => File['/var/lib/nagios'],
   }

+  file { '/var/lib/nagios/.ssh/config':
+    ensure => file,
+    mode   => '0644',
+    owner  => 'nagios',
+    group  => 'nagios',
+    source => 'puppet:///modules/zulip_ops/nagios_ssh_config',
+  }
+
   # Disable apparmor for msmtp so it can read the above config file
   file { '/etc/apparmor.d/disable/usr.bin.msmtp':
     ensure => link,
@@ -3,7 +3,7 @@ define host{
     use generic-host
     host_name <%= host %>
     alias <%= host %>
-    address <%= host %>
+    address <%= host %><% unless host.include?(".") %>.<%= @default_host_domain %><% end %>
     hostgroups all,fullstack,non_aws_host,frontends,not_pageable_servers,postgresql
 }
 <% end -%>
@@ -13,7 +13,7 @@ define host{
     use generic-host
     host_name <%= host %>
     alias <%= host %>
-    address <%= host %>.<%= @default_host_domain %>
+    address <%= host %><% unless host.include?(".") %>.<%= @default_host_domain %><% end %>
     hostgroups all,aws_host,prod_frontends,pageable_servers
 }
 <% end -%>
@@ -23,7 +23,7 @@ define host{
     use generic-host
     host_name <%= host %>
     alias <%= host %>
-    address <%= host %>.<%= @default_host_domain %>
+    address <%= host %><% unless host.include?(".") %>.<%= @default_host_domain %><% end %>
     hostgroups all,aws_host,staging_frontends,not_pageable_servers
 }
 <% end -%>
@@ -33,7 +33,7 @@ define host{
     use generic-host
     host_name <%= host %>
     alias <%= host %>
-    address <%= host %>.<%= @default_host_domain %>
+    address <%= host %><% unless host.include?(".") %>.<%= @default_host_domain %><% end %>
     hostgroups all,non_aws_host,zmirror,flaky_servers
 }
 <% end -%>
@@ -43,7 +43,7 @@ define host{
     use generic-host
     host_name <%= host %>
     alias <%= host %>
-    address <%= host %>.<%= @default_host_domain %>
+    address <%= host %><% unless host.include?(".") %>.<%= @default_host_domain %><% end %>
     hostgroups all,non_aws_host,zmirrorp,flaky_servers
 }
 <% end -%>
@@ -53,7 +53,7 @@ define host{
     use generic-host
     host_name <%= host %>
     alias <%= host %>
-    address <%= host %>.<%= @default_host_domain %>
+    address <%= host %><% unless host.include?(".") %>.<%= @default_host_domain %><% end %>
     hostgroups all,aws_host,postgresql_primary,pageable_servers
 }
 <% end -%>
@@ -63,7 +63,7 @@ define host{
     use generic-host
     host_name <%= host %>
     alias <%= host %>
-    address <%= host %>.<%= @default_host_domain %>
+    address <%= host %><% unless host.include?(".") %>.<%= @default_host_domain %><% end %>
     hostgroups all,aws_host,postgresql_replica,pageable_servers
 }
 <% end -%>
@@ -73,7 +73,7 @@ define host{
     use generic-host
     host_name <%= host %>
     alias <%= host %>
-    address <%= host %>.<%= @default_host_domain %>
+    address <%= host %><% unless host.include?(".") %>.<%= @default_host_domain %><% end %>
     hostgroups all,aws_host,not_pageable_servers, redis
 }
 <% end -%>
@@ -83,7 +83,7 @@ define host{
     use generic-host
     host_name <%= host %>
     alias <%= host %>
-    address <%= host %>.<%= @default_host_domain %>
+    address <%= host %><% unless host.include?(".") %>.<%= @default_host_domain %><% end %>
     hostgroups all,aws_host,pageable_servers,smokescreen
 }
 <% end -%>
@@ -93,7 +93,7 @@ define host{
     use generic-host
     host_name <%= host %>
     alias <%= host %>
-    address <%= host %>.<%= @default_host_domain %>
-    hostgroups all,aws_host,not_pageable_servers,other
+    address <%= host %><% unless host.include?(".") %>.<%= @default_host_domain %><% end %>
+    hostgroups all,<% if host.include?(".") %>non_<% end %>aws_host,not_pageable_servers,other
 }
 <% end -%>
@@ -4,8 +4,8 @@
 i = 0
 @hosts.each do |host|
 -%>
-[program:munin-tunnel-<%= host %>]
-command=autossh -2 -N -M <%= 20000 + 2 * i %> -L <%= 5000 + i %>:localhost:4949 nagios@<%= host %>.<%= @default_host_domain %>
+[program:autossh-tunnel-<%= host %>]
+command=autossh -N -M <%= 20000 + 2 * i %> -L <%= 5000 + i %>:localhost:4949 -o ControlMaster=yes nagios@<%= host %><% unless host.include?(".") %>.<%= @default_host_domain %><% end %>
 priority=200 ; the relative start priority (default 999)
 autostart=true ; start at supervisord start (default: true)
 autorestart=true ; whether/when to restart (default: unexpected)
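The port arithmetic in the command line is deterministic per host index. A small sketch of the allocation (host names here are hypothetical; in the template, `@hosts` comes from Puppet):

```python
# -M is autossh's monitor port (it also uses the next port for echo replies);
# the local port forwards to munin-node's default port 4949 on the host.
hosts = ["nagios", "zmirror", "postgres1"]

for i, host in enumerate(hosts):
    monitor_port = 20000 + 2 * i
    local_port = 5000 + i
    print(f"[program:autossh-tunnel-{host}] -M {monitor_port} -L {local_port}:localhost:4949")
```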
@@ -976,13 +976,14 @@ export function delete_topic(stream_id, topic_name, failures = 0) {
         data: {
             topic_name,
         },
-        success() {},
-        error(xhr) {
+        success(data) {
+            if (data.result === "partially_completed") {
                 if (failures >= 9) {
                     // Don't keep retrying indefinitely to avoid DoSing the server.
                     return;
                 }
-            if (xhr.status === 502) {
+
+                failures += 1;
                 /* When trying to delete a very large topic, it's
                    possible for the request to the server to
                    time out after making some progress. Retry the
@@ -991,7 +992,6 @@ export function delete_topic(stream_id, topic_name, failures = 0) {

                    TODO: Show a nice loading indicator experience.
                 */
-                failures += 1;
                 delete_topic(stream_id, topic_name, failures);
             }
         },
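The same retry-until-done pattern, seen from an arbitrary API client: keep re-issuing the request while the server reports `partially_completed`. A hedged Python sketch (endpoint and field names are taken from the diff; URL and credentials are placeholders):

```python
import requests

def delete_topic_fully(site, email, api_key, stream_id, topic_name, max_tries=10):
    """POST /api/v1/streams/{stream_id}/delete_topic until fully completed."""
    url = f"{site}/api/v1/streams/{stream_id}/delete_topic"
    for _ in range(max_tries):  # bounded, mirroring the failures >= 9 cap above
        response = requests.post(
            url, auth=(email, api_key), data={"topic_name": topic_name}
        )
        response.raise_for_status()
        if response.json()["result"] != "partially_completed":
            return True  # "success": the topic is fully deleted
        # Partial progress was made before the server's time limit; retry.
    return False
```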
@@ -50,6 +50,7 @@ import * as settings_bots from "./settings_bots";
 import * as settings_config from "./settings_config";
 import * as settings_data from "./settings_data";
 import * as settings_users from "./settings_users";
+import * as stream_data from "./stream_data";
 import * as stream_popover from "./stream_popover";
 import * as ui_report from "./ui_report";
 import * as unread_ops from "./unread_ops";
@@ -559,18 +560,32 @@ export function toggle_actions_popover(element, id) {
         view_source_menu_item = $t({defaultMessage: "View message source"});
     }

-    // Theoretically, it could be useful to offer this even for a
-    // message that is already unread, so you can mark those below
-    // it as unread; but that's an unlikely situation, and showing
-    // it can be a confusing source of clutter.
+    // We do not offer "Mark as unread" on messages in streams
+    // that the user is not currently subscribed to. Zulip has an
+    // invariant that all unread messages must be in streams the
+    // user is subscribed to, and so the server will ignore any
+    // messages in such streams; it's better to hint this is not
+    // useful by not offering the option.
+    //
+    // We also require that the message is currently marked as
+    // read. Theoretically, it could be useful to offer this even
+    // for a message that is already unread, so you can mark those
+    // below it as unread; but that's an unlikely situation, and
+    // showing it can be a confusing source of clutter. We may
+    // want to revise this algorithm specifically in the context
+    // of interleaved views.
     //
     // To work around #22893, we also only offer the option if the
     // fetch_status data structure means we'll be able to mark
     // everything below the current message as read correctly.
+    const not_stream_message = message.type !== "stream";
+    const subscribed_to_stream =
+        message.type === "stream" && stream_data.is_subscribed(message.stream_id);
     const should_display_mark_as_unread =
         message_lists.current.data.fetch_status.has_found_newest() &&
         !message.unread &&
-        not_spectator;
+        not_spectator &&
+        (not_stream_message || subscribed_to_stream);

     const should_display_edit_history_option =
         message.edit_history &&
@@ -11,7 +11,6 @@ import {csrf_token} from "./csrf";
 import {DropdownListWidget} from "./dropdown_list_widget";
 import {$t, $t_html, get_language_name} from "./i18n";
 import * as loading from "./loading";
-import * as overlays from "./overlays";
 import {page_params} from "./page_params";
 import * as realm_icon from "./realm_icon";
 import * as realm_logo from "./realm_logo";
@@ -590,7 +589,7 @@ function discard_property_element_changes(elem, for_realm_default_settings) {
 }

 export function sync_realm_settings(property) {
-    if (!overlays.settings_open()) {
+    if (!meta.loaded) {
         return;
     }

@@ -42,29 +42,6 @@ const unread_messages = new Set();
 // for how we can refresh it efficiently.
 export const unread_mention_topics = new Map();

-function add_message_to_unread_mention_topics(message_id) {
-    const message = message_store.get(message_id);
-    if (message.type !== "stream") {
-        return;
-    }
-    const topic_key = recent_topics_util.get_topic_key(message.stream_id, message.topic);
-    if (unread_mention_topics.has(topic_key)) {
-        unread_mention_topics.get(topic_key).add(message_id);
-    }
-    unread_mention_topics.set(topic_key, new Set([message_id]));
-}
-
-function remove_message_from_unread_mention_topics(message_id) {
-    const message = message_store.get(message_id);
-    if (message.type !== "stream") {
-        return;
-    }
-    const topic_key = recent_topics_util.get_topic_key(message.stream_id, message.topic);
-    if (unread_mention_topics.has(topic_key)) {
-        unread_mention_topics.get(topic_key).delete(message_id);
-    }
-}
-
 class Bucketer {
     // Maps item_id => bucket_key for items present in a bucket.
     reverse_lookup = new Map();
@@ -468,6 +445,39 @@ class UnreadTopicCounter {
 }
 const unread_topic_counter = new UnreadTopicCounter();

+function add_message_to_unread_mention_topics(message_id) {
+    const message = message_store.get(message_id);
+    if (message.type !== "stream") {
+        return;
+    }
+    const topic_key = recent_topics_util.get_topic_key(message.stream_id, message.topic);
+    if (unread_mention_topics.has(topic_key)) {
+        unread_mention_topics.get(topic_key).add(message_id);
+    }
+    unread_mention_topics.set(topic_key, new Set([message_id]));
+}
+
+function remove_message_from_unread_mention_topics(message_id) {
+    const stream_id = unread_topic_counter.bucketer.reverse_lookup.get(message_id);
+    if (!stream_id) {
+        // Private messages and messages that were already not unread
+        // exit here.
+        return;
+    }
+
+    const per_stream_bucketer = unread_topic_counter.bucketer.get_bucket(stream_id);
+    if (!per_stream_bucketer) {
+        blueslip.error(`Could not find per_stream_bucketer for ${message_id}.`);
+        return;
+    }
+
+    const topic = per_stream_bucketer.reverse_lookup.get(message_id);
+    const topic_key = recent_topics_util.get_topic_key(stream_id, topic);
+    if (unread_mention_topics.has(topic_key)) {
+        unread_mention_topics.get(topic_key).delete(message_id);
+    }
+}
+
 export function clear_and_populate_unread_mention_topics() {
     // The unread_mention_topics is an important data structure for
     // efficiently querying whether a given stream/topic pair contains
@@ -565,9 +575,7 @@ export function process_loaded_messages(messages, expect_no_new_unreads = false) {
             // to a view and the message_fetch request returns
             // before server_events system delivers the message to
             // the client.
-            //
-            // For now, log it as a blueslip error so we can learn its prevalence.
-            blueslip.error("New unread discovered in process_loaded_messages.");
+            blueslip.log(`New unread ${message.id} discovered in process_loaded_messages.`);
         }

         const user_ids_string =
@@ -648,10 +656,14 @@ export function mark_as_read(message_id) {
     // the following methods are cheap and work fine even if message_id
     // was never set to unread.
     unread_pm_counter.delete(message_id);
+
+    // Important: This function uses `unread_topic_counter` to look up
+    // the stream/topic for this previously unread message, so must
+    // happen before the message is removed from that data structure.
+    remove_message_from_unread_mention_topics(message_id);
     unread_topic_counter.delete(message_id);
     unread_mentions_counter.delete(message_id);
     unread_messages.delete(message_id);
-    remove_message_from_unread_mention_topics(message_id);

     const message = message_store.get(message_id);
     if (message) {
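The new `remove_message_from_unread_mention_topics` no longer consults `message_store` (the message object may already be gone by the time it runs); it recovers the stream and topic from the counter's two-level bucketer via its `reverse_lookup` maps. A minimal Python sketch of that idea (names mirror the diff; the details are assumptions for illustration):

```python
# Each level keeps an item_id -> bucket_key map alongside its buckets, so a
# message's stream and topic can be recovered from the bare message_id.
stream_by_message = {}   # outer reverse_lookup: message_id -> stream_id
per_stream_topics = {}   # stream_id -> per-stream reverse_lookup (message_id -> topic)

def add_unread(message_id, stream_id, topic):
    stream_by_message[message_id] = stream_id
    per_stream_topics.setdefault(stream_id, {})[message_id] = topic

def lookup_stream_topic(message_id):
    stream_id = stream_by_message.get(message_id)
    if stream_id is None:
        return None  # private message, or was never unread
    return stream_id, per_stream_topics[stream_id][message_id]
```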
@@ -1,6 +1,11 @@
 // @flow strict

-export type Emoji = {emoji_name: string, emoji_code: string, ...};
+export type Emoji = {
+    emoji_name: string,
+    emoji_code: string,
+    reaction_type: "unicode_emoji" | "realm_emoji" | "zulip_extra_emoji",
+    ...
+};

 // declare export var popular_emojis

@@ -1,6 +1,6 @@
 {
     "name": "@zulip/shared",
-    "version": "0.0.15",
+    "version": "0.0.16",
     "license": "Apache-2.0",
     "scripts": {
         "version": "tools/npm-version",
@@ -23,7 +23,7 @@ Next steps:

 \$ ${bold}git log --stat -p upstream..${reset} # check your work!

-\$ ${bold}git push upstream main shared-${npm_package_version}${reset}
+\$ ${bold}git push --atomic upstream main shared-${npm_package_version}${reset}

 \$ ${bold}npm publish${reset} # should prompt for an OTP, from your 2FA setup
 "
@@ -26,7 +26,7 @@
         {{else if is_plan_standard}}
             <a href="/plans" target="_blank" rel="noopener noreferrer" role="menuitem">Zulip Cloud Standard</a>
         {{else if is_plan_standard_sponsored_for_free}}
-            <a href="/plans" target="_blank" rel="noopener noreferrer" role="menuitem">Zulip Cloud Standard, sponsored for free</a>
+            <a href="/plans" target="_blank" rel="noopener noreferrer" role="menuitem">Zulip Cloud Standard (sponsored)</a>
         {{/if}}
     </li>
 {{/if}}
@@ -20,6 +20,13 @@ format used by the Zulip server that they are interacting with.

 ## Changes in Zulip 6.0

+**Feature level 154**
+
+* [`POST /streams/{stream_id}/delete_topic`](/api/delete-topic):
+  When the process of deleting messages times out, a success response
+  with "partially_completed" result will now be returned by the server,
+  analogously to the `/mark_all_as_read` endpoint.
+
 **Feature level 153**

 * [`POST /mark_all_as_read`](/api/mark-all-as-read): Messages are now
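Clients that want to depend on the new partial-success semantics can gate on the server's advertised feature level. A hedged sketch using the unauthenticated `GET /api/v1/server_settings` endpoint, which reports `zulip_feature_level`:

```python
import requests

def supports_partial_topic_delete(site):
    # Feature level 154 introduced the "partially_completed" result for
    # POST /streams/{stream_id}/delete_topic (see the changelog entry above).
    settings = requests.get(f"{site}/api/v1/server_settings").json()
    return settings.get("zulip_feature_level", 0) >= 154
```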
@@ -33,7 +33,7 @@ DESKTOP_WARNING_VERSION = "5.4.3"
 # Changes should be accompanied by documentation explaining what the
 # new level means in templates/zerver/api/changelog.md, as well as
 # "**Changes**" entries in the endpoint's documentation in `zulip.yaml`.
-API_FEATURE_LEVEL = 153
+API_FEATURE_LEVEL = 154

 # Bump the minor PROVISION_VERSION to indicate that folks should provision
 # only when going from an old version of the code to a newer version. Bump
@@ -717,3 +717,15 @@ def mock_queue_publish(

     with mock.patch(method_to_patch, side_effect=verify_serialize):
         yield inner
+
+
+@contextmanager
+def timeout_mock(mock_path: str) -> Iterator[None]:
+    # timeout() doesn't work in test environment with database operations
+    # and they don't get committed - so we need to replace it with a mock
+    # that just calls the function.
+    def mock_timeout(seconds: int, func: Callable[[], object]) -> object:
+        return func()
+
+    with mock.patch(f"{mock_path}.timeout", new=mock_timeout):
+        yield
@@ -14388,6 +14388,9 @@ paths:
           for very large topics. It now deletes messages in batches,
           starting with the newest messages, so that progress will be
           made even if the request times out.
+
+          As of feature level 154, in case of timeout, a success response
+          with "partially_completed" result will now be returned.
       parameters:
         - $ref: "#/components/parameters/StreamIdInPath"
         - name: topic_name
@@ -14400,7 +14403,26 @@ paths:
           required: true
       responses:
         "200":
-          $ref: "#/components/responses/SimpleSuccess"
+          description: Success or partial success.
+          content:
+            application/json:
+              schema:
+                oneOf:
+                  - allOf:
+                      - $ref: "#/components/schemas/JsonSuccess"
+                      - $ref: "#/components/schemas/SuccessDescription"
+                  - allOf:
+                      - $ref: "#/components/schemas/PartiallyCompleted"
+                      - example:
+                          {
+                            "code": "REQUEST_TIMEOUT",
+                            "msg": "",
+                            "result": "partially_completed",
+                          }
+                        description: |
+                          If the request exceeds its processing time limit after having
+                          successfully deleted some messages, response code 200
+                          with result "partially_completed" and code "REQUEST_TIMEOUT" will be returned like this:
         "400":
           description: Bad request.
           content:
@@ -1,5 +1,4 @@
-from contextlib import contextmanager
-from typing import TYPE_CHECKING, Any, Callable, Iterator, List, Mapping, Set
+from typing import TYPE_CHECKING, Any, List, Mapping, Set
 from unittest import mock

 import orjson
@@ -22,7 +21,7 @@ from zerver.lib.message import (
     get_raw_unread_data,
 )
 from zerver.lib.test_classes import ZulipTestCase
-from zerver.lib.test_helpers import get_subscription
+from zerver.lib.test_helpers import get_subscription, timeout_mock
 from zerver.lib.timeout import TimeoutExpired
 from zerver.lib.user_topics import add_topic_mute
 from zerver.models import (
@@ -51,18 +50,6 @@ def check_flags(flags: List[str], expected: Set[str]) -> None:
     raise AssertionError(f"expected flags (ignoring has_alert_word) to be {expected}")


-@contextmanager
-def timeout_mock() -> Iterator[None]:
-    # timeout() doesn't work in test environment with database operations
-    # and they don't get committed - so we need to replace it with a mock
-    # that just calls the function.
-    def mock_timeout(seconds: int, func: Callable[[], object]) -> object:
-        return func()
-
-    with mock.patch("zerver.views.message_flags.timeout", new=mock_timeout):
-        yield
-
-
 class FirstUnreadAnchorTests(ZulipTestCase):
     """
     HISTORICAL NOTE:
@@ -76,7 +63,7 @@ class FirstUnreadAnchorTests(ZulipTestCase):
         self.login("hamlet")

         # Mark all existing messages as read
-        with timeout_mock():
+        with timeout_mock("zerver.views.message_flags"):
             result = self.client_post("/json/mark_all_as_read")
         self.assert_json_success(result)

@@ -136,7 +123,7 @@ class FirstUnreadAnchorTests(ZulipTestCase):
     def test_visible_messages_use_first_unread_anchor(self) -> None:
         self.login("hamlet")

-        with timeout_mock():
+        with timeout_mock("zerver.views.message_flags"):
             result = self.client_post("/json/mark_all_as_read")
         self.assert_json_success(result)

@@ -579,7 +566,7 @@ class PushNotificationMarkReadFlowsTest(ZulipTestCase):
             [third_message_id, fourth_message_id],
         )

-        with timeout_mock():
+        with timeout_mock("zerver.views.message_flags"):
             result = self.client_post("/json/mark_all_as_read", {})
         self.assertEqual(self.get_mobile_push_notification_ids(user_profile), [])
         mock_push_notifications.assert_called()
@@ -602,7 +589,7 @@ class MarkAllAsReadEndpointTest(ZulipTestCase):
             .count()
         )
         self.assertNotEqual(unread_count, 0)
-        with timeout_mock():
+        with timeout_mock("zerver.views.message_flags"):
             result = self.client_post("/json/mark_all_as_read", {})
         self.assert_json_success(result)

@@ -1,7 +1,12 @@
+from unittest import mock
+
+import orjson
 from django.utils.timezone import now as timezone_now

 from zerver.actions.streams import do_change_stream_permission
 from zerver.lib.test_classes import ZulipTestCase
+from zerver.lib.test_helpers import timeout_mock
+from zerver.lib.timeout import TimeoutExpired
 from zerver.models import Message, UserMessage, get_client, get_realm, get_stream


@@ -282,6 +287,7 @@ class TopicDeleteTest(ZulipTestCase):
             acting_user=user_profile,
         )
         # Delete the topic should now remove all messages
+        with timeout_mock("zerver.views.streams"):
             result = self.client_post(
                 endpoint,
                 {
@@ -293,6 +299,7 @@ class TopicDeleteTest(ZulipTestCase):
         self.assertTrue(Message.objects.filter(id=initial_last_msg_id).exists())

         # Delete again, to test the edge case of deleting an empty topic.
+        with timeout_mock("zerver.views.streams"):
             result = self.client_post(
                 endpoint,
                 {
@@ -302,3 +309,29 @@ class TopicDeleteTest(ZulipTestCase):
         self.assert_json_success(result)
         self.assertFalse(Message.objects.filter(id=last_msg_id).exists())
         self.assertTrue(Message.objects.filter(id=initial_last_msg_id).exists())
+
+    def test_topic_delete_timeout(self) -> None:
+        stream_name = "new_stream"
+        topic_name = "new topic 2"
+
+        user_profile = self.example_user("iago")
+        self.subscribe(user_profile, stream_name)
+
+        stream = get_stream(stream_name, user_profile.realm)
+        self.send_stream_message(user_profile, stream_name, topic_name=topic_name)
+
+        self.login_user(user_profile)
+        endpoint = "/json/streams/" + str(stream.id) + "/delete_topic"
+        with mock.patch("zerver.views.streams.timeout", side_effect=TimeoutExpired):
+            result = self.client_post(
+                endpoint,
+                {
+                    "topic_name": topic_name,
+                },
+            )
+            self.assertEqual(result.status_code, 200)
+
+            result_dict = orjson.loads(result.content)
+            self.assertEqual(
+                result_dict, {"result": "partially_completed", "msg": "", "code": "REQUEST_TIMEOUT"}
+            )
@@ -1,5 +1,5 @@
 from collections import defaultdict
-from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Set, Union
+from typing import Any, Callable, Dict, List, Literal, Mapping, Optional, Sequence, Set, Union

 import orjson
 from django.conf import settings
@@ -54,7 +54,7 @@ from zerver.lib.exceptions import (
 )
 from zerver.lib.mention import MentionBackend, silent_mention_syntax_for_user
 from zerver.lib.request import REQ, has_request_variables
-from zerver.lib.response import json_success
+from zerver.lib.response import json_partial_success, json_success
 from zerver.lib.retention import STREAM_MESSAGE_BATCH_SIZE as RETENTION_STREAM_MESSAGE_BATCH_SIZE
 from zerver.lib.retention import parse_message_retention_days
 from zerver.lib.streams import (
@@ -72,6 +72,7 @@ from zerver.lib.streams import (
 )
 from zerver.lib.string_validation import check_stream_name
 from zerver.lib.subscription_info import gather_subscriptions
+from zerver.lib.timeout import TimeoutExpired, timeout
 from zerver.lib.topic import (
     get_topic_history_for_public_stream,
     get_topic_history_for_stream,
@@ -867,6 +868,7 @@ def delete_in_topic(
     ).values_list("message_id", flat=True)
     messages = messages.filter(id__in=deletable_message_ids)

+    def delete_in_batches() -> Literal[True]:
         # Topics can be large enough that this request will inevitably time out.
         # In such a case, it's good for some progress to be accomplished, so that
         # full deletion can be achieved by repeating the request. For that purpose,
@@ -882,6 +884,14 @@ def delete_in_topic(
             break
         do_delete_messages(user_profile.realm, messages_to_delete)

+        # timeout(), in which we call this function, requires a non-None return value.
+        return True
+
+    try:
+        timeout(50, delete_in_batches)
+    except TimeoutExpired:
+        return json_partial_success(request, data={"code": ErrorCode.REQUEST_TIMEOUT.name})
+
     return json_success(request)

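The pattern in this last hunk — a batched worker wrapped in a wall-clock limit, with the timeout converted into a partial-success response — is easy to isolate. A self-contained sketch, assuming only that `timeout()` runs the callable and raises `TimeoutExpired` when the limit is hit (the signal-based stand-in below is Unix-only and purely illustrative; Zulip's `zerver.lib.timeout` has its own implementation):

```python
import signal

class TimeoutExpired(Exception):
    pass

def timeout(seconds, func):
    # Illustrative stand-in: run func(), raising TimeoutExpired after `seconds`.
    def handler(signum, frame):
        raise TimeoutExpired
    previous = signal.signal(signal.SIGALRM, handler)
    signal.alarm(seconds)
    try:
        return func()
    finally:
        signal.alarm(0)
        signal.signal(signal.SIGALRM, previous)

def delete_topic(message_ids, batch_size=100):
    def delete_in_batches():
        while message_ids:
            del message_ids[:batch_size]  # stand-in for do_delete_messages()
        return True  # timeout() requires a non-None return value

    try:
        timeout(50, delete_in_batches)
    except TimeoutExpired:
        # Some batches were deleted before the limit; report partial success.
        return {"result": "partially_completed", "code": "REQUEST_TIMEOUT", "msg": ""}
    return {"result": "success", "msg": ""}

print(delete_topic(list(range(250))))
```

Because each batch commits before the next begins, a timed-out request still makes durable progress, and a client that simply repeats the call (as the frontend retry loop earlier in this range does) eventually drains the topic.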