mirror of https://github.com/zulip/zulip.git, synced 2025-10-24 00:23:49 +00:00

Compare commits (47 commits)
| Author | SHA1 | Date |
|---|---|---|
| | bec3c0943a | |
| | 7352f31c4b | |
| | dafe69761e | |
| | 956fd7c420 | |
| | f819c1e901 | |
| | 3b00029c52 | |
| | 1482a386c2 | |
| | 92aebe595b | |
| | 5ad84fd997 | |
| | 40ec59b93e | |
| | 5bf66e04fc | |
| | 3efdb7ebf3 | |
| | 80fa5006f8 | |
| | bda9d78092 | |
| | 6bb9b129f7 | |
| | d93d4c7216 | |
| | 852ac66f8e | |
| | e20bc9f9b3 | |
| | 1f2f497cab | |
| | 578f769f60 | |
| | 54fd321941 | |
| | b6c1f1d162 | |
| | d2f5937d89 | |
| | ed742fa847 | |
| | a625ca49ec | |
| | 96bd1c38dc | |
| | 9748780192 | |
| | bc3f096918 | |
| | af4aac6836 | |
| | e5f7000a23 | |
| | 00bf7b25b5 | |
| | 2c6bfe136a | |
| | db51a1c547 | |
| | 3f76745235 | |
| | b59b5cac35 | |
| | 5dd330e769 | |
| | 140e598a89 | |
| | 8159c03205 | |
| | 0d12dfd06f | |
| | 6888826d5b | |
| | f0add4638c | |
| | 974a9bd0f3 | |
| | aeb6a5df7c | |
| | 94c35d8fb0 | |
| | 1d40b2291c | |
| | d6a41b4fe3 | |
| | 5bf6f05f60 | |
15 .travis.yml Normal file

@@ -0,0 +1,15 @@
install:
  - pip install pbs
  - python provision.py --travis
cache: apt
language: python
python:
  - "2.7"
# command to run tests
script:
  - source /srv/zulip-venv/bin/activate && env PATH=$PATH:/srv/zulip-venv/bin ./tools/test-all
sudo: required
services:
  - docker
addons:
  postgresql: "9.3"

293 README.md

@@ -1,114 +1,16 @@
-Installing the Zulip Development environment
-============================================
+Zulip
+=====

-Using Vagrant
--------------
+Zulip is a powerful, open source group chat application. Written in
+Python and using the Django framework, Zulip supports both private
+messaging and group chats via conversation streams.

-This is the recommended approach, and is tested on OS X 10.10 as well as Ubuntu 14.04.
+Zulip also supports fast search, drag-and-drop file uploads, image
+previews, group private messages, audible notifications,
+missed-message emails, desktop apps, and much more.

-* If your host is OS X, download VirtualBox from
-  <http://download.virtualbox.org/virtualbox/4.3.30/VirtualBox-4.3.30-101610-OSX.dmg>
-  and install it.
-* If your host is Ubuntu 14.04:
-  sudo apt-get install vagrant lxc lxc-templates cgroup-lite redir && vagrant plugin install vagrant-lxc
-
-Once that's done, simply change to your zulip directory and run
-`vagrant up` in your terminal.  That will install the development
-server inside a Vagrant guest.
-
-Once that finishes, you can run the development server as follows:
-  vagrant ssh -- -L9991:localhost:9991
-  # Now inside the container
-  cd /srv/zulip
-  source /srv/zulip-venv/bin/activate
-  ./tools/run-dev.py --interface=''
-
-You can now visit <http://localhost:9991/> in your browser.  To get
-shell access to the virtual machine running the server, use `vagrant ssh`.
-
-(A small note on tools/run-dev.py: the --interface='' option will make
-the development server listen on all network interfaces.  While this
-is correct for the Vagrant guest sitting behind a NAT, you probably
-don't want to use that option when using run-dev.py in other environments).
-
-The run-dev.py console output will show any errors your Zulip
-development server encounters.  It runs on top of Django's "manage.py
-runserver" tool, which will automatically restart the Zulip server
-whenever you save changes to Python code.
-
-By hand
--------
-
-Install the following non-Python dependencies:
-* libffi-dev — needed for some Python extensions
-* postgresql 9.1 or later — our database (also install development headers)
-* memcached (and headers)
-* rabbitmq-server
-* libldap2-dev
-* python-dev
-* redis-server — rate limiting
-* tsearch-extras — better text search
-
-On Debian or Ubuntu systems:
-  sudo apt-get install libffi-dev memcached rabbitmq-server libldap2-dev redis-server postgresql-server-dev-all libmemcached-dev
-
-  # If on 12.04 or wheezy:
-  sudo apt-get install postgresql-9.1
-  wget https://dl.dropboxusercontent.com/u/283158365/zuliposs/postgresql-9.1-tsearch-extras_0.1.2_amd64.deb
-  sudo dpkg -i postgresql-9.1-tsearch-extras_0.1.2_amd64.deb
-
-  # If on 14.04:
-  sudo apt-get install postgresql-9.3
-  wget https://dl.dropboxusercontent.com/u/283158365/zuliposs/postgresql-9.3-tsearch-extras_0.1.2_amd64.deb
-  sudo dpkg -i postgresql-9.3-tsearch-extras_0.1.2_amd64.deb
-
-  # If on 15.04 or jessie:
-  sudo apt-get install postgresql-9.4
-  wget https://dl.dropboxusercontent.com/u/283158365/zuliposs/postgresql-9.4-tsearch-extras_0.1_amd64.deb
-  sudo dpkg -i postgresql-9.4-tsearch-extras_0.1_amd64.deb
-
-  # Then, all versions:
-  pip install -r requirements.txt
-  ./scripts/setup/configure-rabbitmq
-  ./tools/postgres-init-db
-  ./tools/do-destroy-rebuild-database
-  ./tools/emoji_dump/build_emoji
-
-To start the development server:
-  ./tools/run-dev.py
-
-… and hit http://localhost:9991/.
-
-Running the test suite
-======================
-
-One-time setup of test databases:
-  ./tools/postgres-init-test-db
-  ./tools/do-destroy-rebuild-test-database
-
-Run all tests:
-  ./tools/test-all
-
-This runs the linter plus all of our test suites; they can all be run
-separately (just read `tools/test-all` to see them).  You can also run
-individual tests, e.g.:
-  ./tools/test-backend zerver.test_bugdown.BugdownTest.test_inline_youtube
-  ./tools/test-js-with-casper 10-navigation.js
-
-Possible issues
-===============
-
-The Casper tests are flaky on the Virtualbox environment (probably due
-to some performance-sensitive races).  Until this issue is debugged,
-you may need to rerun them to get them to pass.
-
-When running the test suite, if you get an error like this:
-  sqlalchemy.exc.ProgrammingError: (ProgrammingError) function ts_match_locs_array(unknown, text, tsquery) does not exist
-  LINE 2: ...ECT message_id, flags, subject, rendered_content, ts_match_l...
-                                                               ^
-… then you need to install tsearch-extras, described above.  Afterwards, re-run the init*-db and the do-destroy-rebuild*-database scripts.
+Further information on the Zulip project and its features can be found
+at https://www.zulip.org

Contributing to Zulip
=====================
@@ -119,7 +21,8 @@ Before a pull request can be merged, you need to sign the [Dropbox
Contributor License Agreement](https://opensource.dropbox.com/cla/).

Please run the tests (tools/test-all) before submitting your pull
-request.
+request and read our [commit message style
+guidelines](http://zulip.readthedocs.org/en/latest/code-style.html#commit-messages).

Zulip has a growing collection of developer documentation including
detailed documentation on coding style available on [Read The
@@ -141,6 +44,178 @@ Running Zulip in production

This is documented in https://zulip.org/server.html and README.prod.md.

Installing the Zulip Development environment
============================================

You will need a machine with at least 2GB of RAM available (see
https://github.com/zulip/zulip/issues/32 for a plan for how to
dramatically reduce this requirement).

Using Vagrant
-------------

This is the recommended approach, and is tested on OS X 10.10 as well as Ubuntu 14.04.

* The best performing way to run the Zulip development environment is
  using an LXC container.  If your host is Ubuntu 14.04 (or newer;
  what matters is having support for LXC containers), you'll want to
  install and configure the LXC Vagrant provider like this:
  `sudo apt-get install vagrant lxc lxc-templates cgroup-lite redir && vagrant plugin install vagrant-lxc`

* If your host is OS X, download VirtualBox from
  <http://download.virtualbox.org/virtualbox/4.3.30/VirtualBox-4.3.30-101610-OSX.dmg>
  and install it.

Once that's done, simply change to your zulip directory and run
`vagrant up` in your terminal.  That will install the development
server inside a Vagrant guest.

Once that finishes, you can run the development server as follows:

```
vagrant ssh -- -L9991:localhost:9991
# Now inside the container
cd /srv/zulip
source /srv/zulip-venv/bin/activate
./tools/run-dev.py --interface=''
```

You can now visit <http://localhost:9991/> in your browser.  To get
shell access to the virtual machine running the server, use `vagrant ssh`.

(A small note on tools/run-dev.py: the `--interface=''` option will make
the development server listen on all network interfaces.  While this
is correct for the Vagrant guest sitting behind a NAT, you probably
don't want to use that option when using run-dev.py in other environments).

The run-dev.py console output will show any errors your Zulip
development server encounters.  It runs on top of Django's "manage.py
runserver" tool, which will automatically restart the Zulip server
whenever you save changes to Python code.

Using provision.py without Vagrant
----------------------------------

If you'd like to install a Zulip development environment on a server
that's already running Ubuntu 14.04 Trusty, you can do that by just
running:

```
sudo apt-get update
sudo apt-get install -y python-pbs
python /srv/zulip/provision.py

cd /srv/zulip
source /srv/zulip-venv/bin/activate
./tools/run-dev.py
```

By hand
-------

If you really want to install everything by hand, the below
instructions should work.

Install the following non-Python dependencies:
* libffi-dev — needed for some Python extensions
* postgresql 9.1 or later — our database (also install development headers)
* memcached (and headers)
* rabbitmq-server
* libldap2-dev
* python-dev
* redis-server — rate limiting
* tsearch-extras — better text search

On Debian or Ubuntu systems:

```
sudo apt-get install libffi-dev memcached rabbitmq-server libldap2-dev python-dev redis-server postgresql-server-dev-all libmemcached-dev

# If on 12.04 or wheezy:
sudo apt-get install postgresql-9.1
wget https://dl.dropboxusercontent.com/u/283158365/zuliposs/postgresql-9.1-tsearch-extras_0.1.2_amd64.deb
sudo dpkg -i postgresql-9.1-tsearch-extras_0.1.2_amd64.deb

# If on 14.04:
sudo apt-get install postgresql-9.3
wget https://dl.dropboxusercontent.com/u/283158365/zuliposs/postgresql-9.3-tsearch-extras_0.1.2_amd64.deb
sudo dpkg -i postgresql-9.3-tsearch-extras_0.1.2_amd64.deb

# If on 15.04 or jessie:
sudo apt-get install postgresql-9.4
wget https://dl.dropboxusercontent.com/u/283158365/zuliposs/postgresql-9.4-tsearch-extras_0.1_amd64.deb
sudo dpkg -i postgresql-9.4-tsearch-extras_0.1_amd64.deb

# Then, all versions:
pip install -r requirements.txt
tools/download-zxcvbn
./tools/emoji_dump/build_emoji
generate_secrets.py -d
./scripts/setup/configure-rabbitmq
./tools/postgres-init-db
./tools/do-destroy-rebuild-database
./tools/postgres-init-test-db
./tools/do-destroy-rebuild-test-database
```

To start the development server:

```
./tools/run-dev.py
```

… and visit http://localhost:9991/.

Running the test suite
======================

Run all tests:

```
./tools/test-all
```

This runs the linter plus all of our test suites; they can all be run
separately (just read `tools/test-all` to see them).  You can also run
individual tests, e.g.:

```
./tools/test-backend zerver.test_bugdown.BugdownTest.test_inline_youtube
./tools/test-js-with-casper 10-navigation.js
```

The above instructions include the first-time setup of test databases,
but you may need to rebuild the test database occasionally if you're
working on new database migrations.  To do this, run:

```
./tools/postgres-init-test-db
./tools/do-destroy-rebuild-test-database
```

Possible testing issues
=======================

- The Casper tests are flaky on the Virtualbox environment (probably
  due to some performance-sensitive races; they work reliably in
  Travis CI).  Until this issue is debugged, you may need to rerun
  them to get them to pass.

- When running the test suite, if you get an error like this:

  ```
  sqlalchemy.exc.ProgrammingError: (ProgrammingError) function ts_match_locs_array(unknown, text, tsquery) does not exist
  LINE 2: ...ECT message_id, flags, subject, rendered_content, ts_match_l...
                                                               ^
  ```

  … then you need to install tsearch-extras, described
  above.  Afterwards, re-run the `init*-db` and the
  `do-destroy-rebuild*-database` scripts.

- When building the development environment using Vagrant and the LXC
  provider, if you encounter permissions errors, you may need to
  `chown -R 1000:$(whoami) /path/to/zulip` on the host before running
  `vagrant up` in order to ensure that the synced directory has the
  correct owner during provision.  This issue will arise if you run
  `id username` on the host where `username` is the user running
  Vagrant and the output is anything but 1000.  This seems to be
  caused by Vagrant behavior; more information can be found here:
  https://github.com/fgrehm/vagrant-lxc/wiki/FAQ#help-my-shared-folders-have-the-wrong-owner

License
=======

267 README.prod.md

@@ -1,22 +1,38 @@
Zulip in production
===================

This documents the process for installing Zulip in a production environment.

-Note that if you just want to play around with Zulip and see what it
-looks like, it is easier to install it in a development environment
-following the instructions in README.dev, since then you don't need to
-worry about setting up SSL certificates and an authentication mechanism.
-
Recommended requirements:

* Server running Ubuntu Precise or Debian Wheezy
-* At least 2 CPUs for production use
-* At least 4GB of RAM for production use
-* At least 100GB of free disk for production use
-* HTTP(S) access to the public Internet (for some features;
-  discuss with Zulip Support if this is an issue for you)
+* At least 2 CPUs for production use with 100+ users
+* At least 4GB of RAM for production use with 100+ users.  We strongly
+  recommend against installing with less than 2GB of RAM, as you will
+  likely experience OOM issues.  In the future we expect Zulip's RAM
+  requirements to decrease to support smaller installations (see
+  https://github.com/zulip/zulip/issues/32).
+* At least 10GB of free disk for production use (more may be required
+  if you intend to store uploaded files locally rather than in S3
+  and your team uses that feature extensively)
+* Outgoing HTTP(S) access to the public Internet.
* SSL Certificate for the host you're putting this on
-  (e.g. https://zulip.example.com)
-* Email credentials for the service to send outgoing emails to users
-  (e.g. missed message notifications, password reminders if you're not
-  using SSO, etc.).
+  (e.g. zulip.example.com).  If you just want to see what
+  Zulip looks like, we recommend installing the development
+  environment detailed in README.md as that is easier to setup.
+* Email credentials Zulip can use to send outgoing emails to users
+  (e.g. email address confirmation emails during the signup process,
+  missed message notifications, password reminders if you're not using
+  SSO, etc.).

=======================================================================

-How to install Zulip in production:
+Installing Zulip in production
+==============================

These instructions should be followed as root.

@@ -51,6 +67,226 @@ announcements about new releases, security issues, etc.

=======================================================================

Authentication and logging into Zulip the first time
====================================================

(As you read and follow the instructions in this section, if you run
into trouble, check out the troubleshooting advice in the next major
section.)

Once you've finished installing Zulip, configuring your settings.py
file, and initializing the database, it's time to login to your new
installation.  By default, initialize-database creates 1 realm that
you can join, the ADMIN_DOMAIN realm (defined in
/etc/zulip/settings.py).

The ADMIN_DOMAIN realm is by default configured with the following settings:
* restricted_to_domain=True: Only people with emails ending with @ADMIN_DOMAIN can join.
* invite_required=False: An invitation is not required to join the realm.
* invite_by_admin_only=False: You don't need to be an admin user to invite other users.
* mandatory_topics=False: Users are not required to specify a topic when sending messages.

If you would like to change these settings, you can do so using the
following process as the zulip user:

```
cd /home/zulip/deployments/current
./manage.py shell
from zerver.models import *
r = get_realm(settings.ADMIN_DOMAIN)
r.restricted_to_domain=False  # Now anyone anywhere can login
r.save()                      # save to the database
```

If you realize you set ADMIN_DOMAIN wrong, in addition to fixing the
value in settings.py, you will also want to do a similar manage.py
process to set `r.domain = newexample.com`.
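
For concreteness, a minimal sketch of that fix, using the same
`manage.py shell` pattern as above ("oldexample.com" and
"newexample.com" are placeholders for the mistaken and corrected
domains):

```
from zerver.models import *
r = get_realm("oldexample.com")  # look up the realm under the mistaken domain
r.domain = "newexample.com"      # the corrected value; keep ADMIN_DOMAIN in settings.py in sync
r.save()                         # save to the database
```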

Depending what authentication backend you're planning to use, you will
need to do some additional setup documented in the settings.py template:

* For Google authentication, you need to follow the configuration
  instructions around GOOGLE_OAUTH2_CLIENT_ID and GOOGLE_CLIENT_ID.
* For Email authentication, you will need to follow the configuration
  instructions around outgoing SMTP from Django.

You should be able to login now.  If you get an error, check
/var/log/zulip/errors.log for a traceback, and consult the next
section for advice on how to debug.  If you aren't able to figure it
out, email zulip-devel@googlegroups.com with the traceback and we'll
try to help you out!

You will likely want to make your own user account an admin user,
which you can do via the following management command:

```
./manage.py knight username@example.com -f
```

Now that you are an administrator, you will have a special
"Administration" tab linked to from the upper-right gear menu in the
Zulip app that lets you deactivate other users, manage streams, change
the Realm settings you may have edited using manage.py shell above,
etc.

You can also use `manage.py knight` with the
`--permission=api_super_user` argument to create API super users,
which are needed to mirror messages to streams from other users for
the IRC and Jabber mirroring integrations (see
`bots/irc-mirror.py` and `bots/jabber_mirror.py` for some detail on these).

There are a large number of useful management commands under
zerver/management/commands/; you can also see them listed using
`./manage.py` with no arguments.

One such command worth highlighting because it's a valuable feature
with no UI in the Administration page is `./manage.py realm_filters`,
which allows you to configure certain patterns in messages to be
automatically linkified, e.g. whenever someone mentions "T1234" it
could be auto-linkified to ticket 1234 in your team's Trac instance.
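
Illustratively, a realm filter is just a pattern-to-URL rewrite; the
sketch below shows the effect (the regex and the Trac URL are
hypothetical examples, not Zulip's implementation):

```
import re

def linkify(text):
    # Rewrite ticket references like "T1234" into links to a Trac instance.
    return re.sub(r"\bT(\d+)\b", r"https://trac.example.com/ticket/\1", text)

print(linkify("Fixed in T1234"))  # -> Fixed in https://trac.example.com/ticket/1234
```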

Checking Zulip is healthy and debugging the services it depends on
==================================================================

You can check if the zulip application is running using:

  supervisorctl status

And checking for errors in the Zulip errors logs under
/var/log/zulip/.  That contains one log file for each service, plus
errors.log (has all errors), server.log (logs from the Django and
Tornado servers), and workers.log (combined logs from the queue
workers).

After you change configuration in /etc/zulip/settings.py or fix a
misconfiguration, you will often want to restart the Zulip application.
You can restart Zulip using:

  supervisorctl restart all

Similarly, you can stop Zulip using:

  supervisorctl stop all

The Zulip application uses several major services to store and cache
data, queue messages, and otherwise support the Zulip application:

* postgresql
* rabbitmq-server
* nginx
* redis
* memcached

If one of these services is not installed or functioning correctly,
Zulip will not work.  Below we detail some common configuration
problems and how to resolve them:

* An AMQPConnectionError traceback or error running rabbitmqctl
  usually means that RabbitMQ is not running; to fix this, try:

    service rabbitmq-server restart

  If RabbitMQ fails to start, the problem is often that you are using
  a virtual machine with broken DNS configuration; you can often
  correct this by configuring /etc/hosts properly.

* If your browser reports no webserver is running, that is likely
  because nginx is not configured properly and thus failed to start.
  nginx will fail to start if you configured SSL incorrectly or did
  not provide SSL certificates.  To fix this, configure them properly
  and then run:

    service nginx restart

If you run into additional problems, please report them so that we can
update these lists!

=======================================================================

Making your Zulip instance awesome
==================================

Once you've got Zulip set up, you'll likely want to configure it the
way you like.  There are six big things to focus on:

(1) Integrations.  We recommend setting up integrations for the major
tools that your team works with.  For example, if you're a software
development team, you may want to start with integrations for your
version control, issue tracker, CI system, and monitoring tools.

Spend time configuring these integrations to be how you like them --
if an integration is spammy, you may want to change it to not send
messages that nobody cares about (E.g. for the zulip.com trac
integration, some teams find they only want notifications when new
tickets are opened, commented on, or closed, and not every time
someone edits the metadata).

If Zulip doesn't have an integration you want, you can add your own!
Most integrations are very easy to write, and even more complex
integrations usually take less than a day's work to build.  We very
much appreciate contributions of new integrations; there is a brief
draft integration writing guide here:
https://github.com/zulip/zulip/issues/70

It can often be valuable to integrate your own internal processes to
send notifications into Zulip; e.g. notifications of new customer
signups, new error reports, or daily reports on the team's key
metrics; this can often spawn discussions in response to the data.

(2) Streams and Topics.  If it feels like a stream has too much
traffic about a topic only of interest to some of the subscribers,
consider adding or renaming streams until you feel like your team is
working productively.

Second, most users are not used to topics.  It can require a bit of
time for everyone to get used to topics and start benefitting from
them, but usually once a team is using them well, everyone ends up
enthusiastic about how much topics make life easier.  Some tips on
using topics:

* When replying to an existing conversation thread, just click on the
  message, or navigate to it with the arrow keys and hit "r" or
  "enter" to reply on the same topic
* When you start a new conversation topic, even if it's related to the
  previous conversation, type a new topic in the compose box
* You can edit topics to fix a thread that's already been started,
  which can be helpful when onboarding new batches of users to the platform.

(3) Notification settings.  Zulip gives you a great deal of control
over which messages trigger desktop notifications; you can configure
these extensively in the /#settings page (get there from the gear
menu).  If you find the desktop notifications annoying, consider
changing the settings to only trigger desktop notifications when you
receive a PM or are @-mentioned.

(4) The mobile and desktop apps.  Currently, the Zulip Desktop app
only supports talking to servers with a properly signed SSL
certificate, so you may find that you get a blank screen when you
connect to a Zulip server using a self-signed certificate.

The Zulip iOS and Android apps in their respective stores don't yet
support talking to non-zulip.com servers; the iOS app is waiting on
Apple's app store review, while the Android app is waiting on someone
to do the small project of adding a field to specify what Zulip server
to talk to.

These issues will likely all be addressed in the coming weeks; make
sure to join the zulip-announce@googlegroups.com list so that you can
receive the announcements when these become available.

(5) All the other features: Hotkeys, emoji, search filters,
@-mentions, etc.  Zulip has lots of great features; make sure your
team knows they exist and how to use them effectively.

(6) Enjoy your Zulip installation!  If you discover things that you
wish had been documented, please contribute documentation suggestions
either via a GitHub issue or pull request; we love even small
contributions, and we'd love to make the Zulip documentation cover
everything anyone might want to know about running Zulip in
production.

=======================================================================

Maintaining Zulip in production:

* To upgrade to a new version, download the appropriate release

@@ -65,7 +301,7 @@ Maintaining Zulip in production:
  transition involved.  Unless you have tested the upgrade in advance,
  we recommend doing upgrades at off hours.

-  You can create your own release tarballs from a copy of this
+  You can create your own release tarballs from a copy of zulip.git
  repository using `tools/build-release-tarball`.

* To update your settings, simply edit /etc/zulip/settings.py and then

@@ -83,6 +319,13 @@ Maintaining Zulip in production:
  support it via putting "site=https://zulip.yourdomain.net" in your
  .zuliprc.

  Every Zulip integration supports this sort of argument (or e.g. a
  ZULIP_SITE variable in a zuliprc file or the environment), but this
  is not yet documented for some of the integrations (the included
  integration documentation on /integrations will properly document
  how to do this for most integrations).  Pull requests welcome to
  document this for those integrations that don't discuss this!

* Similarly, you will need to instruct your users to specify the URL
  for your Zulip server when using the Zulip desktop and mobile apps.
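
As a sketch of what this looks like from a script's perspective, the
same server URL can be passed straight to the Python bindings,
mirroring the `zulip.Client(...)` calls in the mirroring scripts below
(the email and API key here are placeholders):

```
import zulip

client = zulip.Client(
    site="https://zulip.yourdomain.net",  # same value as site= in .zuliprc
    email="my-bot@yourdomain.net",
    api_key="0123456789abcdef0123456789abcdef",
    client="MyIntegration/0.1")
```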

@@ -49,7 +49,7 @@ client = zulip.Client(
    site=config.ZULIP_SITE,
    api_key=config.ZULIP_API_KEY,
    client="ZulipBasecamp/" + VERSION)
-user_agent = "Basecamp To Zulip Mirroring script (support@zulip.com)"
+user_agent = "Basecamp To Zulip Mirroring script (zulip-devel@googlegroups.com)"
htmlParser = HTMLParser()

# find some form of JSON loader/dumper, with a preference order for speed.

@@ -58,7 +58,7 @@ client = zulip.Client(
    site=config.ZULIP_SITE,
    api_key=config.ZULIP_API_KEY,
    client="ZulipCodebase/" + VERSION)
-user_agent = "Codebase To Zulip Mirroring script (support@zulip.com)"
+user_agent = "Codebase To Zulip Mirroring script (zulip-devel@googlegroups.com)"

# find some form of JSON loader/dumper, with a preference order for speed.
json_implementations = ['ujson', 'cjson', 'simplejson', 'json']

@@ -26,7 +26,7 @@ package_info = dict(
    version=version(),
    description='Bindings for the Zulip message API',
    author='Zulip, Inc.',
-    author_email='support@zulip.com',
+    author_email='zulip-devel@googlegroups.com',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',

@@ -11,5 +11,5 @@ ZULIP_DIR=/home/zulip/deployments/current
STATE_DIR=/var/lib/nagios_state
STATE_FILE=$STATE_DIR/check-rabbitmq-consumers-$queue

-$ZULIP_DIR/bots/check-rabbitmq-consumers --queue=$queue &> ${STATE_FILE}-tmp;
-mv ${STATE_FILE}-tmp $STATE_FILE
+"$ZULIP_DIR/bots/check-rabbitmq-consumers" "--queue=$queue" &> "${STATE_FILE}-tmp";
+mv "${STATE_FILE}-tmp" "$STATE_FILE"

@@ -54,6 +54,6 @@ while backoff.keep_going():
    print ""
    print ""
    print "ERROR: The Jabber mirroring bot is unable to continue mirroring Jabber."
-    print "Please contact support@zulip.com if you need assistence."
+    print "Please contact zulip-devel@googlegroups.com if you need assistance."
    print ""
    sys.exit(1)
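
For context, `backoff.keep_going()` above is a retry budget around the
mirroring loop; a hypothetical sketch of such a helper (not the actual
implementation in the Zulip codebase) looks like:

```
import random
import time

class Backoff(object):
    """Retry budget with jittered exponential sleeps between failures."""
    def __init__(self, maximum_retries=10):
        self.number_of_retries = 0
        self.maximum_retries = maximum_retries

    def keep_going(self):
        return self.number_of_retries < self.maximum_retries

    def fail(self):
        self.number_of_retries += 1
        # Sleep up to 2, 4, 8, ... seconds (jittered), capped at 90 seconds.
        time.sleep(min(90, (2 ** self.number_of_retries) * random.random()))
```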

1 docs/requirements.readthedocs.txt Normal file

@@ -0,0 +1 @@
# Empty requirements.txt to avoid readthedocs installing all our dependencies.

@@ -51,6 +51,10 @@ NPM_DEPENDENCIES = {
VENV_PATH="/srv/zulip-venv"
ZULIP_PATH="/srv/zulip"

+# TODO: Parse arguments properly
+if "--travis" in sys.argv:
+    ZULIP_PATH="."
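
A sketch of what the TODO above might look like with real argument
parsing (assuming `--travis` remains the only flag):

```
import argparse

parser = argparse.ArgumentParser(
    description="Provision a Zulip development environment")
parser.add_argument("--travis", action="store_true",
                    help="provision for Travis CI, using the current directory")
args = parser.parse_args()

ZULIP_PATH = "." if args.travis else "/srv/zulip"
```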

# tsearch-extras is an extension to postgres's built-in full-text search.
# TODO: use a real APT repository
TSEARCH_URL_BASE = "https://dl.dropboxusercontent.com/u/283158365/zuliposs/"

@@ -159,6 +163,10 @@ def main():
    os.system("tools/download-zxcvbn")
    os.system("tools/emoji_dump/build_emoji")
    os.system("generate_secrets.py -d")
+    if "--travis" in sys.argv:
+        os.system("sudo service rabbitmq-server restart")
+        os.system("sudo service redis-server restart")
+        os.system("sudo service memcached restart")
    sh.configure_rabbitmq(**LOUD)
    sh.postgres_init_db(**LOUD)
    sh.do_destroy_rebuild_database(**LOUD)

@@ -9,7 +9,7 @@ server {
    listen 443;

    ssl on;
-    ssl_certificate /etc/ssl/certs/zulip-combined-chain.crt;
+    ssl_certificate /etc/ssl/certs/zulip.combined-chain.crt;
    ssl_certificate_key /etc/ssl/private/zulip.key;

    location /user_uploads {

@@ -1,22 +0,0 @@
-[main]
-server = puppet.zulip.com
-environment = production
-confdir = /etc/puppet
-logdir=/var/log/puppet
-vardir=/var/lib/puppet
-ssldir=/var/lib/puppet/ssl
-rundir=/var/run/puppet
-factpath=$vardir/lib/facter
-templatedir=$confdir/templates
-prerun_command=/etc/puppet/etckeeper-commit-pre
-postrun_command=/etc/puppet/etckeeper-commit-post
-modulepath = /root/zulip/puppet:/etc/puppet/modules:/usr/share/puppet/modules
-
-[master]
-environment = production
-manifest = $confdir/environments/$environment/manifests/site.pp
-modulepath = $confdir/environments/$environment/modules
-[agent]
-report = true
-show_diff = true
-environment = production

@@ -1,6 +1,6 @@
# Redis configuration file example

-# Note on units: when memory size is needed, it is possible to specifiy
+# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes

@@ -12,6 +12,26 @@
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.

+################################## INCLUDES ###################################
+
+# Include one or more other config files here.  This is useful if you
+# have a standard template that goes to all Redis server but also need
+# to customize a few per-server settings.  Include files can include
+# other files, so use this wisely.
+#
+# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
+# from admin or Redis Sentinel.  Since Redis always uses the last processed
+# line as value of a configuration directive, you'd better put includes
+# at the beginning of this file to avoid overwriting config change at runtime.
+#
+# If instead you are interested in using includes to override configuration
+# options, it is better to use include as the last line.
+#
+# include /path/to/local.conf
+# include /path/to/other.conf
+
################################ GENERAL #####################################

# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
daemonize yes

@@ -24,9 +44,14 @@ pidfile /var/run/redis/redis-server.pid
# If port 0 is specified Redis will not listen on a TCP socket.
port 6379

-# If you want you can bind a single interface, if the bind option is not
-# specified all the interfaces will listen for incoming connections.
+# By default Redis listens for connections from all the network interfaces
+# available on the server.  It is possible to listen to just one or multiple
+# interfaces using the "bind" configuration directive, followed by one or
+# more IP addresses.
+#
+# Examples:
+#
+# bind 192.168.1.100 10.0.0.1
bind 127.0.0.1

# Specify the path for the unix socket that will be used to listen for

@@ -39,15 +64,31 @@ bind 127.0.0.1
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0

-# Set server verbosity to 'debug'
-# it can be one of:
+# TCP keepalive.
+#
+# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
+# of communication.  This is useful for two reasons:
+#
+# 1) Detect dead peers.
+# 2) Take the connection alive from the point of view of network
+#    equipment in the middle.
+#
+# On Linux, the specified value (in seconds) is the period used to send ACKs.
+# Note that to close the connection the double of the time is needed.
+# On other kernels the period depends on the kernel configuration.
+#
+# A reasonable value for this option is 60 seconds.
+tcp-keepalive 0
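
The mechanism described above is plain TCP keepalive; for reference,
the same option a Python client could set on its own socket
(illustrative, not part of redis.conf):

```
import socket

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Ask the kernel to probe the peer periodically so dead connections are noticed.
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
```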

+# Specify the server verbosity level.
+# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice

-# Specify the log file name. Also 'stdout' can be used to force
+# Specify the log file name. Also the empty string can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile /var/log/redis/redis-server.log

@@ -59,7 +100,7 @@ logfile /var/log/redis/redis-server.log
# Specify the syslog identity.
# syslog-ident redis

# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0

# Set the number of databases. The default database is DB 0, you can select

@@ -67,7 +108,7 @@ logfile /var/log/redis/redis-server.log
# dbid is a number between 0 and 'databases'-1
databases 16

-################################ SNAPSHOTTING #################################
+################################ SNAPSHOTTING ################################
#
# Save the DB on disk:
#

@@ -82,10 +123,31 @@ databases 16
#   after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving at all commenting all the "save" lines.
+#
+# It is also possible to remove all the previously configured save
+# points by adding a save directive with a single empty string argument
+# like in the following example:
+#
+# save ""

-# save 900 1
-# save 300 10
-# save 60 10000
+save 900 1
+save 300 10
+save 60 10000
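
Taken together, the three `save` lines implement the rule described in
the comments above; as a quick sketch of the logic (illustrative
Python, not redis source):

```
SAVE_POINTS = [(900, 1), (300, 10), (60, 10000)]  # (seconds, minimum keys changed)

def should_snapshot(seconds_since_last_save, keys_changed):
    # Snapshot as soon as any configured save point is satisfied.
    return any(seconds_since_last_save >= seconds and keys_changed >= changes
               for seconds, changes in SAVE_POINTS)

print(should_snapshot(350, 12))  # True: the "save 300 10" rule fires
```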

+# By default Redis will stop accepting writes if RDB snapshots are enabled
+# (at least one save point) and the latest background save failed.
+# This will make the user aware (in a hard way) that data is not persisting
+# on disk properly, otherwise chances are that no one will notice and some
+# disaster will happen.
+#
+# If the background saving process will start working again Redis will
+# automatically allow writes again.
+#
+# However if you have setup your proper monitoring of the Redis server
+# and persistence, you may want to disable this feature so that Redis will
+# continue to work as usual even if there are problems with disk,
+# permissions, and so forth.
+stop-writes-on-bgsave-error yes

# Compress string objects using LZF when dump .rdb databases?
# For default that's set to 'yes' as it's almost always a win.
@@ -93,6 +155,15 @@ databases 16
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes

+# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
+# This makes the format more resistant to corruption but there is a performance
+# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
+# for maximum performances.
+#
+# RDB files created with checksum disabled have a checksum of zero that will
+# tell the loading code to skip the check.
+rdbchecksum yes
+
# The filename where to dump the DB
dbfilename dump.rdb

@@ -100,9 +171,9 @@ dbfilename dump.rdb
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
-# Also the Append Only File will be created inside this directory.
+# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir /var/lib/redis

@@ -122,27 +193,46 @@ dir /var/lib/redis
#
# masterauth <master-password>

-# When a slave lost the connection with the master, or when the replication
+# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
-#    still reply to client requests, possibly with out of data data, or the
+#    still reply to client requests, possibly with out of date data, or the
#    data set may just be empty if this is the first synchronization.
#
-# 2) if slave-serve-stale data is set to 'no' the slave will reply with
+# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
#    an error "SYNC with master in progress" to all the kind of commands
#    but to INFO and SLAVEOF.
#
slave-serve-stale-data yes

+# You can configure a slave instance to accept writes or not. Writing against
+# a slave instance may be useful to store some ephemeral data (because data
+# written on a slave will be easily deleted after resync with the master) but
+# may also cause problems if clients are writing to it because of a
+# misconfiguration.
+#
+# Since Redis 2.6 by default slaves are read-only.
+#
+# Note: read only slaves are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only slave exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only slaves using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+slave-read-only yes
+
# Slaves send PINGs to server in a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10

-# The following option sets a timeout for both Bulk transfer I/O timeout and
-# master data or ping response timeout. The default value is 60 seconds.
+# The following option sets the replication timeout for:
+#
+# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
+# 2) Master timeout from the point of view of slaves (data, pings).
+# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected

@@ -150,6 +240,80 @@ slave-serve-stale-data yes
#
# repl-timeout 60

+# Disable TCP_NODELAY on the slave socket after SYNC?
+#
+# If you select "yes" Redis will use a smaller number of TCP packets and
+# less bandwidth to send data to slaves. But this can add a delay for
+# the data to appear on the slave side, up to 40 milliseconds with
+# Linux kernels using a default configuration.
+#
+# If you select "no" the delay for data to appear on the slave side will
+# be reduced but more bandwidth will be used for replication.
+#
+# By default we optimize for low latency, but in very high traffic conditions
+# or when the master and slaves are many hops away, turning this to "yes" may
+# be a good idea.
+repl-disable-tcp-nodelay no
+
+# Set the replication backlog size. The backlog is a buffer that accumulates
+# slave data when slaves are disconnected for some time, so that when a slave
+# wants to reconnect again, often a full resync is not needed, but a partial
+# resync is enough, just passing the portion of data the slave missed while
+# disconnected.
+#
+# The biggest the replication backlog, the longer the time the slave can be
+# disconnected and later be able to perform a partial resynchronization.
+#
+# The backlog is only allocated once there is at least a slave connected.
+#
+# repl-backlog-size 1mb
+
+# After a master has no longer connected slaves for some time, the backlog
+# will be freed. The following option configures the amount of seconds that
+# need to elapse, starting from the time the last slave disconnected, for
+# the backlog buffer to be freed.
+#
+# A value of 0 means to never release the backlog.
+#
+# repl-backlog-ttl 3600
+
+# The slave priority is an integer number published by Redis in the INFO output.
+# It is used by Redis Sentinel in order to select a slave to promote into a
+# master if the master is no longer working correctly.
+#
+# A slave with a low priority number is considered better for promotion, so
+# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
+# pick the one with priority 10, that is the lowest.
+#
+# However a special priority of 0 marks the slave as not able to perform the
+# role of master, so a slave with priority of 0 will never be selected by
+# Redis Sentinel for promotion.
+#
+# By default the priority is 100.
+slave-priority 100
+
+# It is possible for a master to stop accepting writes if there are less than
+# N slaves connected, having a lag less or equal than M seconds.
+#
+# The N slaves need to be in "online" state.
+#
+# The lag in seconds, that must be <= the specified value, is calculated from
+# the last ping received from the slave, that is usually sent every second.
+#
+# This option does not GUARANTEES that N replicas will accept the write, but
+# will limit the window of exposure for lost writes in case not enough slaves
+# are available, to the specified number of seconds.
+#
+# For example to require at least 3 slaves with a lag <= 10 seconds use:
+#
+# min-slaves-to-write 3
+# min-slaves-max-lag 10
+#
+# Setting one or the other to 0 disables the feature.
+#
+# By default min-slaves-to-write is set to 0 (feature disabled) and
+# min-slaves-max-lag is set to 10.

################################## SECURITY ###################################

# Require clients to issue AUTH <PASSWORD> before processing any other

@@ -158,7 +322,7 @@ slave-serve-stale-data yes
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.

@@ -167,33 +331,39 @@ slave-serve-stale-data yes

# Command renaming.
#
-# It is possilbe to change the name of dangerous commands in a shared
+# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
-# of hard to guess so that it will be still available for internal-use
-# tools but not available for general clients.
+# hard to guess so that it will still be available for internal-use tools
+# but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
-# It is also possilbe to completely kill a command renaming it into
+# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to slaves may cause problems.

################################### LIMITS ####################################

-# Set the max number of connected clients at the same time. By default there
-# is no limit, and it's up to the number of file descriptors the Redis process
-# is able to open. The special value '0' means no limits.
+# Set the max number of connected clients at the same time. By default
+# this limit is set to 10000 clients, however if the Redis server is not
+# able to configure the process file limit to allow for the specified limit
+# the max number of allowed clients is set to the current file limit
+# minus 32 (as Redis reserves a few file descriptors for internal uses).
#
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
-# maxclients 128
+# maxclients 10000

# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
-# accordingly to the eviction policy selected (see maxmemmory-policy).
+# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands

@@ -201,7 +371,7 @@ slave-serve-stale-data yes
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU cache, or to set
-# an hard memory limit for an instance (using the 'noeviction' policy).
+# a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have slaves attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the slaves are subtracted

@@ -217,16 +387,16 @@ slave-serve-stale-data yes
# maxmemory <bytes>

# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
-# is reached? You can select among five behavior:
+# is reached. You can select among five behaviors:
#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key accordingly to the LRU algorithm
# volatile-random -> remove a random key with an expire set
-# allkeys->random -> remove a random key, any key
+# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
#
-# Note: with all the kind of policies, Redis will return an error on write
+# Note: with any of the above policies, Redis will return an error on write
# operations, when there are not suitable keys for eviction.
#
# At the date of writing this commands are: set setnx setex append
@@ -249,45 +419,51 @@ slave-serve-stale-data yes
|
||||
|
||||
############################## APPEND ONLY MODE ###############################
|
||||
|
||||
# By default Redis asynchronously dumps the dataset on disk. If you can live
|
||||
# with the idea that the latest records will be lost if something like a crash
|
||||
-# happens this is the preferred way to run Redis. If instead you care a lot
-# about your data and don't want to that a single record can get lost you should
-# enable the append only mode: when this mode is enabled Redis will append
-# every write operation received in the file appendonly.aof. This file will
-# be read on startup in order to rebuild the full dataset in memory.
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result into a few minutes of writes lost (depending on
+# the configured save points).
 #
-# Note that you can have both the async dumps and the append only file if you
-# like (you have to comment the "save" statements above to disable the dumps).
-# Still if append only mode is enabled Redis will load the data from the
-# log file at startup ignoring the dump.rdb file.
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# wrong with the Redis process itself happens, but the operating system is
+# still running correctly.
 #
-# IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
-# log file in background when it gets too big.
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled on startup Redis will load the AOF, that is the file
+# with the better durability guarantees.
+#
+# Please check http://redis.io/topics/persistence for more information.

appendonly no

# The name of the append only file (default: "appendonly.aof")
-# appendfilename appendonly.aof
+appendfilename "appendonly.aof"

# The fsync() call tells the Operating System to actually write data on disk
# instead to wait for more data in the output buffer. Some OS will really flush
# data on disk, some other OS will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log . Slow, Safest.
-# everysec: fsync only if one second passed since the last fsync. Compromise.
+# everysec: fsync only one time every second. Compromise.
#
-# The default is "everysec" that's usually the right compromise between
+# The default is "everysec", as that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
-# "no" that will will let the operating system flush the output buffer when
+# "no" that will let the operating system flush the output buffer when
# it wants, for better performances (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# More details please check the following article:
# http://antirez.com/post/redis-persistence-demystified.html
#
# If unsure, use "everysec".

# appendfsync always
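A quick way to experiment with the policy described above, sketched with redis-cli under the assumption of a local Redis on the default port, is to flip it at runtime and read it back:

    # Switch the fsync policy without a restart (runtime only; edit redis.conf to persist)
    redis-cli config set appendfsync everysec
    redis-cli config get appendfsync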
@@ -305,21 +481,22 @@ appendfsync everysec
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
-# This means that while another child is saving the durability of Redis is
-# the same as "appendfsync none", that in pratical terms means that it is
-# possible to lost up to 30 seconds of log in the worst scenario (with the
+# This means that while another child is saving, the durability of Redis is
+# the same as "appendfsync none". In practical terms, this means that it is
+# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.

no-appendfsync-on-rewrite no

# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
-# BGREWRITEAOF when the AOF log size will growth by the specified percentage.
+# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
-# latest rewrite (or if no rewrite happened since the restart, the size of
+# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size is
@@ -328,12 +505,30 @@ no-appendfsync-on-rewrite no
# is useful to avoid rewriting the AOF file even if the percentage increase
# is reached but it is still pretty small.
#
-# Specify a precentage of zero in order to disable the automatic AOF
+# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.

auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb

################################ LUA SCRIPTING ###############################

# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceed the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that did not yet called write commands. The second
# is the only way to shut down the server in the case a write commands was
# already issue by the script but the user don't want to wait for the natural
# termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000

################################## SLOW LOG ###################################

# The Redis Slow Log is a system to log queries that exceeded a specified
@@ -342,7 +537,7 @@ auto-aof-rewrite-min-size 64mb
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
@@ -358,88 +553,59 @@ slowlog-log-slower-than 10000
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128

-################################ VIRTUAL MEMORY ###############################
+############################# Event notification ##############################

-### WARNING! Virtual Memory is deprecated in Redis 2.4
-### The use of Virtual Memory is strongly discouraged.
-
-# Virtual Memory allows Redis to work with datasets bigger than the actual
-# amount of RAM needed to hold the whole dataset in memory.
-# In order to do so very used keys are taken in memory while the other keys
-# are swapped into a swap file, similarly to what operating systems do
-# with memory pages.
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at http://redis.io/topics/keyspace-events
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
+#
-# To enable VM just set 'vm-enabled' to yes, and set the following three
-# VM parameters accordingly to your needs.
-
-vm-enabled no
-# vm-enabled yes
-
-# This is the path of the Redis swap file. As you can guess, swap files
-# can't be shared by different Redis instances, so make sure to use a swap
-# file for every redis process you are running. Redis will complain if the
-# swap file is already in use.
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
-# The best kind of storage for the Redis swap file (that's accessed at random)
-# is a Solid State Disk (SSD).
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
-# *** WARNING *** if you are using a shared hosting the default of putting
-# the swap file under /tmp is not secure. Create a dir with access granted
-# only to Redis user and configure Redis to create the swap file there.
-vm-swap-file /var/lib/redis/redis.swap
-
-# vm-max-memory configures the VM to use at max the specified amount of
-# RAM. Everything that deos not fit will be swapped on disk *if* possible, that
-# is, if there is still enough contiguous space in the swap file.
+# K Keyspace events, published with __keyspace@<db>__ prefix.
+# E Keyevent events, published with __keyevent@<db>__ prefix.
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+# $ String commands
+# l List commands
+# s Set commands
+# h Hash commands
+# z Sorted set commands
+# x Expired events (events generated every time a key expires)
+# e Evicted events (events generated when a key is evicted for maxmemory)
+# A Alias for g$lshzxe, so that the "AKE" string means all the events.
+#
-# With vm-max-memory 0 the system will swap everything it can. Not a good
-# default, just specify the max amount of RAM you can in bytes, but it's
-# better to leave some margin. For instance specify an amount of RAM
-# that's more or less between 60 and 80% of your free RAM.
-vm-max-memory 0
-
-# Redis swap files is split into pages. An object can be saved using multiple
-# contiguous pages, but pages can't be shared between different objects.
-# So if your page is too big, small objects swapped out on disk will waste
-# a lot of space. If you page is too small, there is less space in the swap
-# file (assuming you configured the same number of total swap file pages).
+# The "notify-keyspace-events" takes as argument a string that is composed
+# by zero or multiple characters. The empty string means that notifications
+# are disabled at all.
+#
-# If you use a lot of small objects, use a page size of 64 or 32 bytes.
-# If you use a lot of big objects, use a bigger page size.
-# If unsure, use the default :)
-vm-page-size 32
-
-# Number of total memory pages in the swap file.
-# Given that the page table (a bitmap of free/used pages) is taken in memory,
-# every 8 pages on disk will consume 1 byte of RAM.
+# Example: to enable list and generic events, from the point of view of the
+# event name, use:
+#
-# The total swap size is vm-page-size * vm-pages
+# notify-keyspace-events Elg
+#
-# With the default of 32-bytes memory pages and 134217728 pages Redis will
-# use a 4 GB swap file, that will use 16 MB of RAM for the page table.
+# Example 2: to get the stream of the expired keys subscribing to channel
+# name __keyevent@0__:expired use:
+#
-# It's better to use the smallest acceptable value for your application,
-# but the default is large in order to work in most conditions.
-vm-pages 134217728
-
-# Max number of VM I/O threads running at the same time.
-# This threads are used to read/write data from/to swap file, since they
-# also encode and decode objects from disk to memory or the reverse, a bigger
-# number of threads can help with big objects even if they can't help with
-# I/O itself as the physical device may not be able to couple with many
-# reads/writes operations at the same time.
+# notify-keyspace-events Ex
+#
-# The special value of 0 turn off threaded I/O and enables the blocking
-# Virtual Memory implementation.
-vm-max-threads 4
+# By default all notifications are disabled because most users don't need
+# this feature and the feature has some overhead. Note that if you don't
+# specify at least one of K or E, no events will be delivered.
+notify-keyspace-events ""
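As a small illustration of the event classes listed above (assuming a local Redis and database 0), expired-key events can be switched on and observed like this:

    # E = keyevent channel, x = expired events
    redis-cli config set notify-keyspace-events Ex
    # Terminal 1: listen for expirations
    redis-cli psubscribe '__keyevent@0__:expired'
    # Terminal 2: set a key that expires after 100 ms
    redis-cli set foo bar px 100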

############################### ADVANCED CONFIG ###############################

-# Hashes are encoded in a special way (much more memory efficient) when they
-# have at max a given numer of elements, and the biggest element does not
-# exceed a given threshold. You can configure this limits with the following
-# configuration directives.
-hash-max-zipmap-entries 512
-hash-max-zipmap-value 64
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 64

# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
@@ -462,12 +628,12 @@ zset-max-ziplist-value 64

# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
-# keys to values). The hash table implementation redis uses (see dict.c)
-# performs a lazy rehashing: the more operation you run into an hash table
-# that is rhashing, the more rehashing "steps" are performed, so if the
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs a lazy rehashing: the more operation you run into a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# active rehashing the main dictionaries, freeing memory when possible.
#
@@ -480,12 +646,65 @@ zset-max-ziplist-value 64
# want to free memory asap when possible.
activerehashing yes

-################################## INCLUDES ###################################
-
-# Include one or more other config files here. This is useful if you
-# have a standard template that goes to all redis server but also need
-# to customize a few per-server settings. Include files can include
-# other files, so use this wisely.
-#
-# include /path/to/local.conf
-# include /path/to/other.conf
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients
+# slave -> slave clients and MONITOR clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reach 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously overcomes
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can read.
+#
+# Instead there is a default limit for pubsub and slave clients, since
+# subscribers and slaves receive data in a push fashion.
+#
+# Both the hard or the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit slave 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
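These limits can also be changed on a live server; a minimal sketch (the class and its three values travel as one quoted argument, and the numbers here are illustrative only):

    # Tighten the pubsub class at runtime
    redis-cli config set client-output-buffer-limit "pubsub 16mb 4mb 30"
    redis-cli config get client-output-buffer-limit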

# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform accordingly to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10
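On recent Redis versions hz can also be inspected and, hedging slightly since older releases may only read it from the config file, adjusted at runtime:

    redis-cli config get hz
    # Raise only when tighter expiry/timeout handling is genuinely needed
    redis-cli config set hz 50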

# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes

# Zulip-specific configuration: disable saving to disk.
save ""

@@ -40,14 +40,6 @@ class zulip::base {
    group => 'zulip',
  }

-  file { '/etc/puppet/puppet.conf':
-    ensure => file,
-    mode => 640,
-    owner => "root",
-    group => "root",
-    source => 'puppet:///modules/zulip/puppet.conf',
-  }
-
  file { '/etc/security/limits.conf':
    ensure => file,
    mode => 640,
@@ -56,6 +48,13 @@ class zulip::base {
    source => 'puppet:///modules/zulip/limits.conf',
  }

+  # This directory is written to by cron jobs for reading by Nagios
+  file { '/var/lib/nagios_state/':
+    ensure => directory,
+    group => 'zulip',
+    mode => 774,
+  }
+
  file { '/var/log/zulip':
    ensure => 'directory',
    owner => 'zulip',
@@ -1,5 +1,6 @@
class zulip::rabbit {
  $rabbit_packages = [# Needed to run rabbitmq
+    "erlang-base",
    "rabbitmq-server",
  ]
  package { $rabbit_packages: ensure => "installed" }
@@ -39,5 +40,21 @@ class zulip::rabbit {
    source => "puppet:///modules/zulip/rabbitmq/rabbitmq.config",
  }

+  # epmd doesn't have an init script. This won't leak epmd processes
+  # because epmd checks if one is already running and exits if so.
+  #
+  # TODO: Ideally we'd still check if it's already running to keep the
+  # puppet log for what is being changed clean
+  exec { "epmd":
+    command => "epmd -daemon",
+    require => Package[erlang-base],
+    path => "/usr/bin/:/bin/",
+  }
+
+  service { "rabbitmq-server":
+    ensure => running,
+    require => Exec["epmd"],
+  }
+
+  # TODO: Should also call exactly once "configure-rabbitmq"
}
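One way to address the TODO above, sketched in shell rather than Puppet (a hypothetical refinement, not what the manifest currently does), is to guard the daemon start so it becomes a no-op when epmd already answers; in the manifest itself the same idea would be an `unless` guard on the exec resource:

    # epmd -names fails when no daemon is registered, so this only starts one if needed
    epmd -names >/dev/null 2>&1 || epmd -daemon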

@@ -49,8 +49,8 @@ $wgLogo = "$wgStylePath/common/images/wiki.png";
$wgEnableEmail = true;
$wgEnableUserEmail = true; # UPO

-$wgEmergencyContact = "support@zulip.com";
-$wgPasswordSender = "support@zulip.com";
+$wgEmergencyContact = "zulip-devel@googlegroups.com";
+$wgPasswordSender = "zulip-devel@googlegroups.com";

$wgEnotifUserTalk = true; # UPO
$wgEnotifWatchlist = true; # UPO

@@ -4,4 +4,4 @@ if [ "$(hostname)" = "staging.zulip.net" ]; then
else
    site="https://api.zulip.com"
fi
-/home/zulip/deployments/current/bots/check_send_receive.py --munin $1 --site="$site"
+/home/zulip/deployments/current/bots/check_send_receive.py --munin "$1" --site="$site"
@@ -9,11 +9,11 @@ cd /home/zulip/deployments/current

BACKLOG=$(./manage.py print_email_delivery_backlog)

-if [ $BACKLOG -gt 0 -a $BACKLOG -lt 10 ]
+if [ "$BACKLOG" -gt 0 ] && [ "$BACKLOG" -lt 10 ]
then
    echo "backlog of $BACKLOG"
    exit 1
-elif [ $BACKLOG -ge 10 ]
+elif [ "$BACKLOG" -ge 10 ]
then
    echo "backlog of $BACKLOG"
    exit 2
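The quoting change above is load-bearing: if `print_email_delivery_backlog` ever prints nothing, the unquoted test degenerates into invalid syntax, and POSIX marks `-a`/`-o` inside `[ ]` as obsolescent because they parse ambiguously. A quick sketch of the failure mode (hypothetical value, safe to try in bash):

    BACKLOG=""
    [ $BACKLOG -gt 0 -a $BACKLOG -lt 10 ]           # expands to [ -gt 0 -a -lt 10 ]: broken test
    [ "$BACKLOG" -gt 0 ] && [ "$BACKLOG" -lt 10 ]   # each test still sees exactly one (empty) argument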

@@ -13,14 +13,14 @@ if [ "$STATUS" == "RUNNING" ]
then
    echo "Running"
    exit 0
-elif [ $(echo "$STATUS" | egrep '(STOPPED)|(STARTING)|(BACKOFF)|(STOPPING)|(EXITED)|(FATAL)|(UNKNOWN)$') ]
+elif [ "$(echo "$STATUS" | egrep '(STOPPED)|(STARTING)|(BACKOFF)|(STOPPING)|(EXITED)|(FATAL)|(UNKNOWN)$')" ]
then
    # not "RUNNING", but a recognized supervisor status
-    echo $STATUS
+    echo "$STATUS"
    exit 1
else
    # we don't recognize the second column in this SUPERVISOR_STATUS.
    # This may be indicative of a supervisor configuration problem
-    echo $SUPERVISOR_STATUS
+    echo "$SUPERVISOR_STATUS"
    exit 3
fi
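Here the added quotes keep `[ ... ]` well-formed: an unquoted command substitution word-splits, so a multi-word status turns the test into several arguments and it aborts with "too many arguments". A compact illustration with a hypothetical status string:

    STATUS="FATAL too many open files"
    [ $(echo "$STATUS") ]     # splits into five words; the test errors out
    [ "$(echo "$STATUS")" ]   # one argument; true whenever the string is non-empty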

@@ -1,7 +1,7 @@
#!/bin/bash
# Checks for any Zulip queue workers that are leaking memory and thus have a high vsize
datafile=$(mktemp)
-ps -o vsize,size,pid,user,command --sort -vsize $(pgrep -f '^python /home/zulip/deployments/current/manage.py process_queue') > "$datafile"
+ps -o vsize,size,pid,user,command --sort -vsize "$(pgrep -f '^python /home/zulip/deployments/current/manage.py process_queue')" > "$datafile"
cat "$datafile"
top_worker=$(cat "$datafile" | head -n2 | tail -n1)
top_worker_memory_usage=$(echo "$top_worker" | cut -f1 -d" ")

@@ -5,9 +5,9 @@ fi

export AWS_ACCESS_KEY_ID=xxxxxxxxxxxxxxxxxxxx
export AWS_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-s3_backup_bucket=$(crudini --get $ZULIP_CONF database s3_backup_bucket 2>&1)
+s3_backup_bucket=$(crudini --get "$ZULIP_CONF" database s3_backup_bucket 2>&1)
if [ $? -ne 0 ]; then
-    echo "Could not determine which s3 bucket to use:" $s3_backup_bucket
+    echo "Could not determine which s3 bucket to use:" "$s3_backup_bucket"
    exit 1
fi
export WALE_S3_PREFIX=s3://$s3_backup_bucket
@@ -102,13 +102,6 @@ class zulip_internal::base {
    group => "nagios",
    mode => 600,
  }
-  file { '/var/lib/nagios_state/':
-    ensure => directory,
-    require => User['nagios'],
-    owner => "nagios",
-    group => "nagios",
-    mode => 777,
-  }
  file { '/var/lib/nagios/.ssh':
    ensure => directory,
    require => File['/var/lib/nagios/'],
@@ -12,13 +12,7 @@ EOF

apt-get update
apt-get -y dist-upgrade
-apt-get install -y puppet git
-cp -a /root/zulip/puppet/zulip/files/puppet.conf /etc/puppet/
-
-# Hack to get required python-django-guardian while the PPA build of django-guardian isn't working :(
-apt-get -y install python-django
-wget https://zulip.com/dist/packages/python-django-guardian_1.3-1~zulip4_all.deb
-dpkg -i /root/python-django-guardian_1.3-1~zulip4_all.deb
+apt-get install -y puppet git python

mkdir -p /etc/zulip
echo -e "[machine]\npuppet_classes = zulip::voyager\ndeploy_type = voyager" > /etc/zulip/zulip.conf
@@ -34,6 +28,17 @@ fi
cp -a /root/zulip/zproject/local_settings_template.py /etc/zulip/settings.py
ln -nsf /etc/zulip/settings.py /root/zulip/zproject/local_settings.py

+if ! rabbitmqctl status >/dev/null; then
+    set +x
+    echo; echo "RabbitMQ seems to not have started properly after the installation process."
+    echo "Often, this can be caused by misconfigured /etc/hosts in virtualized environments"
+    echo "See https://github.com/zulip/zulip/issues/53#issuecomment-143805121"
+    echo "for more information"
+    echo
+    set -x
+    exit 1
+fi
+
/root/zulip/scripts/setup/configure-rabbitmq

/root/zulip/scripts/setup/postgres-init-db
@@ -61,4 +66,5 @@ cat <<EOF

su zulip -c /home/zulip/deployments/current/scripts/setup/initialize-database

To configure the initial database.
EOF

@@ -3,9 +3,9 @@
# Delete the "guest" default user and replace it with a Zulip user
# with a real password

-RMQPW=$($(dirname $0)/../../bin/get-django-setting RABBITMQ_PASSWORD)
+RMQPW=$("$(dirname "$0")/../../bin/get-django-setting" RABBITMQ_PASSWORD)
sudo rabbitmqctl delete_user zulip || true
sudo rabbitmqctl delete_user guest || true
-sudo rabbitmqctl add_user zulip $RMQPW
+sudo rabbitmqctl add_user zulip "$RMQPW"
sudo rabbitmqctl set_user_tags zulip administrator
sudo rabbitmqctl set_permissions -p / zulip '.*' '.*' '.*'
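The end state can be checked with rabbitmqctl afterwards (a quick verification, assuming it runs on the same host):

    sudo rabbitmqctl list_users             # expect zulip tagged [administrator], and no guest
    sudo rabbitmqctl list_permissions -p /  # expect zulip with '.*' '.*' '.*'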

@@ -1,12 +1,30 @@
#!/bin/bash -xe

# Change to root directory of the checkout that we're running from
-cd $(dirname $0)/../..
+cd "$(dirname "$0")/../.."

python manage.py checkconfig

python manage.py migrate --noinput
python manage.py createcachetable third_party_api_results
-python manage.py initialize_voyager_db
+
+if ! python manage.py initialize_voyager_db; then
+    set +x
+    echo
+    echo -e "\033[32mPopulating default database failed."
+    echo "After you fix the problem, you will need to do the following before rerunning this:"
+    echo "  * supervisorctl stop all # to stop all services that might be accessing the database"
+    echo "  * scripts/setup/postgres-init-db # run as root to drop and re-create the database"
+    echo -e "\033[0m"
+    set -x
+fi

supervisorctl restart all

+echo "Congratulations! You have successfully configured your Zulip database."
+echo "If you haven't already, you should configure email in /etc/zulip/settings.py"
+echo "And then you should now be able to visit your server over https and sign up using"
+echo "an email address that ends with @ADMIN_DOMAIN (from your settings file)."
+echo ""
+echo "See README.prod.md for instructions on how to confirm your Zulip install is healthy, "
+echo " change ADMIN_DOMAIN, debug common issues, and otherwise finish setting things up."
@@ -1,3 +1,3 @@
#!/bin/bash
mkdir -p /var/log/zulip
-$(dirname $(dirname $0))/lib/install "$@" 2>&1 | tee -a /var/log/zulip/install.log
+"$(dirname "$(dirname "$0")")/lib/install" "$@" 2>&1 | tee -a /var/log/zulip/install.log

@@ -1,2 +1,2 @@
#!/bin/bash
-$(dirname $0)/lib/upgrade-zulip "$@" 2>&1 | tee -a /var/log/zulip/upgrade.log
+"$(dirname "$0")/lib/upgrade-zulip" "$@" 2>&1 | tee -a /var/log/zulip/upgrade.log

@@ -23,7 +23,7 @@ include apt
for pclass in re.split(r'\s*,\s*', config.get('machine', 'puppet_classes')):
    puppet_config += "include %s\n" % (pclass,)

-puppet_cmd = ["puppet", "apply", "-e", puppet_config]
+puppet_cmd = ["puppet", "apply", "--modulepath=/root/zulip/puppet", "-e", puppet_config]
puppet_cmd += extra_args

if force:
@@ -33,7 +33,7 @@

<p>We know this is stressful, but we still love you.</p>

-<p>If you'd like, you can <a href="mailto:support@zulip.com?Subject=404%20error%20on%20%7Bwhich%20URL%3F%7D&Body=Hi%20there%21%0A%0AI%20was%20trying%20to%20do%20%7Bwhat%20were%20you%20trying%20to%20do%3F%7D%20at%20around%20%7Bwhen%20was%20this%3F%7D%20when%20I%20got%20a%20404%20error%20while%20accessing%20%7Bwhich%20URL%3F%7D.%0A%0AThanks!%0A%0ASincerely%2C%20%0A%0A%7BYour%20name%7D">drop us a line</a> to let us know what happened.</p>
+<p>If you'd like, you can <a href="mailto:zulip-devel@googlegroups.com?Subject=404%20error%20on%20%7Bwhich%20URL%3F%7D&Body=Hi%20there%21%0A%0AI%20was%20trying%20to%20do%20%7Bwhat%20were%20you%20trying%20to%20do%3F%7D%20at%20around%20%7Bwhen%20was%20this%3F%7D%20when%20I%20got%20a%20404%20error%20while%20accessing%20%7Bwhich%20URL%3F%7D.%0A%0AThanks!%0A%0ASincerely%2C%20%0A%0A%7BYour%20name%7D">drop us a line</a> to let us know what happened.</p>
</div>
</div>

@@ -39,7 +39,7 @@
data-screen-name="ZulipStatus"
>@ZulipStatus on Twitter</a>.</p>

-<p>If you'd like, you can <a href="mailto:support@zulip.com?Subject=500%20error%20on%20%7Bwhich%20URL%3F%7D&Body=Hi%20there%21%0A%0AI%20was%20trying%20to%20do%20%7Bwhat%20were%20you%20trying%20to%20do%3F%7D%20at%20around%20%7Bwhen%20was%20this%3F%7D%20when%20I%20got%20a%20500%20error%20while%20accessing%20%7Bwhich%20URL%3F%7D.%0A%0AThanks!%0A%0ASincerely%2C%20%0A%0A%7BYour%20name%7D">drop us a line</a> to let us know what happened.</p>
+<p>If you'd like, you can <a href="mailto:zulip-devel@googlegroups.com?Subject=500%20error%20on%20%7Bwhich%20URL%3F%7D&Body=Hi%20there%21%0A%0AI%20was%20trying%20to%20do%20%7Bwhat%20were%20you%20trying%20to%20do%3F%7D%20at%20around%20%7Bwhen%20was%20this%3F%7D%20when%20I%20got%20a%20500%20error%20while%20accessing%20%7Bwhich%20URL%3F%7D.%0A%0AThanks!%0A%0ASincerely%2C%20%0A%0A%7BYour%20name%7D">drop us a line</a> to let us know what happened.</p>
</div>
</div>
BIN  static/images/integrations/logos/pagerduty.png  (new binary file, 2.6 KiB; not shown)
BIN  static/images/integrations/pagerduty/001.png  (new binary file, 18 KiB; not shown)
BIN  static/images/integrations/pagerduty/002.png  (new binary file, 25 KiB; not shown)
BIN  static/images/logo/zballoon.png  (new binary file, 20 KiB; not shown)
@@ -261,6 +261,11 @@ input.text-error {
    padding: 6px 0px 6px 0px;
}

+.header-main .portico-simple-logo {
+    height: 40px;
+    width: auto;
+}
+
.app {
    width: 100%;
    z-index: 99;
@@ -697,12 +702,50 @@ a.bottom-signup-button {
    margin-left: 10px;
}

-.main-headline-container {
+.main-headline-container,
+.os-headline-container {
    position: relative;
    height: 100%;
    padding-top: 40px !important;
}

+.os-headline-container {
+    padding: 20px;
+    background: #1e5799;
+    background: linear-gradient(to bottom, #1e5799 0%,#2989d8 52%,#7db9e8 100%);
+    padding-bottom: 40px !important;
+    text-align: center;
+}
+
+.os-tagline,
+.os-footnote {
+    color: #eeeeee !important;
+}
+
+.os-footnote a:link,
+.os-footnote a:visited,
+.os-footnote a:active {
+    color: #eeeeee;
+    text-decoration: none;
+    border-bottom: 1px solid #dddddd;
+}
+
+.os-footnote a:hover {
+    color: #ffffff;
+    text-decoration: none;
+    border-bottom: 1px solid #ffffff;
+}
+
+.os-illustration {
+    height: 200px;
+    width: auto;
+    margin-bottom: 15px;
+}
+
+.portico-os-announcement {
+    padding-top: 40px;
+}
+
.main-headline-logo {
    display: block;
    width: 200px;
@@ -759,7 +802,7 @@ a.bottom-signup-button {
.hello-main {
    max-width: none;
    min-width: 0;
-    padding: 0;
+    padding: 0 !important;
}

.footer-padder {
@@ -8,7 +8,7 @@

<p>The organization you are trying to join, {{ deactivated_domain_name }}, has
been deactivated. Please
-contact <a href="mailto:support@zulip.com">support@zulip.com</a> to reactivate
+contact <a href="mailto:{{ zulip_administrator }}">{{ zulip_administrator }}</a> to reactivate
this group.</p>

{% endblock %}

@@ -1,6 +1,22 @@
{% extends "zerver/portico.html" %}
{% block hello_page_container %} hello-main{% endblock %}
{% block hello_page_footer %} hello-footer{% endblock %}
+{% block os_announcement %}
+{% if zulip_com %}
+<div class="os-headline-container">
+    <img src="/static/images/logo/zballoon.png" class="os-illustration" alt="Zulip balloon" />
+    <div class="main-headline-text">
+        <span class="tagline os-tagline">
+            Zulip has been released as open source software!
+        </span>
+        <span class="footnote os-footnote">
+            Read the <a href="https://blogs.dropbox.com/tech/2015/09/open-sourcing-zulip-a-dropbox-hack-week-project" target="_blank">announcement</a> or go to <a href="https://www.zulip.org" target="_blank">the Zulip open source project website</a>.
+        </span>
+    </div>
+</div>
+{% endif %}
+{% endblock %}

{% block portico_content %}
<div class="app main-headline">
    <div class="app-main main-headline-container">
@@ -2,15 +2,31 @@

{# API information page #}

+{% block os_announcement %}
+{% if zulip_com %}
+<div class="os-headline-container">
+    <img src="/static/images/logo/zballoon.png" class="os-illustration" alt="Zulip balloon" />
+    <div class="main-headline-text">
+        <span class="tagline os-tagline">
+            Zulip has been released as open source software!
+        </span>
+        <span class="footnote os-footnote">
+            Read the <a href="https://blogs.dropbox.com/tech/2015/09/open-sourcing-zulip-a-dropbox-hack-week-project" target="_blank">announcement</a> or go to <a href="https://www.zulip.org" target="_blank">the Zulip open source project website</a>.
+        </span>
+    </div>
+</div>
+{% endif %}
+{% endblock %}

{% block portico_content %}

<div class="portico-page-header"><a href="#"><i class="icon-vector-gears portico-page-header-icon"></i>Integrations</a></div>

<p class="portico-large-text">With Zulip integrations, your team can stay up-to-date on
code changes, issue tickets, build system results, and much more. If you don't see the system you would like to integrate with it, or run into any
-trouble, don't hesitate to <a href="mailto:support@zulip.com?subject=Integration%20question">email us</a>.</p>
+trouble, don't hesitate to <a href="mailto:zulip-devel@googlegroups.com?subject=Integration%20question">email us</a>.</p>

-<p>Many of these integrations require creating a Zulip bot. You can do so on your <a href="https://zulip.com/#settings">Zulip settings page</a>. Be sure to note its username and API key.</p>
+<p>Many of these integrations require creating a Zulip bot. You can do so on your <a href="/#settings">Zulip settings page</a>. Be sure to note its username and API key.</p>

<div id="integration-instruction-block" class="integration-instruction-block">
    <a href="#" id="integration-list-link"><i class="icon-vector-circle-arrow-left"></i> Back to list</a>
@@ -130,6 +146,12 @@
    <span class="integration-label">New Relic</span>
    </a>
</div>
+<div class="integration-lozenge integration-pagerduty">
+    <a class="integration-link integration-pagerduty" href="#pagerduty">
+        <img class="integration-logo" src="/static/images/integrations/logos/pagerduty.png" alt="Pagerduty logo" />
+        <span class="integration-label">Pagerduty</span>
+    </a>
+</div>
<div class="integration-lozenge integration-perforce">
    <a class="integration-link integration-perforce" href="#perforce">
        <img class="integration-logo" src="/static/images/integrations/logos/perforce.png" alt="Perforce logo" />
@@ -254,7 +276,7 @@
example, auto-restarting through <code>supervisord</code>).</p>

<p>Please
-contact <a href="mailto:support@zulip.com?subject=Asana%20integration%20question">support@zulip.com</a>
+contact <a href="mailto:zulip-devel@googlegroups.com?subject=Asana%20integration%20question">zulip-devel@googlegroups.com</a>
if you'd like assistance with maintaining this integration.
</p>
</li>
@@ -836,7 +858,7 @@
<pre><code>npm install --save hubot-zulip</code></pre>
</li>

-<li><p>On your <a href="https://zulip.com/#settings">Zulip settings page</a>, create a bot account. Note its username, API key and full name; you will use them on the next step.</p></li>
+<li><p>On your <a href="/#settings">Zulip settings page</a>, create a bot account. Note its username, API key and full name; you will use them on the next step.</p></li>

<li>To run Hubot locally, use:
<pre><code>HUBOT_ZULIP_BOT=hubot-bot@example.com HUBOT_ZULIP_API_KEY=your_key bin/hubot --adapter zulip --name "myhubot"</code></pre>
@@ -908,7 +930,7 @@
<li>Did you set up a post-build action for your project?</li>
<li>Does the stream you picked (e.g. <code>jenkins</code>) already exist? If not, add yourself to it and try again.</li>
<li>Are your access key and email address correct? Test them using <a href="/api">our curl API</a>.</li>
-<li>Still stuck? Email <a href="mailto:support@zulip.com?subject=Jenkins">support@zulip.com</a>.</li>
+<li>Still stuck? Email <a href="mailto:zulip-devel@googlegroups.com?subject=Jenkins">zulip-devel@googlegroups.com</a>.</li>
</ul>
</p>
</div>
@@ -1065,6 +1087,30 @@ key = NAGIOS_BOT_API_KEY
directly.</p>
</div>

+<div id="pagerduty" class="integration-instructions">
+
+    <p>First, create the stream you'd like to use for Pagerduty notifications,
+    and subscribe all interested parties to this stream. We recommend the
+    stream name <code>pagerduty</code>. Keep in mind you still need to create
+    the stream first even if you are using this recommendation.</p>
+
+    <p>Next, in Pagerduty, select Services under Configuration on the top
+    of the page.</p>
+    <img class="screenshot" src="/static/images/integrations/pagerduty/001.png" />
+
+    <p>Now navigate to the service you want to integrate with Zulip. From
+    there, click "Add a webhook". Fill in the form like this:</p>
+    <ul>
+        <li><b>Name</b>: Zulip</li>
+        <li><b>Endpoint URL</b>: <code>{{ external_api_uri }}{% verbatim %}/v1/external/pagerduty?api_key=abcdefgh&stream=pagerduty{% endverbatim %}</code></li>
+    </ul>
+
+    <img class="screenshot" src="/static/images/integrations/pagerduty/002.png" />
+
+</div>

<div id="perforce" class="integration-instructions">
@@ -9,6 +9,22 @@
{% minified_js 'signup' %}
{% endblock %}

+{% block os_announcement %}
+{% if zulip_com %}
+<div class="os-headline-container">
+    <img src="/static/images/logo/zballoon.png" class="os-illustration" alt="Zulip balloon" />
+    <div class="main-headline-text">
+        <span class="tagline os-tagline">
+            Zulip has been released as open source software!
+        </span>
+        <span class="footnote os-footnote">
+            Read the <a href="https://blogs.dropbox.com/tech/2015/09/open-sourcing-zulip-a-dropbox-hack-week-project" target="_blank">announcement</a> or go to <a href="https://www.zulip.org" target="_blank">the Zulip open source project website</a>.
+        </span>
+    </div>
+</div>
+{% endif %}
+{% endblock %}

{% block portico_content %}

{% if password_auth_enabled %}

@@ -21,7 +21,7 @@ hence the name.
{% if zulip_com %}
<a class="brand logo" href="/"><img src="/static/images/logo/zulip-dropbox.png" class="portico-logo" alt="Zulip" content="Zulip" /></a>
{% else %}
-<a class="brand logo" href="/"><img src="/static/images/logo/zulipwlogo@2x.png" class="portico-logo" alt="Zulip" content="Zulip" /></a>
+<a class="brand logo" href="/"><img src="/static/images/logo/zulipcornerlogo@2x.png" class="portico-simple-logo" alt="Zulip" content="Zulip" /></a>
{% endif %}
</div>
</div>
@@ -41,6 +41,10 @@ hence the name.
</div>

<div class="app portico-page">
+    <div class="portico-os-announcement">
+        {% block os_announcement %}
+        {% endblock %}
+    </div>
    <div class="app-main portico-page-container{% block hello_page_container %}{% endblock %}">
        {% block portico_content %}
        {% endblock %}
@@ -12,7 +12,7 @@ dist=$1
file=$2

if [ -z "$dist" ] || [ -z "$file" ]; then
-    echo "$(echo $0 | rev | cut -d "/" -f 1-1 | rev) -- build Debian packages on a Zulip buildslave"
+    echo "$(echo "$0" | rev | cut -d "/" -f 1-1 | rev) -- build Debian packages on a Zulip buildslave"
    echo
    echo "USAGE: $0 dist path/to/package.dsc"
    exit 1
@@ -21,10 +21,10 @@ fi
set -xe

ret=0
-path=$(ssh -q -l $BUILDD_USERNAME $BUILDD_HOST -- mktemp -d $BUILDD_BASE_PATH/$USER.`date -u +%F.%R`.XXXXXXX)/
+path=$(ssh -q -l "$BUILDD_USERNAME" "$BUILDD_HOST" -- "mktemp -d '$BUILDD_BASE_PATH/$USER.`date -u +%F.%R`.XXXXXXX'")/

-dcmd rsync -vz --copy-links $file $BUILDD_USERNAME@$BUILDD_HOST:$path/
-file=$(basename $file)
+dcmd rsync -vz --copy-links "$file" "$BUILDD_USERNAME@$BUILDD_HOST:$path/"
+file=$(basename "$file")

# -A specifies to build arch-all packages (non-arch dependent) in addition to
# binary packages
@@ -38,9 +38,9 @@ file=$(basename $file)
#
# We always build for amd64. There is no 32-bit.

-ssh -t -l $BUILDD_USERNAME $BUILDD_HOST -- "cd $path && sbuild -A -s --force-orig-source --dist=$dist --arch=amd64 $file"
+ssh -t -l "$BUILDD_USERNAME" "$BUILDD_HOST" -- "cd '$path' && sbuild -A -s --force-orig-source '--dist=$dist' --arch=amd64 '$file'"

-rsync -Lvz --copy-links $BUILDD_USERNAME@$BUILDD_HOST:$path/* .
-ssh -l $BUILDD_USERNAME $BUILDD_HOST rm -r $path
+rsync -Lvz --copy-links "$BUILDD_USERNAME@$BUILDD_HOST:$path/*" .
+ssh -l "$BUILDD_USERNAME" "$BUILDD_HOST" rm -r "$path"

-exit $ret
+exit "$ret"
@@ -11,8 +11,8 @@ prefix="zulip-server-$version"

if [ "$(uname)" = "Darwin" ]; then
    TMPDIR=/tmp/voyager-build
-    rm -Rf $TMPDIR
-    mkdir -p $TMPDIR
+    rm -Rf "$TMPDIR"
+    mkdir -p "$TMPDIR"
else
    TMPDIR=$(mktemp -d)
fi
@@ -27,10 +27,10 @@ TMP_CHECKOUT=$TMPDIR/$prefix/
TARBALL=$TMPDIR/$prefix.tar

# .gitattributes lists the files that are not exported
-git archive -o $TARBALL --prefix=$prefix/ HEAD
+git archive -o "$TARBALL" "--prefix=$prefix/" HEAD

-if tar -tf $TARBALL | grep -q -e zilencer -e zproject/local_settings.py -e puppet/zulip_internal; then
+if tar -tf "$TARBALL" | grep -q -e zilencer -e zproject/local_settings.py -e puppet/zulip_internal; then
    echo "Excluded files remain in tarball!";
    echo "Versions of git 1.8.1.1 - 1.8.1.6 have broken .gitattributes syntax";
    exit 1;
@@ -39,8 +39,8 @@ else
fi

# Check out a temporary full copy of the index to generate static files
-git checkout-index -f -a --prefix $TMP_CHECKOUT
-cd $TMP_CHECKOUT
+git checkout-index -f -a --prefix "$TMP_CHECKOUT"
+cd "$TMP_CHECKOUT"

# Use default settings so there is no chance of leaking secrets
cp zproject/local_settings_template.py zproject/local_settings.py
@@ -70,8 +70,8 @@ echo; echo "\033[33mRunning update-prod-static; check ${TMP_CHECKOUT}update-prod
set -x

./tools/update-prod-static
-echo $GITID > build_id
-echo $version > version
+echo "$GITID" > build_id
+echo "$version" > version
mv update-prod-static.log ..

rm -f zproject/dev-secrets.conf
@@ -79,11 +79,11 @@ rm -f zproject/dev-secrets.conf
# We don't need duplicate copies of emoji with hashed paths, and they would break bugdown
find prod-static/serve/third/gemoji/images/emoji/ -regex '.*\.[0-9a-f]+\.png' -delete

-cd $TMPDIR
+cd "$TMPDIR"

-tar --append -f $TARBALL $prefix/prod-static $prefix/build_id $prefix/version
+tar --append -f "$TARBALL" "$prefix/prod-static" "$prefix/build_id" "$prefix/version"

-rm -rf $prefix
+rm -rf "$prefix"

-gzip $TARBALL
+gzip "$TARBALL"
echo "Generated $TARBALL.gz"
@@ -4,7 +4,7 @@
# and also any branches in origin which are ancestors of
# origin/master and are named like $USER-*.

-push_args=''
+push_args=()

function is_merged {
    ! git rev-list -n 1 origin/master.."$1" | grep -q .
@@ -31,7 +31,7 @@ function clean_ref {
            echo -n "Deleting remote branch $remote_name"
            echo " (was $(git rev-parse --short "$ref"))"
            # NB: this won't handle spaces in ref names
-            push_args="$push_args :$remote_name"
+            push_args=("${push_args[@]}" ":$remote_name")
        fi
        ;;
    esac
@@ -44,8 +44,8 @@ fi

git fetch --prune origin

-eval $(git for-each-ref --shell --format='clean_ref %(refname);')
+eval "$(git for-each-ref --shell --format='clean_ref %(refname);')"

-if [ -n "$push_args" ]; then
-    git push origin $push_args
+if [ "${#push_args}" -ne 0 ]; then
+    git push origin "${push_args[@]}"
fi
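The move from a string to a bash array is the standard fix for building argument lists: each element survives expansion as exactly one word, even with embedded spaces. A small runnable sketch (in bash 3.1+, `+=` appends the same way the assignment above does); note also that `${#push_args[@]}` gives the element count, whereas `${#push_args}` is only the length of the first element:

    args=()
    args+=(":a branch with spaces")   # stays one argument
    args+=(":another-branch")
    printf '<%s>\n' "${args[@]}"      # prints exactly two bracketed words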

@@ -2,7 +2,7 @@

function error_out {
    echo -en '\e[0;31m'
-    echo $1
+    echo "$1"
    echo -en '\e[0m'
    exit 1
}
@@ -12,16 +12,16 @@ status=$(git status --porcelain | grep -v '^??')

old_ref=$(git rev-list --max-count=1 HEAD)
branch=$1
-branch_ref=$(git rev-list --max-count=1 $branch)
+branch_ref=$(git rev-list --max-count=1 "$branch")

[ $? -ne 0 ] && error_out "Unknown branch: $branch"

if [ "$old_ref" == "$branch_ref" ]; then
    new_ref=master
else
-    ref_name=$(git describe --all --exact $old_ref)
+    ref_name=$(git describe --all --exact "$old_ref")
    if [ $? -eq 0 ]; then
-        new_ref=$(echo $ref_name | perl -pe 's{^(heads|remotes)/}{}')
+        new_ref=$(echo "$ref_name" | perl -pe 's{^(heads|remotes)/}{}')
    else
        new_ref=$old_ref
    fi
@@ -31,13 +31,13 @@ fi

git fetch -p

-git rebase origin/master $branch
+git rebase origin/master "$branch"
[ $? -ne 0 ] && error_out "Rebase onto origin/master failed"

git push . HEAD:master
git push origin master
[ $? -ne 0 ] && error_out "Push of master to origin/master failed"

-git checkout $new_ref
-git branch -D $branch
-git push origin :$branch
+git checkout "$new_ref"
+git branch -D "$branch"
+git push origin ":$branch"
@@ -15,23 +15,23 @@ exit 1
}

-if [[ $# < 1 || $# >2 ]]; then
+if [[ $# -lt 1 || $# -gt 2 ]]; then
    usage
fi

function get_status {
-    if (ssh zulip@$1.zulip.net '[ -d "'"$lockdir"'" ]'); then
+    if (ssh "zulip@$1.zulip.net" '[ -d "'"$lockdir"'" ]'); then
        printf "%-10s %b" "$1" "is currently \e[31mlocked\e[0m\n"
    else
        printf "%-10s %b" "$1" "is currently \e[32munlocked\e[0m\n"
    fi
}

-if [[ $1 == "lock" ]]; then
+if [[ "$1" == "lock" ]]; then
    verb="mkdir"
-elif [[ $1 == "unlock" ]]; then
+elif [[ "$1" == "unlock" ]]; then
    verb="rmdir"
-elif [[ $# == 1 && $1 == "status" ]]; then
+elif [[ $# == 1 && "$1" == "status" ]]; then
    get_status "staging"
    get_status "prod0"
    exit
@@ -40,11 +40,11 @@ else
fi

-if [[ $2 == "staging" ]]; then
+if [[ "$2" == "staging" ]]; then
    ssh zulip@staging.zulip.net "$verb $lockdir"
    get_status "staging"
    exit
-elif [[ $2 == "prod" ]]; then
+elif [[ "$2" == "prod" ]]; then
    ssh zulip@prod0.zulip.net "$verb $lockdir"
    get_status "prod0"
    exit
@@ -11,7 +11,7 @@ COUNT=50
mkdir -p output
while true; do

-    if python show-last-messages --api-key=$API_KEY --user=$BOT_EMAIL --streams="$STREAMS" --count=$COUNT; then
+    if python show-last-messages --api-key="$API_KEY" --user="$BOT_EMAIL" --streams="$STREAMS" --count="$COUNT"; then
        echo "[`date`] Success";
        mv output-candidate.html output/zulip.html
        touch output/zulip.html

@@ -21,5 +21,5 @@ if [ $# = 2 ]; then
fi

for dist in $DISTS; do
-    reprepro --ignore=wrongdistribution include$type $dist "$1"
+    reprepro --ignore=wrongdistribution "include$TYPE" "$dist" "$1"
done
@@ -8,7 +8,7 @@ if [ -z "$hostname" ]; then
    echo "USAGE: $0 server type hostname [branch]"
    exit 1
fi
-if ! $(echo "$hostname" | grep -q zulip); then
+if ! echo "$hostname" | grep -q zulip; then
    echo "USAGE: $0 server type hostname [branch]"
    echo "Hostname must have zulip in it."
    exit 1
@@ -35,11 +35,11 @@ fi
# Force RSA keys. We do this because the ECDSA key is not printed on syslog,
# and our puppet configuration does not use ECDSA. If we don't do this,
# we'll get key errors after puppet apply.
-SSH_OPTS="-o HostKeyAlgorithms=ssh-rsa"
+SSH_OPTS=(-o HostKeyAlgorithms=ssh-rsa)

set +e

-ssh $SSH_OPTS "$server" -t -i "$amazon_key_file" -ladmin -o "ControlMaster=no" <<EOF
+ssh "${SSH_OPTS[@]}" "$server" -t -i "$amazon_key_file" -ladmin -o "ControlMaster=no" <<EOF
sudo sed -i 's/PermitRootLogin no/PermitRootLogin yes/g' /etc/ssh/sshd_config
sudo mkdir -p ~root/.ssh && sudo cp .ssh/authorized_keys ~root/.ssh/authorized_keys
sudo service ssh restart
@@ -48,7 +48,7 @@ EOF

set -e

-ssh $SSH_OPTS "$server" -t -i "$amazon_key_file" -lroot <<EOF
+ssh "${SSH_OPTS[@]}" "$server" -t -i "$amazon_key_file" -lroot <<EOF
resize2fs /dev/xvda1
echo "$hostname" > /etc/hostname
sed -i 's/localhost$/localhost $hostname/' /etc/hosts
@@ -69,9 +69,9 @@ EOF

# Give new server git access
# TODO: Don't give servers push access to our git!
-scp $SSH_OPTS -i "$amazon_key_file" "$server_private_key_file" root@"$server":/root/.ssh/id_rsa
+scp "${SSH_OPTS[@]}" -i "$amazon_key_file" "$server_private_key_file" root@"$server":/root/.ssh/id_rsa

-ssh $SSH_OPTS "$server" -t -i "$amazon_key_file" -lroot <<EOF
+ssh "${SSH_OPTS[@]}" "$server" -t -i "$amazon_key_file" -lroot <<EOF
chmod 600 /root/.ssh/id_rsa
# Setup initial known_hosts including git server
cat > /root/.ssh/known_hosts <<EOF2
@@ -85,9 +85,7 @@ cd /root/zulip
git checkout $branch
EOF

-ssh $SSH_OPTS "$server" -t -i "$amazon_key_file" -lroot <<EOF
-cp -a /root/zulip/puppet/zulip/files/puppet.conf /etc/puppet/
-
+ssh "${SSH_OPTS[@]}" "$server" -t -i "$amazon_key_file" -lroot <<EOF
userdel admin
passwd -d root
mkdir /etc/zulip
@@ -107,8 +105,8 @@ fi
EOF

# TODO: Don't give servers push access to our git!
-scp $SSH_OPTS -i "$amazon_key_file" "$server_private_key_file" zulip@"$server":/home/zulip/.ssh/id_rsa
-ssh $SSH_OPTS "$server" -t -i "$amazon_key_file" -lzulip <<EOF
+scp "${SSH_OPTS[@]}" -i "$amazon_key_file" "$server_private_key_file" zulip@"$server":/home/zulip/.ssh/id_rsa
+ssh "${SSH_OPTS[@]}" "$server" -t -i "$amazon_key_file" -lzulip <<EOF
chmod 600 /home/zulip/.ssh/id_rsa
EOF
@@ -1,5 +1,5 @@
#!/bin/sh -xe

# This is a really simple wrapper script, pretty much for documenting clarity
-`dirname $0`/../tools/generate-fixtures --force
+"`dirname "$0"`/../tools/generate-fixtures" --force
@@ -1,4 +1,4 @@
#!/bin/sh -ex

mkdir -p static/third/zxcvbn/
-wget https://raw.githubusercontent.com/dropbox/zxcvbn/0890678ede1488ecb02cda7eb793cd308a59c49d/zxcvbn.js -o static/third/zxcvbn/zxcvbn.js
+wget https://raw.githubusercontent.com/dropbox/zxcvbn/0890678ede1488ecb02cda7eb793cd308a59c49d/zxcvbn.js -O static/third/zxcvbn/zxcvbn.js
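The one-character fix above is easy to miss: lowercase `-o` names wget's log file, silently leaving the download itself as `zxcvbn.js` in the current directory, while uppercase `-O` names the output document. Spelled out:

    # -O: where the downloaded file goes; -o: where wget writes its log (not the file!)
    wget -O static/third/zxcvbn/zxcvbn.js -o wget.log \
        https://raw.githubusercontent.com/dropbox/zxcvbn/0890678ede1488ecb02cda7eb793cd308a59c49d/zxcvbn.js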

@@ -6,7 +6,7 @@ function migration_status {

template_grep_error_code=$(echo "SELECT 1 from pg_database WHERE datname='zulip_test_template';" | python manage.py dbshell --settings=zproject.test_settings | grep -q "1 row"; echo $?)

-if [ $template_grep_error_code == "0" ]; then
+if [ "$template_grep_error_code" == "0" ]; then
    migration_status zerver/fixtures/available-migrations
    if [ -e zerver/fixtures/migration-status ] &&
        cmp -s zerver/fixtures/available-migrations zerver/fixtures/migration-status &&
@@ -11,7 +11,7 @@ VAGRANTUSERNAME=$(whoami)

if [[ $# == 0 ]]; then
    USERNAME=zulip
-    PASSWORD=$($(dirname $0)/../bin/get-django-setting LOCAL_DATABASE_PASSWORD)
+    PASSWORD=$("$(dirname "$0")/../bin/get-django-setting" LOCAL_DATABASE_PASSWORD)
    DBNAME=zulip
    SEARCH_PATH="$USERNAME",public
elif [[ $# == 4 ]]; then
@@ -28,7 +28,7 @@ fi

DBNAME_BASE=${DBNAME}_base

-$ROOT_POSTGRES $DEFAULT_DB << EOF
+$ROOT_POSTGRES "$DEFAULT_DB" << EOF
CREATE USER $USERNAME;
ALTER USER $USERNAME PASSWORD '$PASSWORD';
ALTER USER $USERNAME CREATEDB;
@@ -42,28 +42,28 @@ EOF
umask go-rw
PGPASS_PREFIX="*:*:*:$USERNAME:"
PGPASS_ESCAPED_PREFIX="*:\*:\*:$USERNAME:"
-if ! $(grep -q "$PGPASS_ESCAPED_PREFIX" ~/.pgpass); then
-    echo $PGPASS_PREFIX$PASSWORD >> ~/.pgpass
+if ! grep -q "$PGPASS_ESCAPED_PREFIX" ~/.pgpass; then
+    echo "$PGPASS_PREFIX$PASSWORD" >> ~/.pgpass
else
    sed -i "s/$PGPASS_ESCAPED_PREFIX.*\$/$PGPASS_PREFIX$PASSWORD/" ~/.pgpass
fi
chmod go-rw ~/.pgpass
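For reference, each ~/.pgpass line has the form `hostname:port:database:username:password` with `*` as a wildcard, and libpq silently ignores the file unless it is private to its owner, which is why the `chmod` matters. A minimal sketch mirroring what the script writes (hypothetical password):

    echo '*:*:*:zulip:s3kr1t' >> ~/.pgpass   # any host, port, and database for user zulip
    chmod 600 ~/.pgpass                      # required, or libpq skips the file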

-psql -h localhost postgres $USERNAME <<EOF
+psql -h localhost postgres "$USERNAME" <<EOF
DROP DATABASE IF EXISTS $DBNAME;
DROP DATABASE IF EXISTS $DBNAME_BASE;
CREATE DATABASE $DBNAME_BASE
EOF

-psql -h localhost $DBNAME_BASE $USERNAME <<EOF
+psql -h localhost "$DBNAME_BASE" "$USERNAME" <<EOF
CREATE SCHEMA zulip;
EOF

-$ROOT_POSTGRES $DBNAME_BASE << EOF
+$ROOT_POSTGRES "$DBNAME_BASE" << EOF
CREATE EXTENSION tsearch_extras SCHEMA zulip;
EOF

-psql -h localhost postgres $USERNAME <<EOF
+psql -h localhost postgres "$USERNAME" <<EOF
CREATE DATABASE $DBNAME TEMPLATE $DBNAME_BASE;
EOF

@@ -1,3 +1,3 @@
#!/bin/bash -xe

-$(dirname $0)/postgres-init-db zulip_test $($(dirname $0)/../bin/get-django-setting LOCAL_DATABASE_PASSWORD) zulip_test zulip,public
+"$(dirname "$0")/postgres-init-db" zulip_test "$("$(dirname "$0")/../bin/get-django-setting" LOCAL_DATABASE_PASSWORD)" zulip_test zulip,public
@@ -6,11 +6,11 @@ export NODE_PATH=/usr/lib/nodejs:static

INDEX_JS=zerver/tests/frontend/node/index.js
NODEJS=$(which nodejs || which node)
-if [ f$1 = fcover ]; then
+if [ "$1" = "cover" ]; then
    # Run a coverage test with Istanbul.
-    istanbul cover $INDEX_JS
+    istanbul cover "$INDEX_JS"
else
    # Normal testing, no coverage analysis.
    # Run the index.js test runner, which runs all the other tests.
-    $NODEJS --stack-trace-limit=100 $INDEX_JS
+    "$NODEJS" --stack-trace-limit=100 "$INDEX_JS"
fi
@@ -93,8 +93,9 @@ class OurAuthenticationForm(AuthenticationForm):
            if user_profile.realm.deactivated:
                error_msg = u"""Sorry for the trouble, but %s has been deactivated.

-Please contact support@zulip.com to reactivate this group.""" % (
-                    user_profile.realm.name,)
+Please contact %s to reactivate this group.""" % (
+                    user_profile.realm.name,
+                    settings.ZULIP_ADMINISTRATOR)
                raise ValidationError(mark_safe(error_msg))

        return email
@@ -97,16 +97,16 @@ def fetch_tweet_data(tweet_id):
        from . import testing_mocks
        res = testing_mocks.twitter(tweet_id)
    else:
-        if settings.TWITTER_CONSUMER_KEY == '' or \
-           settings.TWITTER_CONSUMER_SECRET == '' or \
-           settings.TWITTER_ACCESS_TOKEN_KEY == '' or \
-           settings.TWITTER_ACCESS_TOKEN_SECRET == '':
+        creds = {
+            'consumer_key': settings.TWITTER_CONSUMER_KEY,
+            'consumer_secret': settings.TWITTER_CONSUMER_SECRET,
+            'access_token_key': settings.TWITTER_ACCESS_TOKEN_KEY,
+            'access_token_secret': settings.TWITTER_ACCESS_TOKEN_SECRET,
+        }
+        if not all(creds.values()):
            return None

-        api = twitter.Api(consumer_key = settings.TWITTER_CONSUMER_KEY,
-                          consumer_secret = settings.TWITTER_CONSUMER_SECRET,
-                          access_token_key = settings.TWITTER_ACCESS_TOKEN_KEY,
-                          access_token_secret = settings.TWITTER_ACCESS_TOKEN_SECRET)
+        api = twitter.Api(**creds)

        try:
            # Sometimes Twitter hangs on responses. Timing out here
@@ -294,6 +294,10 @@ class BugdownTest(TestCase):
|
||||
self.assertEqual(converted, '<p>%s</p>\n%s' % (make_link('http://twitter.com/wdaher/status/287977969287315459'),
|
||||
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315459', media_tweet_html, """<div class="twitter-image"><a href="http://t.co/xo7pAhK6n3" target="_blank" title="http://t.co/xo7pAhK6n3"><img src="https://pbs.twimg.com/media/BdoEjD4IEAIq86Z.jpg:small"></a></div>""")))
|
||||
|
||||
def test_fetch_tweet_data_settings_validation(self):
|
||||
with self.settings(TEST_SUITE=False):
|
||||
self.assertIs(None, bugdown.fetch_tweet_data('287977969287315459'))
|
||||
|
||||
def test_realm_emoji(self):
|
||||
def emoji_img(name, url):
|
||||
return '<img alt="%s" class="emoji" src="%s" title="%s">' % (name, url, name)
|
||||
|
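The new test leans on Django's TestCase.settings() context manager, which temporarily overrides settings for the enclosed block; here it forces TEST_SUITE=False so the credential-validation path runs instead of the mocked one. A minimal sketch of the mechanism (it needs a configured Django project to actually run):

    from django.conf import settings
    from django.test import TestCase

    class SettingsOverrideExample(TestCase):
        def test_override(self):
            # The override is active only inside the with-block and is
            # automatically undone on exit.
            with self.settings(TEST_SUITE=False):
                self.assertFalse(settings.TEST_SUITE)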
@@ -760,7 +760,7 @@ class PagerDutyHookTests(AuthedTestCase):
         self.assertEqual(msg.subject, 'incident 3')
         self.assertEqual(
             msg.content,
-            ':unhealthy_heart: Incident [3](https://zulip-test.pagerduty.com/incidents/P140S4Y) triggered by [Test service](https://zulip-test.pagerduty.com/services/PIL5CUQ) and assigned to [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>foo'
+            ':imp: Incident [3](https://zulip-test.pagerduty.com/incidents/P140S4Y) triggered by [Test service](https://zulip-test.pagerduty.com/services/PIL5CUQ) and assigned to [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>foo'
         )

     def test_unacknowledge(self):
@@ -769,7 +769,7 @@ class PagerDutyHookTests(AuthedTestCase):
         self.assertEqual(msg.subject, 'incident 3')
         self.assertEqual(
             msg.content,
-            ':unhealthy_heart: Incident [3](https://zulip-test.pagerduty.com/incidents/P140S4Y) unacknowledged by [Test service](https://zulip-test.pagerduty.com/services/PIL5CUQ) and assigned to [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>foo'
+            ':imp: Incident [3](https://zulip-test.pagerduty.com/incidents/P140S4Y) unacknowledged by [Test service](https://zulip-test.pagerduty.com/services/PIL5CUQ) and assigned to [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>foo'
         )

     def test_resolved(self):
@@ -778,7 +778,7 @@ class PagerDutyHookTests(AuthedTestCase):
         self.assertEqual(msg.subject, 'incident 1')
         self.assertEqual(
             msg.content,
-            ':healthy_heart: Incident [1](https://zulip-test.pagerduty.com/incidents/PO1XIJ5) resolved by [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>It is on fire'
+            ':grinning: Incident [1](https://zulip-test.pagerduty.com/incidents/PO1XIJ5) resolved by [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>It is on fire'
         )

     def test_auto_resolved(self):
@@ -787,7 +787,7 @@ class PagerDutyHookTests(AuthedTestCase):
         self.assertEqual(msg.subject, 'incident 2')
         self.assertEqual(
             msg.content,
-            ':healthy_heart: Incident [2](https://zulip-test.pagerduty.com/incidents/PX7K9J2) resolved\n\n>new'
+            ':grinning: Incident [2](https://zulip-test.pagerduty.com/incidents/PX7K9J2) resolved\n\n>new'
         )

     def test_acknowledge(self):
@@ -796,7 +796,7 @@ class PagerDutyHookTests(AuthedTestCase):
         self.assertEqual(msg.subject, 'incident 1')
         self.assertEqual(
             msg.content,
-            ':average_heart: Incident [1](https://zulip-test.pagerduty.com/incidents/PO1XIJ5) acknowledged by [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>It is on fire'
+            ':no_good: Incident [1](https://zulip-test.pagerduty.com/incidents/PO1XIJ5) acknowledged by [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>It is on fire'
         )

     def test_no_subject(self):
@@ -805,7 +805,7 @@ class PagerDutyHookTests(AuthedTestCase):
         self.assertEqual(msg.subject, 'incident 48219')
         self.assertEqual(
             msg.content,
-            u':healthy_heart: Incident [48219](https://dropbox.pagerduty.com/incidents/PJKGZF9) resolved\n\n>mp_error_block_down_critical\u2119\u01b4'
+            u':grinning: Incident [48219](https://dropbox.pagerduty.com/incidents/PJKGZF9) resolved\n\n>mp_error_block_down_critical\u2119\u01b4'
         )

     def test_explicit_subject(self):
@@ -814,7 +814,7 @@ class PagerDutyHookTests(AuthedTestCase):
         self.assertEqual(msg.subject, 'my cool topic')
         self.assertEqual(
             msg.content,
-            ':average_heart: Incident [1](https://zulip-test.pagerduty.com/incidents/PO1XIJ5) acknowledged by [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>It is on fire'
+            ':no_good: Incident [1](https://zulip-test.pagerduty.com/incidents/PO1XIJ5) acknowledged by [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>It is on fire'
        )

     def test_bad_message(self):
@@ -244,7 +244,8 @@ def accounts_register(request):
         # The user is trying to register for a deactivated realm. Advise them to
         # contact support.
         return render_to_response("zerver/deactivated.html",
-                                  {"deactivated_domain_name": realm.name})
+                                  {"deactivated_domain_name": realm.name,
+                                   "zulip_administrator": settings.ZULIP_ADMINISTRATOR})

     try:
         if existing_user_profile is not None and existing_user_profile.is_mirror_dummy:
@@ -693,7 +694,7 @@ def finish_google_oauth2(request):
     )
     if resp.status_code != 200:
         raise Exception('Could not convert google pauth2 code to access_token\r%r' % resp.text)
-    access_token = resp.json['access_token']
+    access_token = resp.json()['access_token']

     resp = requests.get(
         'https://www.googleapis.com/plus/v1/people/me',
@@ -701,7 +702,7 @@ def finish_google_oauth2(request):
     )
     if resp.status_code != 200:
         raise Exception('Google login failed making API call\r%r' % resp.text)
-    body = resp.json
+    body = resp.json()

     try:
         full_name = body['name']['formatted']
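Both fixes in this hunk are the same bug: in requests 1.0 and later, Response.json is a method rather than a property, so the old code bound the method instead of parsing the body. For instance, against any JSON-returning endpoint:

    import requests

    resp = requests.get('https://api.github.com')  # arbitrary JSON endpoint
    body = resp.json()       # the parsed dict
    method = resp.json       # the bound method itself, not the parsed body
    assert callable(method)
    assert isinstance(body, dict)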
@@ -961,20 +961,20 @@ def send_raw_pagerduty_json(user_profile, stream, message, topic):

 def send_formated_pagerduty(user_profile, stream, message_type, format_dict, topic):
     if message_type in ('incident.trigger', 'incident.unacknowledge'):
-        template = (u':unhealthy_heart: Incident '
+        template = (u':imp: Incident '
                     u'[{incident_num}]({incident_url}) {action} by '
                     u'[{service_name}]({service_url}) and assigned to '
                     u'[{assigned_to_username}@]({assigned_to_url})\n\n>{trigger_message}')

     elif message_type == 'incident.resolve' and format_dict['resolved_by_url']:
-        template = (u':healthy_heart: Incident '
+        template = (u':grinning: Incident '
                     u'[{incident_num}]({incident_url}) resolved by '
                     u'[{resolved_by_username}@]({resolved_by_url})\n\n>{trigger_message}')
     elif message_type == 'incident.resolve' and not format_dict['resolved_by_url']:
-        template = (u':healthy_heart: Incident '
+        template = (u':grinning: Incident '
                     u'[{incident_num}]({incident_url}) resolved\n\n>{trigger_message}')
     else:
-        template = (u':average_heart: Incident [{incident_num}]({incident_url}) '
+        template = (u':no_good: Incident [{incident_num}]({incident_url}) '
                     u'{action} by [{assigned_to_username}@]({assigned_to_url})\n\n>{trigger_message}')

     subject = topic or u'incident {incident_num}'.format(**format_dict)
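For reference, these templates are filled in further down via str.format(**format_dict); a sketch with made-up incident data shaped like the payloads in the tests above:

    template = (u':imp: Incident [{incident_num}]({incident_url}) {action} by '
                u'[{service_name}]({service_url}) and assigned to '
                u'[{assigned_to_username}@]({assigned_to_url})\n\n>{trigger_message}')

    # Made-up values; real ones are extracted from the PagerDuty webhook payload.
    format_dict = {
        'incident_num': 3,
        'incident_url': 'https://example.pagerduty.com/incidents/3',
        'action': 'triggered',
        'service_name': 'Test service',
        'service_url': 'https://example.pagerduty.com/services/1',
        'assigned_to_username': 'armooo',
        'assigned_to_url': 'https://example.pagerduty.com/users/1',
        'trigger_message': 'foo',
    }
    print(template.format(**format_dict))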
@@ -22,6 +22,8 @@ def password_auth_enabled(realm):
     for backend in django.contrib.auth.get_backends():
         if isinstance(backend, EmailAuthBackend):
             return True
+        if isinstance(backend, ZulipLDAPAuthBackend):
+            return True
     return False

 def dev_auth_enabled():
@@ -91,7 +93,6 @@ class GoogleMobileOauth2Backend(ZulipAuthMixin):
        https://developers.google.com/+/mobile/android/sign-in#server-side_access_for_your_app
        https://developers.google.com/accounts/docs/CrossClientAuth#offlineAccess

-    This backend is not currently supported on voyager.
     """
     def authenticate(self, google_oauth2_token=None, return_data={}):
         try:
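The first hunk makes password_auth_enabled() also report True when LDAP authentication is configured, since LDAP logins go through the password form as well. Reduced to a standalone sketch with stand-in classes for the real backends:

    # Stand-ins for the real classes in zproject/backends.py.
    class EmailAuthBackend(object):
        pass

    class ZulipLDAPAuthBackend(object):
        pass

    def password_auth_enabled(backends):
        # Mirrors the loop over django.contrib.auth.get_backends() above.
        for backend in backends:
            if isinstance(backend, EmailAuthBackend):
                return True
            if isinstance(backend, ZulipLDAPAuthBackend):
                return True
        return False

    print(password_auth_enabled([ZulipLDAPAuthBackend()]))  # True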
@@ -1,4 +1,11 @@
-# Non-secret secret Django settings for the Zulip project
+# This file is the Zulip local_settings.py configuration for the
+# zulip.com installation of Zulip.  It shouldn't be used in other
+# environments, but you may find it to be a helpful reference when
+# setting up your own Zulip installation to see how Zulip can be
+# configured.
+#
+# On a normal Zulip production server, zproject/local_settings.py is a
+# symlink to /etc/zulip/settings.py (based off local_settings_template.py).
 import platform
 import ConfigParser
 from base64 import b64decode
@@ -52,7 +59,9 @@ else:
     EXTERNAL_API_PATH = 'api.zulip.com'
     STATSD_PREFIX = 'app'

+# Legacy zulip.com bucket used for old-style S3 uploads.
 S3_BUCKET="humbug-user-uploads"
+# Buckets used for Amazon S3 integration for storing files and user avatars.
+S3_AUTH_UPLOADS_BUCKET = "zulip-user-uploads"
 S3_AVATAR_BUCKET="humbug-user-avatars"

@@ -18,25 +18,51 @@ ADMIN_DOMAIN = 'example.com'

 # Enable at least one of the following authentication backends.
 AUTHENTICATION_BACKENDS = (
-    # 'zproject.backends.EmailAuthBackend', # Email and password
+    # 'zproject.backends.EmailAuthBackend', # Email and password; see SMTP setup below
     # 'zproject.backends.ZulipRemoteUserBackend', # Local SSO
-    # 'zproject.backends.GoogleBackend', # Google Apps
+    # 'zproject.backends.GoogleMobileOauth2Backend', # Google Apps, setup below
+    # 'zproject.backends.ZulipLDAPAuthBackend', # LDAP, setup below
 )

+# Google Oauth requires a bit of configuration; you will need to
+# do the following:
+#
+# (1) Visit https://console.developers.google.com, set up an
+# Oauth2 client ID that allows redirects to
+# e.g. https://zulip.example.com/accounts/login/google/done/.
+#
+# (2) Then click into the APIs and Auth section (in the sidebar on the
+# left side of the page), APIs, then under "Social APIs" click on
+# "Google+ API" and click the button to enable the API.
+#
+# (3) Put your client secret as "google_oauth2_client_secret" in
+# zulip-secrets.conf, and your client ID right here:
+# GOOGLE_OAUTH2_CLIENT_ID=<your client ID from Google>

 # If you are using the ZulipRemoteUserBackend authentication backend,
 # set this to your domain (e.g. if REMOTE_USER is "username" and the
 # corresponding email address is "username@example.com", set
 # SSO_APPEND_DOMAIN = "example.com")
 SSO_APPEND_DOMAIN = None

-# Configure the outgoing SMTP server below. For outgoing email
-# via a GMail SMTP server, EMAIL_USE_TLS must be True and the
-# outgoing port must be 587. The EMAIL_HOST is prepopulated
-# for GMail servers, change it for other hosts, or leave it unset
-# or empty to skip sending email.
+# Configure the outgoing SMTP server below. The default configuration
+# is prepopulated for GMail servers.  Change EMAIL_HOST for other
+# hosts, or leave it unset or empty to skip sending email.  Note that
+# if you are using a GMail account to send outgoing email, you will
+# likely need to configure that account as "less secure" here:
+# https://support.google.com/accounts/answer/6010255.
+#
+# With the exception of reading EMAIL_HOST_PASSWORD from the Zulip
+# secrets file, Zulip uses Django's standard EmailBackend, so if
+# you're having issues, you may want to search for documentation on
+# using your email provider with Django.
+#
+# A common problem you may encounter when trying to get this working
+# is that some hosting providers block outgoing SMTP traffic.
 EMAIL_HOST = 'smtp.gmail.com'
 EMAIL_HOST_USER = ''
-EMAIL_HOST_PASSWORD = ''
+# If you're using password auth, you will need to put the password in
+# /etc/zulip/zulip-secrets.conf as email_password.
 EMAIL_PORT = 587
 EMAIL_USE_TLS = True
@@ -82,10 +108,12 @@ ERROR_REPORTING = True
 INLINE_IMAGE_PREVIEW = True

 # By default, files uploaded by users and user avatars are stored
-# directly on the Zulip server. If file storage in Amazon S3 (or
-# elsewhere, e.g. your corporate fileshare) is desired, please contact
-# Zulip Support (support@zulip.com) for further instructions on
-# setting up the appropriate integration.
+# directly on the Zulip server. If file storage in Amazon S3 is
+# desired, you can configure that by setting s3_key and s3_secret_key
+# in /etc/zulip/zulip-secrets.conf to be the S3 access and secret keys
+# that you want to use, and setting the S3_AUTH_UPLOADS_BUCKET and
+# S3_AVATAR_BUCKET to be the S3 buckets you've created to store file
+# uploads and user avatars, respectively.
 LOCAL_UPLOADS_DIR = "/home/zulip/uploads"

 # Controls whether name changes are completely disabled for this installation
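Putting the new comment's instructions together, an S3-backed configuration would look roughly like the sketch below; the bucket names are placeholders, and s3_key/s3_secret_key belong in /etc/zulip/zulip-secrets.conf rather than in this file:

    # Hypothetical settings excerpt for S3 storage, per the comment above.
    S3_AUTH_UPLOADS_BUCKET = "example-zulip-uploads"
    S3_AVATAR_BUCKET = "example-zulip-avatars"
    # When S3 storage is in use, local-disk storage (LOCAL_UPLOADS_DIR)
    # would typically not be configured.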
@@ -111,11 +139,9 @@ ENABLE_GRAVATAR = True
 #
 # 1. Log in to http://dev.twitter.com.
 # 2. In the menu under your username, click My Applications. From this page, create a new application.
-# 3. Click on the application you created and click "create my access token". Fill in the requested values.
-TWITTER_CONSUMER_KEY = ''
-TWITTER_CONSUMER_SECRET = ''
-TWITTER_ACCESS_TOKEN_KEY = ''
-TWITTER_ACCESS_TOKEN_SECRET = ''
+# 3. Click on the application you created and click "create my access token".
+# 4. Fill in the values for twitter_consumer_key, twitter_consumer_secret, twitter_access_token_key,
+#    and twitter_access_token_secret in /etc/zulip/zulip-secrets.conf.

 ### EMAIL GATEWAY INTEGRATION
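The credentials now live in the secrets file instead of settings. Zulip's settings code reads that file with ConfigParser; a rough sketch, assuming the conventional [secrets] section of /etc/zulip/zulip-secrets.conf:

    import ConfigParser  # Python 2, matching the imports in the diffs above

    config = ConfigParser.ConfigParser()
    config.read('/etc/zulip/zulip-secrets.conf')

    # Assumes a [secrets] section; adjust if your file is laid out differently.
    twitter_consumer_key = config.get('secrets', 'twitter_consumer_key')
    twitter_consumer_secret = config.get('secrets', 'twitter_consumer_secret')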
@@ -166,8 +192,11 @@ EMAIL_GATEWAY_IMAP_PORT = 993
 EMAIL_GATEWAY_IMAP_FOLDER = "INBOX"

 ### LDAP integration configuration
-# Zulip supports retrieving information about users via LDAP, and optionally
-# using LDAP as an authentication mechanism.
+# Zulip supports retrieving information about users via LDAP, and
+# optionally using LDAP as an authentication mechanism.  For using
+# LDAP authentication, you will need to enable the
+# zproject.backends.ZulipLDAPAuthBackend auth backend in
+# AUTHENTICATION_BACKENDS above.

 import ldap
 from django_auth_ldap.config import LDAPSearch, GroupOfNamesType
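Once zproject.backends.ZulipLDAPAuthBackend is enabled in AUTHENTICATION_BACKENDS, the django-auth-ldap settings that follow this comment typically look something like the sketch below; the server URI and DNs are placeholders:

    import ldap
    from django_auth_ldap.config import LDAPSearch

    AUTH_LDAP_SERVER_URI = "ldap://ldap.example.com"
    # Look users up by uid under a placeholder organizational unit.
    AUTH_LDAP_USER_SEARCH = LDAPSearch("ou=users,dc=example,dc=com",
                                       ldap.SCOPE_SUBTREE,
                                       "(uid=%(user)s)")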
@@ -86,8 +86,7 @@ else:
     AUTHENTICATION_BACKENDS = ('zproject.backends.DevAuthBackend',)
     # Add some of the below if you're testing other backends
     # AUTHENTICATION_BACKENDS = ('zproject.backends.EmailAuthBackend',
-    #                            'zproject.backends.GoogleMobileOauth2Backend',
-    #                            'zproject.backends.GoogleBackend')
+    #                            'zproject.backends.GoogleMobileOauth2Backend',)
     EXTERNAL_URI_SCHEME = "http://"
     EMAIL_GATEWAY_PATTERN = "%s@" + EXTERNAL_HOST
     ADMIN_DOMAIN = "zulip.com"