#!/usr/bin/env python3

import os
import sys
import time
import signal
import subprocess
import re

# check for the venv
from lib import sanity_check
sanity_check.check_venv(__file__)

from typing import IO, Text

# TODO: Convert this to use scripts/lib/queue_workers.py
TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
successful_worker_launches = [
    '[process_queue] 20 queue worker threads were launched',
]
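# Each entry above must appear in the run-dev log before the workers are
# considered up; the count of 20 is what the development setup is expected
# to launch.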


def check_worker_launch(logfile_name):
    # type: (str) -> bool
    def check(content):
        # type: (str) -> bool
        # Every expected launch message must be present in the log so far.
        flag = True
        for entry in successful_worker_launches:
            flag = flag and entry in content
        return flag

    failed = True
    log_output = []
    print("Polling logfile", end='')
    logfile = open(logfile_name, 'rb', buffering=0)
    # Attempt to poll the log file for 60 sec. to see if all worker threads are launched.
    for i in range(200):
        time.sleep(0.3)
        sys.stdout.write('.')
        sys.stdout.flush()
        new_data = logfile.read().decode()
        if new_data:
            log_output.append(new_data)

            if check(''.join(log_output)):
                failed = False
                break
    sys.stdout.write('\n')
    logfile.close()

    if not failed:
        print('Worker threads launched successfully')
    else:
        print('Error in server startup. Dumping logs')
        print(''.join(log_output))

    return failed
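
# Test flow: start the development server via run-dev.py, confirm that the
# queue workers come up, then touch a watched source file to trigger Django's
# autoreloader and confirm that the workers are relaunched afterwards.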
if __name__ == '__main__':
    print('\nStarting Development Server')
    logfile_name = '/tmp/run-dev-output'
    logfile = open(logfile_name, 'wb', buffering=0)
    # Start run-dev.py and redirect everything it prints into the log file.
    args = ["{}/run-dev.py".format(TOOLS_DIR)]
    run_dev = subprocess.Popen(args, stdout=logfile, stderr=subprocess.STDOUT)

    failed = check_worker_launch(logfile_name)
    if failed:
        run_dev.send_signal(signal.SIGINT)
        run_dev.wait()
        logfile.close()
        sys.exit(1)

    # In the development environment, the queue workers run under Django's
    # autoreload code. The autoreloader works by looping over the files of
    # all loaded modules, roughly once per second. When the loop sees a file
    # for the first time, it assumes the file has not been modified since it
    # was loaded. That assumption is the source of a race condition: a change
    # made before the loop's first pass over the file goes unnoticed.
    #
    # We could make the loop more sensitive, but it is simpler to just give
    # the loop enough time to visit every file at least once before we modify
    # one.
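    #
    # A rough, comment-only sketch of the polling loop described above (an
    # approximation of Django's old mtime-based autoreloader, not code that
    # runs in this script; watched_files() and restart() are placeholders):
    #
    #     mtimes = {}
    #     while True:                            # one pass roughly every second
    #         for filename in watched_files():
    #             current = os.stat(filename).st_mtime
    #             if filename not in mtimes:
    #                 # First sighting: assumed unmodified, so a change made
    #                 # before this pass is silently lost.
    #                 mtimes[filename] = current
    #             elif current != mtimes[filename]:
    #                 restart()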
    time.sleep(1.3)
    # Remove all existing data from the server log file, so that the next poll
    # only sees output produced after the reload.
    logfile.truncate(0)
    logfile.seek(0)

    print("Attempting to modify a file")
    subprocess.call(['touch', 'zerver/lib/actions.py'])
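    # Touching a file that belongs to a loaded module should make the
    # autoreloader restart the queue workers; poll the log again to confirm
    # that they come back up. (The relative path assumes the script is run
    # from the root of the checkout.)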
    failed = check_worker_launch(logfile_name)

    run_dev.send_signal(signal.SIGINT)
    run_dev.wait()
    logfile.close()
    if failed:
        sys.exit(1)