zulip/tools/test-backend
Tim Abbott d90f7c72a3 zephyr: Fix broken postgres regular expression logic and add tests.
Like much rare-case code, once tests were added it turned out that the
logic for handling null characters in our Zephyr postgres query escaping
never worked, in multiple ways.  First, it always changed the second
character in s, not the current one being inspected, and second, the
value it replaced it with was not the correct postgres escape of the
null byte.  We fix this and add tests.

This completes the effort to get zerver/views/messages.py to 100%
test coverage.

Fixes #1006.
2017-03-01 10:38:48 -08:00
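
A minimal sketch of the indexing mistake the commit describes, for illustration only: this is not Zulip's actual escaping code, and the function names and the '\\0' replacement text are hypothetical placeholders.

# Hypothetical sketch only; not the real code in zerver/views/messages.py.
def escape_nulls_buggy(s):
    # type: (str) -> str
    chars = list(s)
    for i, c in enumerate(chars):
        if c == '\x00':
            # Bug shape described above: a fixed index is overwritten no
            # matter which position actually held the null byte.
            chars[1] = '\\0'
    return ''.join(chars)

def escape_nulls_fixed(s):
    # type: (str) -> str
    chars = list(s)
    for i, c in enumerate(chars):
        if c == '\x00':
            # Replace the character currently being inspected; '\\0' is a
            # placeholder, not necessarily the escape postgres expects.
            chars[i] = '\\0'
    return ''.join(chars)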

308 lines · 12 KiB · Python · Executable File

#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
import glob
import optparse
import os
import sys
import subprocess
# check for the venv
from lib import sanity_check
sanity_check.check_venv(__file__)
import django
from django.conf import settings
from django.test.utils import get_runner
target_fully_covered = {path for target in [
    'analytics/tests/*.py',
    'analytics/lib/*.py',
    'zerver/context_processors.py',
    'zerver/lib/alert_words.py',
    'zerver/lib/attachments.py',
    'zerver/lib/avatar_hash.py',
    'zerver/lib/context_managers.py',
    'zerver/lib/domains.py',
    'zerver/lib/emoji.py',
    'zerver/lib/i18n.py',
    'zerver/lib/mention.py',
    'zerver/lib/message.py',
    'zerver/lib/name_restrictions.py',
    'zerver/lib/realm_icon.py',
    'zerver/lib/retention.py',
    'zerver/lib/streams.py',
    'zerver/lib/users.py',
    'zerver/lib/webhooks/*.py',
    'zerver/views/*.py',
    # Once we have a nice negative tests system, we can add these:
    # 'zerver/webhooks/*/*.py',
    # 'zerver/webhooks/*/*/*.py',
    'zproject/backends.py',
    # Uncovered but in exclude list and we'd like to have included soon
    'confirmation/models.py',
    'zerver/decorator.py',
    'zerver/lib/actions.py',
    'zerver/lib/events.py',
    'zerver/lib/bugdown/__init__.py',
    'zerver/lib/events.py',
    'zerver/lib/integrations.py',
    'zerver/lib/message.py',
    'zerver/lib/narrow.py',
    'zerver/lib/notifications.py',
    'zerver/lib/push_notifications.py',
    'zerver/lib/request.py',
    'zerver/lib/upload.py',
    'zerver/lib/validator.py',
    'zerver/models.py',
] for path in glob.glob(target)}
not_yet_fully_covered = {
    # Goal is for analytics to have 100% coverage
    'analytics/lib/counts.py',
    'analytics/lib/fixtures.py',
    'analytics/lib/time_utils.py',
    # Major lib files should have 100% coverage
    'confirmation/models.py',
    'zerver/decorator.py',
    'zerver/lib/actions.py',
    'zerver/lib/events.py',
    'zerver/lib/bugdown/__init__.py',
    'zerver/lib/events.py',
    'zerver/lib/i18n.py',
    'zerver/lib/integrations.py',
    'zerver/lib/message.py',
    'zerver/lib/narrow.py',
    'zerver/lib/notifications.py',
    'zerver/lib/push_notifications.py',
    'zerver/lib/request.py',
    'zerver/lib/upload.py',
    'zerver/lib/validator.py',
    'zerver/models.py',
    # Test files should have full coverage; it's a bug in the test if
    # they don't!
    'zerver/tests/test_auth_backends.py',
    'zerver/tests/test_bugdown.py',
    'zerver/tests/test_decorators.py',
    'zerver/tests/test_email_mirror.py',
    'zerver/tests/test_events.py',
    'zerver/tests/test_narrow.py',
    'zerver/tests/test_signup.py',
    'zerver/tests/test_subs.py',
    'zerver/tests/test_templates.py',
    'zerver/tests/test_tornado.py',
    'zerver/tests/test_urls.py',
    'zerver/tests/tests.py',
    # Getting views file coverage to 100% is a major project goal
    'zerver/views/auth.py',
    'zerver/views/home.py',
    'zerver/views/registration.py',
    'zerver/views/events_register.py',
    # This one has 1 line that isn't covered in Python 3
    'zerver/views/upload.py',
    # Getting this to 100% is a major project goal.
    'zproject/backends.py',
}
enforce_fully_covered = sorted(target_fully_covered - not_yet_fully_covered)

if __name__ == "__main__":
    TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
    os.chdir(os.path.dirname(TOOLS_DIR))
    sys.path.insert(0, os.path.dirname(TOOLS_DIR))
    from zerver.lib.test_fixtures import is_template_database_current
    from tools.lib.test_script import (
        get_provisioning_status,
    )

    os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.test_settings'
    # "-u" uses unbuffered IO, which is important when wrapping it in subprocess
    os.environ['PYTHONUNBUFFERED'] = 'y'

    usage = """%prog [options]
    test-backend # Runs all backend tests
    test-backend zerver.tests.test_bugdown # run all tests in a test module
    test-backend zerver/tests/test_bugdown.py # run all tests in a test module
    test-backend test_bugdown # run all tests in a test module
    test-backend zerver.tests.test_bugdown.BugdownTest # run all tests in a test class
    test-backend BugdownTest # run all tests in a test class
    test-backend zerver.tests.test_bugdown.BugdownTest.test_inline_youtube # run a single test
    test-backend BugdownTest.test_inline_youtube # run a single test"""
    parser = optparse.OptionParser(usage)
    parser.add_option('--nonfatal-errors', action="store_false", default=True,
                      dest="fatal_errors", help="Continue past test failures to run all tests")
    parser.add_option('--coverage', dest='coverage',
                      action="store_true",
                      default=False, help='Compute test coverage.')
    parser.add_option('--no-verbose-coverage', dest='verbose_coverage',
                      action="store_false",
                      default=True, help='Disable verbose print of coverage report.')
    parser.add_option('--profile', dest='profile',
                      action="store_true",
                      default=False, help='Profile test runtime.')
    parser.add_option('--force', dest='force',
                      action="store_true",
                      default=False, help='Run tests despite possible problems.')
    parser.add_option('--no-shallow', dest='no_shallow',
                      action="store_true",
                      default=False,
                      help="Don't allow shallow testing of templates (deprecated)")
    parser.add_option('--verbose', dest='verbose',
                      action="store_true",
                      default=False,
                      help="Show detailed output")
    parser.add_option('--no-generate-fixtures', action="store_false", default=True,
                      dest="generate_fixtures",
                      help=("Reduce running time by not calling generate-fixtures. "
                            "This may cause spurious failures for some tests."))
    parser.add_option('--report-slow-tests', dest='report_slow_tests',
                      action="store_true",
                      default=False,
                      help="Show which tests are slowest.")

    (options, args) = parser.parse_args()
    zerver_test_dir = 'zerver/tests/'

    # to transform forward slashes '/' present in the argument into dots '.'
    for suite in args:
        args[args.index(suite)] = suite.rstrip('/').replace("/", ".")
    def rewrite_arguments(search_key):
        # type: (str) -> None
        # Expand a bare test class name (e.g. BugdownTest) into the path of
        # the test file under zerver/tests/ that mentions it.
        for root, dirs, files_names in os.walk(zerver_test_dir, topdown=False):
            for file_name in files_names:
                # Check for files starting with alphanumeric characters and ending with '.py'
                # Ignore backup files if any
                if not file_name[0].isalnum() or not file_name.endswith(".py"):
                    continue
                filepath = os.path.join(root, file_name)
                for line in open(filepath):
                    if search_key not in line:
                        continue
                    new_suite = filepath.replace(".py", ".") + suite
                    args[args.index(suite)] = new_suite
                    return
    for suite in args:
        if suite[0].isupper() and "test_" in suite:
            classname = suite.rsplit('.', 1)[0]
            rewrite_arguments(classname)
        elif suite[0].isupper():
            rewrite_arguments(suite)

    for suite in args:
        if suite.startswith('test'):
            for root, dirs, files_names in os.walk(zerver_test_dir):
                for file_name in files_names:
                    if file_name == suite or file_name == suite + ".py":
                        new_suite = os.path.join(root, file_name)
                        args[args.index(suite)] = new_suite
                        break

    for suite in args:
        args[args.index(suite)] = suite.replace(".py", "")

    # to transform forward slashes '/' introduced by the zerver_test_dir into dots '.'
    # taking care of any forward slashes that might be present
    for suite in args:
        args[args.index(suite)] = suite.replace("/", ".")
    full_suite = len(args) == 0

    if len(args) == 0:
        suites = ["zerver.tests",
                  "zerver.webhooks",
                  "analytics.tests"]
    else:
        suites = args

    if not options.force:
        ok, msg = get_provisioning_status()
        if not ok:
            print(msg)
            print('If you really know what you are doing, use --force to run anyway.')
            sys.exit(1)
    if options.coverage:
        import coverage
        cov = coverage.Coverage(omit=["*/zulip-venv-cache/*",
                                      "*/migrations/*",
                                      "*/management/commands/*"])
        cov.start()
    if options.profile:
        import cProfile
        prof = cProfile.Profile()
        prof.enable()

    # This is kind of hacky, but it's the most reliable way
    # to make sure instrumentation decorators know the
    # setting when they run.
    os.environ['TEST_INSTRUMENT_URL_COVERAGE'] = 'TRUE'

    # setup() needs to be called after coverage is started to get proper coverage reports of model
    # files, since part of setup is importing the models for all applications in INSTALLED_APPS.
    django.setup()

    if options.generate_fixtures:
        generate_fixtures_command = [os.path.join(TOOLS_DIR, 'setup', 'generate-fixtures')]
        if not is_template_database_current():
            generate_fixtures_command.append('--force')
        subprocess.call(generate_fixtures_command)
    TestRunner = get_runner(settings)
    test_runner = TestRunner(failfast=options.fatal_errors, verbosity=2)
    failures = test_runner.run_tests(suites, full_suite=full_suite)

    templates_not_rendered = test_runner.get_shallow_tested_templates()
    # We only check the templates if all the tests ran and passed
    if not failures and full_suite and templates_not_rendered:
        missed_count = len(templates_not_rendered)
        print("\nError: %s templates have no tests!" % (missed_count,))
        for template in templates_not_rendered:
            print(' {}'.format(template))
        print("See zerver/tests/test_templates.py for the exclude list.")
        failures = True
    if options.coverage:
        cov.stop()
        cov.save()
        if options.verbose_coverage:
            print("Printing coverage data")
            cov.report(show_missing=False)
        cov.html_report(directory='var/coverage')
        print("HTML report saved to var/coverage")

    if full_suite and not failures and options.coverage:
        # Assert that various files have full coverage
        for path in enforce_fully_covered:
            missing_lines = cov.analysis2(path)[3]
            if len(missing_lines) > 0:
                print("ERROR: %s no longer has complete backend test coverage" % (path,))
                print(" Lines missing coverage: %s" % (missing_lines,))
                print()
                failures = True
        if failures:
            print("It looks like your changes lost 100% test coverage in one or more files")
            print("Usually, the right fix for this is to add some tests.")
            print("But also check out the include/exclude lists in tools/test-backend.")
            print("To run this check locally, use `test-backend --coverage`.")
    if options.profile:
        prof.disable()
        prof.dump_stats("/tmp/profile.data")
        print("Profile data saved to /tmp/profile.data")
        print("You can visualize it using e.g. `runsnake /tmp/profile.data`")

    if options.report_slow_tests:
        from zerver.lib.test_runner import report_slow_tests
        # We do this even with failures, since slowness can be
        # an important clue as to why tests fail.
        report_slow_tests()

    # We'll have printed whether tests passed or failed above
    sys.exit(bool(failures))