Previously, when tests failed under --interactive and we pressed Enter to trigger a re-run, the suite restarted from the first test, even though re-running tests that already passed is of no interest. It also kept re-running all the tests in a loop even when every one of them passed. This commit fixes both issues: pressing Enter now re-runs the suite starting from the test that failed, and the loop exits once all tests pass.
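The core of the change is easy to see in isolation: the runner reports the index of the first failing test, and the interactive loop feeds that index back in on the next run, stopping when no failure is left. A minimal self-contained sketch of that pattern (run_from and interactive_loop are illustrative names, not helpers from the script below, whose inner run_tests function plays the same role):

from typing import Callable, List

def run_from(tests: List[Callable[[], bool]], start: int) -> int:
    # Run tests[start:]; return the index of the first failure, or -1 if all pass.
    for i, test in enumerate(tests[start:], start):
        if not test():
            return i
    return -1

def interactive_loop(tests: List[Callable[[], bool]]) -> None:
    failed = 0
    while True:
        failed = run_from(tests, failed)  # resume from the last failing test
        if failed == -1:
            break  # all tests passed; stop instead of looping again
        if input('Tests failed. Press Enter to re-run, "q" to quit: ') == 'q':
            break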
		
			
				
	
	
		
104 lines | 3.9 KiB | Python | Executable File
#!/usr/bin/env python3
import argparse
import os
import shlex
import subprocess
import sys

ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Request the special webpack setup for frontend integration tests,
# where webpack assets are compiled up front rather than running in
# watch mode. We prefer the same for puppeteer, so the line below
# makes the webpack setup similar to casper's.
#
# TODO: Eventually, we'll want to rename this parameter.
os.environ["CASPER_TESTS"] = "1"

os.environ["CHROMIUM_EXECUTABLE"] = os.path.join(ZULIP_PATH, "node_modules/.bin/chromium")
os.environ.pop("http_proxy", "")
os.environ.pop("https_proxy", "")

usage = """test-js-with-puppeteer [options]
    test-js-with-puppeteer # Run all test files
    test-js-with-puppeteer 09-navigation.js # Run a single test file
    test-js-with-puppeteer 09 # Run a single test file, 09-navigation.js
    test-js-with-puppeteer 01-login.js 03-narrow.js # Run a few test files
    test-js-with-puppeteer 01 03 # Run a few test files, 01-login.js and 03-narrow.js here"""
parser = argparse.ArgumentParser(usage=usage)

parser.add_argument('--interactive', dest='interactive',
                    action="store_true",
                    default=False, help='Run tests interactively')
parser.add_argument('--force', dest='force',
                    action="store_true",
                    default=False, help='Run tests despite possible problems.')
parser.add_argument('tests', nargs=argparse.REMAINDER,
                    help='Specific tests to run; by default, runs all tests')

options = parser.parse_args()

sys.path.insert(0, ZULIP_PATH)

# check for the venv
from tools.lib import sanity_check

sanity_check.check_venv(__file__)

from typing import Iterable, Tuple

from tools.lib.test_script import (
    assert_provisioning_status_ok,
    find_js_test_files,
    prepare_puppeteer_run,
)
from tools.lib.test_server import test_server_running

def run_tests(files: Iterable[str], external_host: str) -> None:
    test_dir = os.path.join(ZULIP_PATH, 'frontend_tests/puppeteer_tests')
    test_files = find_js_test_files(test_dir, files)

    # Runs test_files[test_number:], so a re-run can resume from the test
    # that failed last time. Returns (exit code, index of the failing test),
    # or (0, -1) when every remaining test passes. (This inner helper
    # shadows the outer run_tests.)
    def run_tests(test_number: int = 0) -> Tuple[int, int]:
        current_test_num = test_number
        for test_file in test_files[test_number:]:
            test_name = os.path.basename(test_file)
            cmd = ["node", test_file]
            print("\n\n===================== {}\nRunning {}\n\n".format(test_name, " ".join(map(shlex.quote, cmd))), flush=True)
            ret = subprocess.call(cmd)
            if ret != 0:
                return ret, current_test_num
            current_test_num += 1
        return 0, -1

    with test_server_running(False, external_host):
        # Important: do this next call inside the `with` block, when Django
        # will be pointing at the test database.
        subprocess.check_call(['tools/setup/generate-test-credentials'])
        if options.interactive:
            response = input('Press Enter to run tests, "q" to quit: ')
            ret = 1
            failed_test_num = 0
            # Resume each re-run from the test that failed, and stop once a
            # run reports failed_test_num == -1, meaning all tests passed.
            while response != 'q' and failed_test_num != -1:
                ret, failed_test_num = run_tests(failed_test_num)
                if ret != 0:
                    response = input('Tests failed. Press Enter to re-run tests, "q" to quit: ')
        else:
            ret = run_tests()[0]
    if ret != 0:
        print("""
The Puppeteer frontend tests failed! Please report and ask for help in chat.zulip.org""", file=sys.stderr)
        if os.environ.get("CIRCLECI"):
            print("", file=sys.stderr)
            print("In CircleCI, the Artifacts tab contains screenshots of the failure.", file=sys.stderr)
            print("", file=sys.stderr)
        sys.exit(ret)

external_host = "zulipdev.com:9981"
assert_provisioning_status_ok(options.force)
prepare_puppeteer_run()
run_tests(options.tests, external_host)
sys.exit(0)
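A usage note tying back to the commit message: the resume-and-exit behavior only applies with the --interactive flag, e.g. (assuming the script lives under tools/, consistent with how ZULIP_PATH is derived above):

./tools/test-js-with-puppeteer --interactive 01 03

Without --interactive, a failing test simply ends the run: the script prints the failure banner and exits non-zero.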