Radix cross Linux

The main Radix cross Linux repository contains the build scripts for packages that provide the most complete and commonly used functionality for desktop machines.

Index: js/src/Makefile.in
===================================================================
--- js/src/Makefile.in	(nonexistent)
+++ js/src/Makefile.in	(revision 228)
@@ -0,0 +1,150 @@
+# -*- Mode: makefile -*-
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ifeq (,$(MAKE_VERSION))
+$(error GNU Make is required)
+endif
+make_min_ver := 3.81
+ifneq ($(make_min_ver),$(firstword $(sort $(make_min_ver) $(MAKE_VERSION))))
+$(error GNU Make $(make_min_ver) or higher is required)
+endif
+
+DASH_R		= -r
+
+# Define keyword generator before rules.mk, see bug 323979 comment 50
+
+USE_HOST_CXX = 1
+
+include $(topsrcdir)/config/rules.mk
+
+ifdef MOZ_VALGRIND
+ifndef MOZ_ASAN
+JITTEST_VALGRIND_FLAG = --valgrind
+endif
+endif
+
+ifneq ($(LLVM_SYMBOLIZER),)
+# Use the LLVM symbolizer when running jit-tests under ASan, TSan or MSan, if available
+ifdef MOZ_ASAN
+JITTEST_SANITIZER_ENV=ASAN_SYMBOLIZER_PATH='$(LLVM_SYMBOLIZER)'
+endif
+ifdef MOZ_TSAN
+JITTEST_SANITIZER_ENV=TSAN_OPTIONS="external_symbolizer_path=$(LLVM_SYMBOLIZER) handle_segv=0 $$TSAN_OPTIONS"
+endif
+ifdef MOZ_MSAN
+JITTEST_SANITIZER_ENV=MSAN_SYMBOLIZER_PATH='$(LLVM_SYMBOLIZER)'
+endif
+endif
+
+check-js-msg::
+	(cd $(topsrcdir) && $(PYTHON3) $(topsrcdir)/config/check_js_msg_encoding.py);
+
+check-jit-test::
+	$(JITTEST_SANITIZER_ENV) $(wildcard $(RUN_TEST_PROGRAM)) $(PYTHON3) -u $(srcdir)/jit-test/jit_test.py \
+	        --no-slow --no-progress --format=automation --jitflags=all \
+			$(JITTEST_VALGRIND_FLAG) \
+			$(JITTEST_EXTRA_ARGS) \
+	        $(DIST)/bin/js$(BIN_SUFFIX) $(JITTEST_TEST_ARGS)
+
+check:: check-js-msg
+
+check-jstests:
+	$(wildcard $(RUN_TEST_PROGRAM)) $(PYTHON3) -u $(srcdir)/tests/jstests.py \
+		--no-progress --format=automation --timeout 600 \
+		$(JSTESTS_EXTRA_ARGS) \
+		$(DIST)/bin/js$(BIN_SUFFIX)
+
+# FIXME:
+# We want to run check-jstests as part of |make check| on all platforms, on
+# tinderbox. However, some configurations don't work quite right just yet.
+# Rather than risking regressions in major configurations while fixing these
+# secondary configurations, we work around them and fix the bugs later.
+#
+#   Bug 652154: On Windows, SM(!m !t) don't work because of path problems with
+#				their unusual directory layout
+#
+#   Bug 652155: On Mac, SM(d) doesn't work for unknown reasons
+
+ifneq ($(OS_ARCH),WINNT)
+ifndef HAVE_DTRACE
+#check:: check-jstests
+endif
+endif
+
+CFLAGS += $(MOZ_ZLIB_CFLAGS)
+
+# Silence warnings on AIX/HP-UX from non-GNU compilers
+ifndef GNU_CC
+ifeq ($(OS_ARCH),AIX)
+# Suppress warnings from xlC
+# 1540-1281: offsetof() on null non-POD types
+# 1540-1608: anonymous unions using static data members
+CFLAGS		+= -qsuppress=1540-1281 -qsuppress=1540-1608
+CXXFLAGS	+= -qsuppress=1540-1281 -qsuppress=1540-1608
+endif
+ifeq ($(OS_ARCH),HP-UX)
+# Suppress warnings from aCC
+# 3055: anonymous unions declaring types
+# 4189: offsetof() on non-POD types
+CFLAGS		+= +W3055,4189
+CXXFLAGS	+= +W3055,4189
+endif
+endif
+ifeq ($(OS_ARCH),SunOS)
+ifeq ($(TARGET_CPU),sparc)
+
+ifdef GNU_CC
+CFLAGS   += -mcpu=v9
+CXXFLAGS += -mcpu=v9
+endif # GNU_CC
+
+endif
+endif
+
+install::
+	$(MAKE) -C build install
+	$(MAKE) -C shell install
+
+ifdef HAVE_DTRACE
+javascript-trace.h: $(srcdir)/devtools/javascript-trace.d
+	dtrace -x nolibs -h -s $(srcdir)/devtools/javascript-trace.d -o javascript-trace.h.in
+	sed -e 's/if _DTRACE_VERSION/ifdef INCLUDE_MOZILLA_DTRACE/' \
+	    -e '/const/!s/char \*/const char */g' \
+	    javascript-trace.h.in > javascript-trace.h
+
+# We can't automatically generate dependencies on auto-generated headers;
+# we have to list them explicitly.
+$(addsuffix .$(OBJ_SUFFIX),Probes jsinterp jsobj): $(CURDIR)/javascript-trace.h
+
+ifneq ($(OS_ARCH),Darwin)
+DTRACE_PROBE_OBJ = js-dtrace.$(OBJ_SUFFIX)
+$(LIBRARY): $(DTRACE_PROBE_OBJ)
+$(DTRACE_PROBE_OBJ): $(srcdir)/devtools/javascript-trace.d $(OBJS)
+	dtrace -x nolibs -G -C -s $< -o $@ $(filter-out $<, $^)
+
+OBJS += $(DTRACE_PROBE_OBJ)
+endif # OS_ARCH != Darwin
+endif # HAVE_DTRACE
+
+###############################################
+# Generating source package tarballs
+# (only possible when tar is found)
+ifneq (,$(TAR))
+
+source-package:
+	SRCDIR=$(srcdir) \
+	DIST=$(DIST) \
+	MKDIR=$(MKDIR) \
+	TAR=$(TAR) \
+	M4=$(M4) \
+	AWK=$(AWK) \
+	MOZJS_MAJOR_VERSION=$(MOZJS_MAJOR_VERSION) \
+	MOZJS_MINOR_VERSION=$(MOZJS_MINOR_VERSION) \
+	MOZJS_PATCH_VERSION=$(MOZJS_PATCH_VERSION) \
+	MOZJS_ALPHA=$(MOZJS_ALPHA) \
+	$(srcdir)/make-source-package.sh
+
+endif
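Note: the version guard at the top of Makefile.in relies on GNU Make's $(sort), which orders words lexically, so the build only proceeds when the required minimum version sorts first. A minimal Python analogue of that check (not part of the patch, shown only to illustrate the trick and its purely lexical ordering):

def make_version_ok(make_version, min_ver="3.81"):
    # mirrors: ifneq ($(make_min_ver),$(firstword $(sort $(make_min_ver) $(MAKE_VERSION))))
    return sorted([min_ver, make_version])[0] == min_ver

print(make_version_ok("4.3"))   # True
print(make_version_ok("3.80"))  # False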
Index: js/src/jit-test/jit_test.py
===================================================================
--- js/src/jit-test/jit_test.py	(nonexistent)
+++ js/src/jit-test/jit_test.py	(revision 228)
@@ -0,0 +1,595 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import print_function, unicode_literals
+
+import math
+import os
+import platform
+import posixpath
+import shlex
+import subprocess
+import sys
+import traceback
+
+
+read_input = input
+if sys.version_info.major == 2:
+    read_input = raw_input
+
+
+def add_tests_dir_to_path():
+    from os.path import dirname, exists, join, realpath
+
+    js_src_dir = dirname(dirname(realpath(sys.argv[0])))
+    assert exists(join(js_src_dir, "jsapi.h"))
+    sys.path.insert(0, join(js_src_dir, "tests"))
+
+
+add_tests_dir_to_path()
+
+from lib import jittests
+from lib.tests import (
+    get_jitflags,
+    valid_jitflags,
+    get_cpu_count,
+    get_environment_overlay,
+    change_env,
+)
+from tempfile import TemporaryDirectory
+
+
+def which(name):
+    if name.find(os.path.sep) != -1:
+        return os.path.abspath(name)
+
+    for path in os.environ["PATH"].split(os.pathsep):
+        full = os.path.join(path, name)
+        if os.path.exists(full):
+            return os.path.abspath(full)
+
+    return name
+
+
+def choose_item(jobs, max_items, display):
+    job_count = len(jobs)
+
+    # Don't present a choice if there are too many tests
+    if job_count > max_items:
+        raise Exception("Too many jobs.")
+
+    for i, job in enumerate(jobs, 1):
+        print("{}) {}".format(i, display(job)))
+
+    item = read_input("Which one:\n")
+    try:
+        item = int(item)
+        if item > job_count or item < 1:
+            raise Exception("Input isn't between 1 and {}".format(job_count))
+    except ValueError:
+        raise Exception("Unrecognized input")
+
+    return jobs[item - 1]
+
+
+def main(argv):
+    # The [TESTS] optional arguments are paths of test files relative
+    # to the jit-test/tests directory.
+    import argparse
+
+    op = argparse.ArgumentParser(description="Run jit-test JS shell tests")
+    op.add_argument(
+        "-s",
+        "--show-cmd",
+        dest="show_cmd",
+        action="store_true",
+        help="show js shell command run",
+    )
+    op.add_argument(
+        "-f",
+        "--show-failed-cmd",
+        dest="show_failed",
+        action="store_true",
+        help="show command lines of failed tests",
+    )
+    op.add_argument(
+        "-o",
+        "--show-output",
+        dest="show_output",
+        action="store_true",
+        help="show output from js shell",
+    )
+    op.add_argument(
+        "-F",
+        "--failed-only",
+        dest="failed_only",
+        action="store_true",
+        help="if --show-output is given, only print output for" " failed tests",
+    )
+    op.add_argument(
+        "--no-show-failed",
+        dest="no_show_failed",
+        action="store_true",
+        help="don't print output for failed tests" " (no-op with --show-output)",
+    )
+    op.add_argument(
+        "-x",
+        "--exclude",
+        dest="exclude",
+        default=[],
+        action="append",
+        help="exclude given test dir or path",
+    )
+    op.add_argument(
+        "--exclude-from",
+        dest="exclude_from",
+        type=str,
+        help="exclude each test dir or path in FILE",
+    )
+    op.add_argument(
+        "--slow",
+        dest="run_slow",
+        action="store_true",
+        help="also run tests marked as slow",
+    )
+    op.add_argument(
+        "--no-slow",
+        dest="run_slow",
+        action="store_false",
+        help="do not run tests marked as slow (the default)",
+    )
+    op.add_argument(
+        "-t",
+        "--timeout",
+        dest="timeout",
+        type=float,
+        default=150.0,
+        help="set test timeout in seconds",
+    )
+    op.add_argument(
+        "--no-progress",
+        dest="hide_progress",
+        action="store_true",
+        help="hide progress bar",
+    )
+    op.add_argument(
+        "--tinderbox",
+        dest="format",
+        action="store_const",
+        const="automation",
+        help="Use automation-parseable output format",
+    )
+    op.add_argument(
+        "--format",
+        dest="format",
+        default="none",
+        choices=("automation", "none"),
+        help="Output format (default %(default)s).",
+    )
+    op.add_argument(
+        "--args",
+        dest="shell_args",
+        metavar="ARGS",
+        default="",
+        help="extra args to pass to the JS shell",
+    )
+    op.add_argument(
+        "--feature-args",
+        dest="feature_args",
+        metavar="ARGS",
+        default="",
+        help="even more args to pass to the JS shell "
+        "(for compatibility with jstests.py)",
+    )
+    op.add_argument(
+        "-w",
+        "--write-failures",
+        dest="write_failures",
+        metavar="FILE",
+        help="Write a list of failed tests to [FILE]",
+    )
+    op.add_argument(
+        "-C",
+        "--check-output",
+        action="store_true",
+        dest="check_output",
+        help="Run tests to check output for different jit-flags",
+    )
+    op.add_argument(
+        "-r",
+        "--read-tests",
+        dest="read_tests",
+        metavar="FILE",
+        help="Run test files listed in [FILE]",
+    )
+    op.add_argument(
+        "-R",
+        "--retest",
+        dest="retest",
+        metavar="FILE",
+        help="Retest using test list file [FILE]",
+    )
+    op.add_argument(
+        "-g",
+        "--debug",
+        action="store_const",
+        const="gdb",
+        dest="debugger",
+        help="Run a single test under the gdb debugger",
+    )
+    op.add_argument(
+        "-G",
+        "--debug-rr",
+        action="store_const",
+        const="rr",
+        dest="debugger",
+        help="Run a single test under the rr debugger",
+    )
+    op.add_argument(
+        "--debugger", type=str, help="Run a single test under the specified debugger"
+    )
+    op.add_argument(
+        "--valgrind",
+        dest="valgrind",
+        action="store_true",
+        help="Enable the |valgrind| flag, if valgrind is in $PATH.",
+    )
+    op.add_argument(
+        "--unusable-error-status",
+        action="store_true",
+        help="Ignore incorrect exit status on tests that should return nonzero.",
+    )
+    op.add_argument(
+        "--valgrind-all",
+        dest="valgrind_all",
+        action="store_true",
+        help="Run all tests with valgrind, if valgrind is in $PATH.",
+    )
+    op.add_argument(
+        "--avoid-stdio",
+        dest="avoid_stdio",
+        action="store_true",
+        help="Use js-shell file indirection instead of piping stdio.",
+    )
+    op.add_argument(
+        "--write-failure-output",
+        dest="write_failure_output",
+        action="store_true",
+        help="With --write-failures=FILE, additionally write the"
+        " output of failed tests to [FILE]",
+    )
+    op.add_argument(
+        "--jitflags",
+        dest="jitflags",
+        default="none",
+        choices=valid_jitflags(),
+        help="IonMonkey option combinations (default %(default)s).",
+    )
+    op.add_argument(
+        "--ion",
+        dest="jitflags",
+        action="store_const",
+        const="ion",
+        help="Run tests once with --ion-eager and once with"
+        " --baseline-eager (equivalent to --jitflags=ion)",
+    )
+    op.add_argument(
+        "--no-xdr",
+        dest="use_xdr",
+        action="store_false",
+        help="Whether to disable caching of self-hosted parsed content in XDR format.",
+    )
+    op.add_argument(
+        "--tbpl",
+        dest="jitflags",
+        action="store_const",
+        const="all",
+        help="Run tests with all IonMonkey option combinations"
+        " (equivalent to --jitflags=all)",
+    )
+    op.add_argument(
+        "-j",
+        "--worker-count",
+        dest="max_jobs",
+        type=int,
+        default=max(1, get_cpu_count()),
+        help="Number of tests to run in parallel (default %(default)s).",
+    )
+    op.add_argument(
+        "--remote", action="store_true", help="Run tests on a remote device"
+    )
+    op.add_argument(
+        "--deviceIP",
+        action="store",
+        type=str,
+        dest="device_ip",
+        help="IP address of remote device to test",
+    )
+    op.add_argument(
+        "--devicePort",
+        action="store",
+        type=int,
+        dest="device_port",
+        default=20701,
+        help="port of remote device to test",
+    )
+    op.add_argument(
+        "--deviceSerial",
+        action="store",
+        type=str,
+        dest="device_serial",
+        default=None,
+        help="ADB device serial number of remote device to test",
+    )
+    op.add_argument(
+        "--remoteTestRoot",
+        dest="remote_test_root",
+        action="store",
+        type=str,
+        default="/data/local/tmp/test_root",
+        help="The remote directory to use as test root" " (e.g.  %(default)s)",
+    )
+    op.add_argument(
+        "--localLib",
+        dest="local_lib",
+        action="store",
+        type=str,
+        help="The location of libraries to push -- preferably" " stripped",
+    )
+    op.add_argument(
+        "--repeat", type=int, default=1, help="Repeat tests the given number of times."
+    )
+    op.add_argument("--this-chunk", type=int, default=1, help="The test chunk to run.")
+    op.add_argument(
+        "--total-chunks", type=int, default=1, help="The total number of test chunks."
+    )
+    op.add_argument(
+        "--ignore-timeouts",
+        dest="ignore_timeouts",
+        metavar="FILE",
+        help="Ignore timeouts of tests listed in [FILE]",
+    )
+    op.add_argument(
+        "--retry-remote-timeouts",
+        dest="timeout_retry",
+        type=int,
+        default=1,
+        help="Number of times to retry timeouts on remote devices",
+    )
+    op.add_argument(
+        "--test-reflect-stringify",
+        dest="test_reflect_stringify",
+        help="instead of running tests, use them to test the "
+        "Reflect.stringify code in the specified file",
+    )
+    # --enable-webrender is ignored as it is not relevant for JIT
+    # tests, but is required for harness compatibility.
+    op.add_argument(
+        "--enable-webrender",
+        action="store_true",
+        dest="enable_webrender",
+        default=False,
+        help=argparse.SUPPRESS,
+    )
+    op.add_argument("js_shell", metavar="JS_SHELL", help="JS shell to run tests with")
+    op.add_argument(
+        "-z", "--gc-zeal", help="GC zeal mode to use when running the shell"
+    )
+
+    options, test_args = op.parse_known_args(argv)
+    js_shell = which(options.js_shell)
+    test_environment = get_environment_overlay(js_shell, options.gc_zeal)
+
+    if not (os.path.isfile(js_shell) and os.access(js_shell, os.X_OK)):
+        if (
+            platform.system() != "Windows"
+            or os.path.isfile(js_shell)
+            or not os.path.isfile(js_shell + ".exe")
+            or not os.access(js_shell + ".exe", os.X_OK)
+        ):
+            op.error("shell is not executable: " + js_shell)
+
+    if jittests.stdio_might_be_broken():
+        # Prefer erring on the side of caution and not using stdio if
+        # it might be broken on this platform.  The file-redirect
+        # fallback should work on any platform, so at worst by
+        # guessing wrong we might have slowed down the tests a bit.
+        #
+        # XXX technically we could check for broken stdio, but it
+        # really seems like overkill.
+        options.avoid_stdio = True
+
+    if options.retest:
+        options.read_tests = options.retest
+        options.write_failures = options.retest
+
+    test_list = []
+    read_all = True
+
+    if test_args:
+        read_all = False
+        for arg in test_args:
+            test_list += jittests.find_tests(arg)
+
+    if options.read_tests:
+        read_all = False
+        try:
+            f = open(options.read_tests)
+            for line in f:
+                test_list.append(os.path.join(jittests.TEST_DIR, line.strip("\n")))
+            f.close()
+        except IOError:
+            if options.retest:
+                read_all = True
+            else:
+                sys.stderr.write(
+                    "Exception thrown trying to read test file"
+                    " '{}'\n".format(options.read_tests)
+                )
+                traceback.print_exc()
+                sys.stderr.write("---\n")
+
+    if read_all:
+        test_list = jittests.find_tests()
+
+    if options.exclude_from:
+        with open(options.exclude_from) as fh:
+            for line in fh:
+                line_exclude = line.strip()
+                if not line_exclude.startswith("#") and len(line_exclude):
+                    options.exclude.append(line_exclude)
+
+    if options.exclude:
+        exclude_list = []
+        for exclude in options.exclude:
+            exclude_list += jittests.find_tests(exclude)
+        test_list = [test for test in test_list if test not in set(exclude_list)]
+
+    if not test_list:
+        print("No tests found matching command line arguments.", file=sys.stderr)
+        sys.exit(0)
+
+    test_list = [jittests.JitTest.from_file(_, options) for _ in test_list]
+
+    if not options.run_slow:
+        test_list = [_ for _ in test_list if not _.slow]
+
+    if options.test_reflect_stringify is not None:
+        for test in test_list:
+            test.test_reflect_stringify = options.test_reflect_stringify
+
+    # If chunking is enabled, determine which tests are part of this chunk.
+    # This code was adapted from testing/mochitest/runtestsremote.py.
+    if options.total_chunks > 1:
+        total_tests = len(test_list)
+        tests_per_chunk = math.ceil(total_tests / float(options.total_chunks))
+        start = int(round((options.this_chunk - 1) * tests_per_chunk))
+        end = int(round(options.this_chunk * tests_per_chunk))
+        test_list = test_list[start:end]
+
+    if not test_list:
+        print(
+            "No tests found matching command line arguments after filtering.",
+            file=sys.stderr,
+        )
+        sys.exit(0)
+
+    # The full test list is ready. Now create copies for each JIT configuration.
+    test_flags = get_jitflags(options.jitflags)
+
+    test_list = [_ for test in test_list for _ in test.copy_variants(test_flags)]
+
+    job_list = (test for test in test_list)
+    job_count = len(test_list)
+
+    if options.repeat:
+
+        def repeat_copy(job_list_generator, repeat):
+            job_list = list(job_list_generator)
+            for i in range(repeat):
+                for test in job_list:
+                    if i == 0:
+                        yield test
+                    else:
+                        yield test.copy()
+
+        job_list = repeat_copy(job_list, options.repeat)
+        job_count *= options.repeat
+
+    if options.ignore_timeouts:
+        read_all = False
+        try:
+            with open(options.ignore_timeouts) as f:
+                ignore = set()
+                for line in f.readlines():
+                    path = line.strip("\n")
+                    ignore.add(path)
+                options.ignore_timeouts = ignore
+        except IOError:
+            sys.exit("Error reading file: " + options.ignore_timeouts)
+    else:
+        options.ignore_timeouts = set()
+
+    prefix = (
+        [js_shell] + shlex.split(options.shell_args) + shlex.split(options.feature_args)
+    )
+    prologue = os.path.join(jittests.LIB_DIR, "prologue.js")
+    if options.remote:
+        prologue = posixpath.join(options.remote_test_root, "lib", "prologue.js")
+
+    prefix += ["-f", prologue]
+
+    if options.debugger:
+        if job_count > 1:
+            print(
+                "Multiple tests match command line"
+                " arguments, debugger can only run one"
+            )
+            jobs = list(job_list)
+
+            def display_job(job):
+                flags = ""
+                if len(job.jitflags) != 0:
+                    flags = "({})".format(" ".join(job.jitflags))
+                return "{} {}".format(job.path, flags)
+
+            try:
+                tc = choose_item(jobs, max_items=50, display=display_job)
+            except Exception as e:
+                sys.exit(str(e))
+        else:
+            tc = next(job_list)
+
+        if options.debugger == "gdb":
+            debug_cmd = ["gdb", "--args"]
+        elif options.debugger == "lldb":
+            debug_cmd = ["lldb", "--"]
+        elif options.debugger == "rr":
+            debug_cmd = ["rr", "record"]
+        else:
+            debug_cmd = options.debugger.split()
+
+        with change_env(test_environment):
+            with TemporaryDirectory() as tempdir:
+                if options.debugger == "rr":
+                    subprocess.call(
+                        debug_cmd
+                        + tc.command(
+                            prefix, jittests.LIB_DIR, jittests.MODULE_DIR, tempdir
+                        )
+                    )
+                    os.execvp("rr", ["rr", "replay"])
+                else:
+                    os.execvp(
+                        debug_cmd[0],
+                        debug_cmd
+                        + tc.command(
+                            prefix, jittests.LIB_DIR, jittests.MODULE_DIR, tempdir
+                        ),
+                    )
+        sys.exit()
+
+    try:
+        ok = None
+        if options.remote:
+            ok = jittests.run_tests(job_list, job_count, prefix, options, remote=True)
+        else:
+            with change_env(test_environment):
+                ok = jittests.run_tests(job_list, job_count, prefix, options)
+        if not ok:
+            sys.exit(2)
+    except OSError:
+        if not os.path.exists(prefix[0]):
+            print(
+                "JS shell argument: file does not exist:" " '{}'".format(prefix[0]),
+                file=sys.stderr,
+            )
+            sys.exit(1)
+        else:
+            raise
+
+
+if __name__ == "__main__":
+    main(sys.argv[1:])

Property changes on: js/src/jit-test/jit_test.py
___________________________________________________________________
Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property
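Both jit_test.py above and jstests.py below split their test lists into --total-chunks slices and run only the slice selected by --this-chunk. A self-contained sketch of that arithmetic (not part of the patch), handy for checking how tests get distributed:

import math

def chunk(tests, this_chunk, total_chunks):
    # Same rounding as in the harnesses: ceil of the per-chunk size,
    # then rounded slice boundaries.
    per_chunk = math.ceil(len(tests) / float(total_chunks))
    start = int(round((this_chunk - 1) * per_chunk))
    end = int(round(this_chunk * per_chunk))
    return tests[start:end]

# chunk(list(range(10)), 1, 3) -> [0, 1, 2, 3]
# chunk(list(range(10)), 3, 3) -> [8, 9]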
Index: js/src/tests/jstests.py
===================================================================
--- js/src/tests/jstests.py	(nonexistent)
+++ js/src/tests/jstests.py	(revision 228)
@@ -0,0 +1,877 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+The JS Shell Test Harness.
+
+See the adjacent README.txt for more details.
+"""
+
+from __future__ import print_function
+
+import math
+import os
+import posixpath
+import re
+import shlex
+import sys
+import tempfile
+import platform
+
+from os.path import abspath, dirname, isfile, realpath
+from contextlib import contextmanager
+from copy import copy
+from datetime import datetime
+from itertools import chain
+from subprocess import list2cmdline, call
+
+from lib.tests import (
+    RefTestCase,
+    get_jitflags,
+    get_cpu_count,
+    get_environment_overlay,
+    change_env,
+)
+from lib.results import ResultsSink, TestOutput
+from lib.progressbar import ProgressBar
+from lib.adaptor import xdr_annotate
+from tempfile import TemporaryDirectory
+
+if sys.platform.startswith("linux") or sys.platform.startswith("darwin"):
+    from lib.tasks_unix import run_all_tests
+else:
+    from lib.tasks_win import run_all_tests
+
+here = dirname(abspath(__file__))
+
+
+@contextmanager
+def changedir(dirname):
+    pwd = os.getcwd()
+    os.chdir(dirname)
+    try:
+        yield
+    finally:
+        os.chdir(pwd)
+
+
+class PathOptions(object):
+    def __init__(self, location, requested_paths, excluded_paths):
+        self.requested_paths = requested_paths
+        self.excluded_files, self.excluded_dirs = PathOptions._split_files_and_dirs(
+            location, excluded_paths
+        )
+
+    @staticmethod
+    def _split_files_and_dirs(location, paths):
+        """Split up a set of paths into files and directories"""
+        files, dirs = set(), set()
+        for path in paths:
+            fullpath = os.path.join(location, path)
+            if path.endswith("/"):
+                dirs.add(path[:-1])
+            elif os.path.isdir(fullpath):
+                dirs.add(path)
+            elif os.path.exists(fullpath):
+                files.add(path)
+
+        return files, dirs
+
+    def should_run(self, filename):
+        # If any tests are requested by name, skip tests that do not match.
+        if self.requested_paths and not any(
+            req in filename for req in self.requested_paths
+        ):
+            return False
+
+        # Skip excluded tests.
+        if filename in self.excluded_files:
+            return False
+
+        for dir in self.excluded_dirs:
+            if filename.startswith(dir + "/"):
+                return False
+
+        return True
+
+
+def parse_args():
+    """
+    Parse command line arguments.
+    Returns a tuple of: (options, js_shell, requested_paths, excluded_paths)
+        options :object: The raw OptionParser output.
+        js_shell :str: The absolute location of the shell to test with.
+        requested_paths :set<str>: Test paths specially requested on the CLI.
+        excluded_paths :set<str>: Test paths specifically excluded by the CLI.
+    """
+    from argparse import ArgumentParser
+
+    op = ArgumentParser(
+        description="Run jstests JS shell tests",
+        epilog="Shell output format: [ pass | fail | timeout | skip ] progress | time",
+    )
+    op.add_argument(
+        "--xul-info",
+        dest="xul_info_src",
+        help="config data for xulRuntime" " (avoids search for config/autoconf.mk)",
+    )
+
+    harness_og = op.add_argument_group("Harness Controls", "Control how tests are run.")
+    harness_og.add_argument(
+        "-j",
+        "--worker-count",
+        type=int,
+        default=max(1, get_cpu_count()),
+        help="Number of tests to run in parallel" " (default %(default)s)",
+    )
+    harness_og.add_argument(
+        "-t",
+        "--timeout",
+        type=float,
+        default=150.0,
+        help="Set maximum time a test is allowed to run" " (in seconds).",
+    )
+    harness_og.add_argument(
+        "--show-slow",
+        action="store_true",
+        help="Show tests taking longer than a minimum time" " (in seconds).",
+    )
+    harness_og.add_argument(
+        "--slow-test-threshold",
+        type=float,
+        default=5.0,
+        help="Time in seconds a test can take until it is"
+        " considered slow (default %(default)s).",
+    )
+    harness_og.add_argument(
+        "-a",
+        "--args",
+        dest="shell_args",
+        default="",
+        help="Extra args to pass to the JS shell.",
+    )
+    harness_og.add_argument(
+        "--feature-args",
+        dest="feature_args",
+        default="",
+        help="Extra args to pass to the JS shell even when feature-testing.",
+    )
+    harness_og.add_argument(
+        "--jitflags",
+        dest="jitflags",
+        default="none",
+        type=str,
+        help="IonMonkey option combinations. One of all,"
+        " debug, ion, and none (default %(default)s).",
+    )
+    harness_og.add_argument(
+        "--tbpl",
+        action="store_true",
+        help="Runs each test in all configurations used by tbpl" " tests.",
+    )
+    harness_og.add_argument(
+        "--tbpl-debug",
+        action="store_true",
+        help="Runs each test in some faster configurations used by" " tbpl tests.",
+    )
+    harness_og.add_argument(
+        "-g", "--debug", action="store_true", help="Run a test in debugger."
+    )
+    harness_og.add_argument(
+        "--debugger", default="gdb -q --args", help="Debugger command."
+    )
+    harness_og.add_argument(
+        "-J", "--jorendb", action="store_true", help="Run under JS debugger."
+    )
+    harness_og.add_argument(
+        "--passthrough",
+        action="store_true",
+        help="Run tests with stdin/stdout attached to" " caller.",
+    )
+    harness_og.add_argument(
+        "--test-reflect-stringify",
+        dest="test_reflect_stringify",
+        help="instead of running tests, use them to test the "
+        "Reflect.stringify code in the specified file",
+    )
+    harness_og.add_argument(
+        "--valgrind", action="store_true", help="Run tests in valgrind."
+    )
+    harness_og.add_argument(
+        "--valgrind-args", default="", help="Extra args to pass to valgrind."
+    )
+    harness_og.add_argument(
+        "--rr",
+        action="store_true",
+        help="Run tests under RR record-and-replay debugger.",
+    )
+    harness_og.add_argument(
+        "-C",
+        "--check-output",
+        action="store_true",
+        help="Run tests to check output for different jit-flags",
+    )
+    harness_og.add_argument(
+        "--remote", action="store_true", help="Run tests on a remote device"
+    )
+    harness_og.add_argument(
+        "--deviceIP",
+        action="store",
+        type=str,
+        dest="device_ip",
+        help="IP address of remote device to test",
+    )
+    harness_og.add_argument(
+        "--devicePort",
+        action="store",
+        type=int,
+        dest="device_port",
+        default=20701,
+        help="port of remote device to test",
+    )
+    harness_og.add_argument(
+        "--deviceSerial",
+        action="store",
+        type=str,
+        dest="device_serial",
+        default=None,
+        help="ADB device serial number of remote device to test",
+    )
+    harness_og.add_argument(
+        "--remoteTestRoot",
+        dest="remote_test_root",
+        action="store",
+        type=str,
+        default="/data/local/tmp/test_root",
+        help="The remote directory to use as test root" " (e.g. %(default)s)",
+    )
+    harness_og.add_argument(
+        "--localLib",
+        dest="local_lib",
+        action="store",
+        type=str,
+        help="The location of libraries to push -- preferably" " stripped",
+    )
+    harness_og.add_argument(
+        "--no-xdr",
+        dest="use_xdr",
+        action="store_false",
+        help="Whether to disable caching of self-hosted parsed content in XDR format.",
+    )
+
+    input_og = op.add_argument_group("Inputs", "Change what tests are run.")
+    input_og.add_argument(
+        "-f",
+        "--file",
+        dest="test_file",
+        action="append",
+        help="Get tests from the given file.",
+    )
+    input_og.add_argument(
+        "-x",
+        "--exclude-file",
+        action="append",
+        help="Exclude tests from the given file.",
+    )
+    input_og.add_argument(
+        "--wpt",
+        dest="wpt",
+        choices=["enabled", "disabled", "if-running-everything"],
+        default="if-running-everything",
+        help="Enable or disable shell web-platform-tests "
+        "(default: enable if no test paths are specified).",
+    )
+    input_og.add_argument(
+        "--include",
+        action="append",
+        dest="requested_paths",
+        default=[],
+        help="Include the given test file or directory.",
+    )
+    input_og.add_argument(
+        "--exclude",
+        action="append",
+        dest="excluded_paths",
+        default=[],
+        help="Exclude the given test file or directory.",
+    )
+    input_og.add_argument(
+        "-d",
+        "--exclude-random",
+        dest="random",
+        action="store_false",
+        help='Exclude tests marked as "random."',
+    )
+    input_og.add_argument(
+        "--run-skipped", action="store_true", help='Run tests marked as "skip."'
+    )
+    input_og.add_argument(
+        "--run-only-skipped",
+        action="store_true",
+        help='Run only tests marked as "skip."',
+    )
+    input_og.add_argument(
+        "--run-slow-tests",
+        action="store_true",
+        help='Do not skip tests marked as "slow."',
+    )
+    input_og.add_argument(
+        "--no-extensions",
+        action="store_true",
+        help="Run only tests conforming to the ECMAScript 5" " standard.",
+    )
+    input_og.add_argument(
+        "--repeat", type=int, default=1, help="Repeat tests the given number of times."
+    )
+
+    output_og = op.add_argument_group("Output", "Modify the harness and tests output.")
+    output_og.add_argument(
+        "-s",
+        "--show-cmd",
+        action="store_true",
+        help="Show exact commandline used to run each test.",
+    )
+    output_og.add_argument(
+        "-o",
+        "--show-output",
+        action="store_true",
+        help="Print each test's output to the file given by" " --output-file.",
+    )
+    output_og.add_argument(
+        "-F",
+        "--failed-only",
+        action="store_true",
+        help="If a --show-* option is given, only print" " output for failed tests.",
+    )
+    output_og.add_argument(
+        "--no-show-failed",
+        action="store_true",
+        help="Don't print output for failed tests" " (no-op with --show-output).",
+    )
+    output_og.add_argument(
+        "-O",
+        "--output-file",
+        help="Write all output to the given file" " (default: stdout).",
+    )
+    output_og.add_argument(
+        "--failure-file", help="Write all not-passed tests to the given file."
+    )
+    output_og.add_argument(
+        "--no-progress",
+        dest="hide_progress",
+        action="store_true",
+        help="Do not show the progress bar.",
+    )
+    output_og.add_argument(
+        "--tinderbox",
+        dest="format",
+        action="store_const",
+        const="automation",
+        help="Use automation-parseable output format.",
+    )
+    output_og.add_argument(
+        "--format",
+        dest="format",
+        default="none",
+        choices=["automation", "none"],
+        help="Output format. Either automation or none" " (default %(default)s).",
+    )
+    output_og.add_argument(
+        "--log-wptreport",
+        dest="wptreport",
+        action="store",
+        help="Path to write a Web Platform Tests report (wptreport)",
+    )
+    output_og.add_argument(
+        "--this-chunk", type=int, default=1, help="The test chunk to run."
+    )
+    output_og.add_argument(
+        "--total-chunks", type=int, default=1, help="The total number of test chunks."
+    )
+
+    special_og = op.add_argument_group(
+        "Special", "Special modes that do not run tests."
+    )
+    special_og.add_argument(
+        "--make-manifests",
+        metavar="BASE_TEST_PATH",
+        help="Generate reftest manifest files.",
+    )
+
+    op.add_argument("--js-shell", metavar="JS_SHELL", help="JS shell to run tests with")
+    op.add_argument(
+        "-z", "--gc-zeal", help="GC zeal mode to use when running the shell"
+    )
+
+    options, args = op.parse_known_args()
+
+    # Need a shell unless in a special mode.
+    if not options.make_manifests:
+        if not args:
+            op.error("missing JS_SHELL argument")
+        options.js_shell = os.path.abspath(args.pop(0))
+
+    requested_paths = set(args)
+
+    # Valgrind, gdb, and rr are mutually exclusive.
+    if sum(map(bool, (options.valgrind, options.debug, options.rr))) > 1:
+        op.error("--valgrind, --debug, and --rr are mutually exclusive.")
+
+    # Fill the debugger field, as needed.
+    if options.debug:
+        if options.debugger == "lldb":
+            debugger_prefix = ["lldb", "--"]
+        else:
+            debugger_prefix = options.debugger.split()
+    else:
+        debugger_prefix = []
+
+    if options.valgrind:
+        debugger_prefix = ["valgrind"] + options.valgrind_args.split()
+        if os.uname()[0] == "Darwin":
+            debugger_prefix.append("--dsymutil=yes")
+        options.show_output = True
+    if options.rr:
+        debugger_prefix = ["rr", "record"]
+
+    js_cmd_args = shlex.split(options.shell_args) + shlex.split(options.feature_args)
+    if options.jorendb:
+        options.passthrough = True
+        options.hide_progress = True
+        options.worker_count = 1
+        debugger_path = realpath(
+            os.path.join(
+                abspath(dirname(abspath(__file__))),
+                "..",
+                "..",
+                "examples",
+                "jorendb.js",
+            )
+        )
+        js_cmd_args.extend(["-d", "-f", debugger_path, "--"])
+    prefix = RefTestCase.build_js_cmd_prefix(
+        options.js_shell, js_cmd_args, debugger_prefix
+    )
+
+    # If files with lists of tests to run were specified, add them to the
+    # requested tests set.
+    if options.test_file:
+        for test_file in options.test_file:
+            requested_paths |= set(
+                [line.strip() for line in open(test_file).readlines()]
+            )
+
+    excluded_paths = set(options.excluded_paths)
+
+    # If files with lists of tests to exclude were specified, add them to the
+    # excluded tests set.
+    if options.exclude_file:
+        for filename in options.exclude_file:
+            with open(filename, "r") as fp:
+                for line in fp:
+                    if line.startswith("#"):
+                        continue
+                    line = line.strip()
+                    if not line:
+                        continue
+                    excluded_paths.add(line)
+
+    # Handle output redirection, if requested and relevant.
+    options.output_fp = sys.stdout
+    if options.output_file:
+        if not options.show_cmd:
+            options.show_output = True
+        try:
+            options.output_fp = open(options.output_file, "w")
+        except IOError as ex:
+            raise SystemExit("Failed to open output file: " + str(ex))
+
+    # Hide the progress bar if it will get in the way of other output.
+    options.hide_progress = (
+        options.format == "automation"
+        or not ProgressBar.conservative_isatty()
+        or options.hide_progress
+    )
+
+    return (options, prefix, requested_paths, excluded_paths)
+
+
+def load_wpt_tests(xul_tester, requested_paths, excluded_paths, update_manifest=True):
+    """Return a list of `RefTestCase` objects for the jsshell testharness.js
+    tests filtered by the given paths and debug-ness."""
+    repo_root = abspath(os.path.join(here, "..", "..", ".."))
+    wp = os.path.join(repo_root, "testing", "web-platform")
+    wpt = os.path.join(wp, "tests")
+
+    sys_paths = [
+        "python/mozterm",
+        "python/mozboot",
+        "testing/mozbase/mozcrash",
+        "testing/mozbase/mozdevice",
+        "testing/mozbase/mozfile",
+        "testing/mozbase/mozinfo",
+        "testing/mozbase/mozleak",
+        "testing/mozbase/mozlog",
+        "testing/mozbase/mozprocess",
+        "testing/mozbase/mozprofile",
+        "testing/mozbase/mozrunner",
+        "testing/mozbase/mozversion",
+        "testing/web-platform/",
+        "testing/web-platform/tests/tools",
+        "testing/web-platform/tests/tools/third_party/html5lib",
+        "testing/web-platform/tests/tools/third_party/webencodings",
+        "testing/web-platform/tests/tools/wptrunner",
+        "testing/web-platform/tests/tools/wptserve",
+        "third_party/python/requests",
+    ]
+    abs_sys_paths = [os.path.join(repo_root, path) for path in sys_paths]
+
+    failed = False
+    for path in abs_sys_paths:
+        if not os.path.isdir(path):
+            failed = True
+            print("Could not add '%s' to the path" % path)
+    if failed:
+        return []
+
+    sys.path[0:0] = abs_sys_paths
+
+    import manifestupdate
+    from wptrunner import products, testloader, wptcommandline, wpttest, wptlogging
+
+    manifest_root = tempfile.gettempdir()
+    (maybe_dist, maybe_bin) = os.path.split(os.path.dirname(xul_tester.js_bin))
+    if maybe_bin == "bin":
+        (maybe_root, maybe_dist) = os.path.split(maybe_dist)
+        if maybe_dist == "dist":
+            if os.path.exists(os.path.join(maybe_root, "_tests")):
+                # Assume this is a gecko objdir.
+                manifest_root = maybe_root
+
+    logger = wptlogging.setup({}, {})
+
+    test_manifests = manifestupdate.run(
+        repo_root, manifest_root, logger, update=update_manifest
+    )
+
+    kwargs = vars(wptcommandline.create_parser().parse_args([]))
+    kwargs.update(
+        {
+            "config": os.path.join(
+                manifest_root, "_tests", "web-platform", "wptrunner.local.ini"
+            ),
+            "gecko_e10s": False,
+            "verify": False,
+            "wasm": xul_tester.test("wasmIsSupported()"),
+        }
+    )
+    wptcommandline.set_from_config(kwargs)
+
+    def filter_jsshell_tests(it):
+        for item_type, path, tests in it:
+            tests = set(item for item in tests if item.jsshell)
+            if tests:
+                yield item_type, path, tests
+
+    run_info_extras = products.Product(kwargs["config"], "firefox").run_info_extras(
+        **kwargs
+    )
+    run_info = wpttest.get_run_info(
+        kwargs["run_info"],
+        "firefox",
+        debug=xul_tester.test("isDebugBuild"),
+        extras=run_info_extras,
+    )
+    release_or_beta = xul_tester.test("getBuildConfiguration().release_or_beta")
+    run_info["release_or_beta"] = release_or_beta
+    run_info["nightly_build"] = not release_or_beta
+    early_beta_or_earlier = xul_tester.test(
+        "getBuildConfiguration().early_beta_or_earlier"
+    )
+    run_info["early_beta_or_earlier"] = early_beta_or_earlier
+
+    path_filter = testloader.TestFilter(
+        test_manifests, include=requested_paths, exclude=excluded_paths
+    )
+    loader = testloader.TestLoader(
+        test_manifests,
+        ["testharness"],
+        run_info,
+        manifest_filters=[path_filter, filter_jsshell_tests],
+    )
+
+    extra_helper_paths = [
+        os.path.join(here, "web-platform-test-shims.js"),
+        os.path.join(wpt, "resources", "testharness.js"),
+        os.path.join(here, "testharnessreport.js"),
+    ]
+
+    def resolve(test_path, script):
+        if script.startswith("/"):
+            return os.path.join(wpt, script[1:])
+
+        return os.path.join(wpt, os.path.dirname(test_path), script)
+
+    tests = []
+    for test in loader.tests["testharness"]:
+        test_path = os.path.relpath(test.path, wpt)
+        scripts = [resolve(test_path, s) for s in test.scripts]
+        extra_helper_paths_for_test = extra_helper_paths + scripts
+
+        # We must create at least one test with the default options, along with
+        # one test for each option given in a test-also annotation.
+        options = [None]
+        for m in test.itermeta():
+            if m.has_key("test-also"):  # NOQA: W601
+                options += m.get("test-also").split()
+        for option in options:
+            test_case = RefTestCase(
+                wpt,
+                test_path,
+                extra_helper_paths=extra_helper_paths_for_test[:],
+                wpt=test,
+            )
+            if option:
+                test_case.options.append(option)
+            tests.append(test_case)
+    return tests
+
+
+def load_tests(options, requested_paths, excluded_paths):
+    """
+    Returns a tuple: (test_count, test_gen)
+        test_count: [int] Number of tests that will be in test_gen
+        test_gen: [iterable<Test>] Tests found that should be run.
+    """
+    import lib.manifest as manifest
+
+    if options.js_shell is None:
+        xul_tester = manifest.NullXULInfoTester()
+    else:
+        if options.xul_info_src is None:
+            xul_info = manifest.XULInfo.create(options.js_shell)
+        else:
+            xul_abi, xul_os, xul_debug = options.xul_info_src.split(r":")
+            xul_debug = xul_debug.lower() == "true"
+            xul_info = manifest.XULInfo(xul_abi, xul_os, xul_debug)
+        feature_args = shlex.split(options.feature_args)
+        xul_tester = manifest.XULInfoTester(xul_info, options, feature_args)
+
+    test_dir = dirname(abspath(__file__))
+    path_options = PathOptions(test_dir, requested_paths, excluded_paths)
+    test_count = manifest.count_tests(test_dir, path_options)
+    test_gen = manifest.load_reftests(test_dir, path_options, xul_tester)
+
+    # WPT tests are already run in the browser in their own harness.
+    wpt_enabled = options.wpt == "enabled" or (
+        options.wpt == "if-running-everything"
+        and len(requested_paths) == 0
+        and not options.make_manifests
+    )
+    if wpt_enabled:
+        wpt_tests = load_wpt_tests(xul_tester, requested_paths, excluded_paths)
+        test_count += len(wpt_tests)
+        test_gen = chain(test_gen, wpt_tests)
+
+    if options.test_reflect_stringify is not None:
+
+        def trs_gen(tests):
+            for test in tests:
+                test.test_reflect_stringify = options.test_reflect_stringify
+                # Even if the test is not normally expected to pass, we still
+                # expect reflect-stringify to be able to handle it.
+                test.expect = True
+                test.random = False
+                test.slow = False
+                yield test
+
+        test_gen = trs_gen(test_gen)
+
+    if options.make_manifests:
+        manifest.make_manifests(options.make_manifests, test_gen)
+        sys.exit()
+
+    # Create a new test list. Apply each TBPL configuration to every test.
+    flags_list = None
+    if options.tbpl:
+        flags_list = get_jitflags("all")
+    elif options.tbpl_debug:
+        flags_list = get_jitflags("debug")
+    else:
+        flags_list = get_jitflags(options.jitflags, none=None)
+
+    if flags_list:
+
+        def flag_gen(tests):
+            for test in tests:
+                for jitflags in flags_list:
+                    tmp_test = copy(test)
+                    tmp_test.jitflags = copy(test.jitflags)
+                    tmp_test.jitflags.extend(jitflags)
+                    yield tmp_test
+
+        test_count = test_count * len(flags_list)
+        test_gen = flag_gen(test_gen)
+
+    if options.test_file:
+        paths = set()
+        for test_file in options.test_file:
+            paths |= set([line.strip() for line in open(test_file).readlines()])
+        test_gen = (_ for _ in test_gen if _.path in paths)
+
+    if options.no_extensions:
+        pattern = os.sep + "extensions" + os.sep
+        test_gen = (_ for _ in test_gen if pattern not in _.path)
+
+    if not options.random:
+        test_gen = (_ for _ in test_gen if not _.random)
+
+    if options.run_only_skipped:
+        options.run_skipped = True
+        test_gen = (_ for _ in test_gen if not _.enable)
+
+    if not options.run_slow_tests:
+        test_gen = (_ for _ in test_gen if not _.slow)
+
+    if options.repeat:
+        test_gen = (test for test in test_gen for i in range(options.repeat))
+        test_count *= options.repeat
+
+    return test_count, test_gen
+
+
+def main():
+    options, prefix, requested_paths, excluded_paths = parse_args()
+    if options.js_shell is not None and not (
+        isfile(options.js_shell) and os.access(options.js_shell, os.X_OK)
+    ):
+        if (
+            platform.system() != "Windows"
+            or isfile(options.js_shell)
+            or not isfile(options.js_shell + ".exe")
+            or not os.access(options.js_shell + ".exe", os.X_OK)
+        ):
+            print("Could not find executable shell: " + options.js_shell)
+            return 1
+
+    test_count, test_gen = load_tests(options, requested_paths, excluded_paths)
+    test_environment = get_environment_overlay(options.js_shell, options.gc_zeal)
+
+    if test_count == 0:
+        print("no tests selected")
+        return 1
+
+    test_dir = dirname(abspath(__file__))
+
+    if options.debug:
+        if test_count > 1:
+            print(
+                "Multiple tests match command line arguments,"
+                " debugger can only run one"
+            )
+            for tc in test_gen:
+                print("    {}".format(tc.path))
+            return 2
+
+        with changedir(test_dir), change_env(
+            test_environment
+        ), TemporaryDirectory() as tempdir:
+            cmd = next(test_gen).get_command(prefix, tempdir)
+            if options.show_cmd:
+                print(list2cmdline(cmd))
+            call(cmd)
+        return 0
+
+    # The test_gen generator is converted into a list in
+    # run_all_tests. Go ahead and do it here so we can apply
+    # chunking.
+    #
+    # If chunking is enabled, determine which tests are part of this chunk.
+    # This code was adapted from testing/mochitest/runtestsremote.py.
+    if options.total_chunks > 1:
+        tests_per_chunk = math.ceil(test_count / float(options.total_chunks))
+        start = int(round((options.this_chunk - 1) * tests_per_chunk))
+        end = int(round(options.this_chunk * tests_per_chunk))
+        test_gen = list(test_gen)[start:end]
+
+    if options.remote:
+        results = ResultsSink("jstests", options, test_count)
+        try:
+            from lib.remote import init_remote_dir, init_device
+
+            device = init_device(options)
+            tempdir = posixpath.join(options.remote_test_root, "tmp")
+            jtd_tests = posixpath.join(options.remote_test_root, "tests", "tests")
+            init_remote_dir(device, jtd_tests)
+            device.push(test_dir, jtd_tests, timeout=600)
+            device.chmod(jtd_tests, recursive=True)
+            prefix[0] = options.js_shell
+            if options.use_xdr:
+                test_gen = xdr_annotate(test_gen, options)
+            for test in test_gen:
+                out = run_test_remote(test, device, prefix, tempdir, options)
+                results.push(out)
+            results.finish(True)
+        except KeyboardInterrupt:
+            results.finish(False)
+
+        return 0 if results.all_passed() else 1
+
+    with changedir(test_dir), change_env(
+        test_environment
+    ), TemporaryDirectory() as tempdir:
+        results = ResultsSink("jstests", options, test_count)
+        try:
+            for out in run_all_tests(test_gen, prefix, tempdir, results.pb, options):
+                results.push(out)
+            results.finish(True)
+        except KeyboardInterrupt:
+            results.finish(False)
+
+        return 0 if results.all_passed() else 1
+
+    return 0
+
+
+def run_test_remote(test, device, prefix, tempdir, options):
+    from mozdevice import ADBDevice, ADBProcessError
+
+    cmd = test.get_command(prefix, tempdir)
+    test_root_parent = os.path.dirname(test.root)
+    jtd_tests = posixpath.join(options.remote_test_root, "tests")
+    cmd = [_.replace(test_root_parent, jtd_tests) for _ in cmd]
+
+    env = {"TZ": "PST8PDT", "LD_LIBRARY_PATH": os.path.dirname(prefix[0])}
+
+    adb_cmd = ADBDevice._escape_command_line(cmd)
+    start = datetime.now()
+    try:
+        # Allow ADBError or ADBTimeoutError to terminate the test run,
+        # but handle ADBProcessError in order to support the use of
+        # non-zero exit codes in the JavaScript shell tests.
+        out = device.shell_output(
+            adb_cmd, env=env, cwd=options.remote_test_root, timeout=int(options.timeout)
+        )
+        returncode = 0
+    except ADBProcessError as e:
+        # Treat ignorable intermittent adb communication errors as
+        # skipped tests.
+        out = str(e.adb_process.stdout)
+        returncode = e.adb_process.exitcode
+        re_ignore = re.compile(r"error: (closed|device .* not found)")
+        if returncode == 1 and re_ignore.search(out):
+            print("Skipping {} due to ignorable adb error {}".format(test.path, out))
+            test.skip_if_cond = "true"
+            returncode = test.SKIPPED_EXIT_STATUS
+
+    elapsed = (datetime.now() - start).total_seconds()
+
+    # We can't distinguish between stdout and stderr so we pass
+    # the same buffer to both.
+    return TestOutput(test, cmd, out, out, returncode, elapsed, False)
+
+
+if __name__ == "__main__":
+    sys.exit(main())

Property changes on: js/src/tests/jstests.py
___________________________________________________________________
Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property
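In jstests.py, PathOptions decides which files from the test tree actually run: requested paths match as substrings, excluded entries ending in "/" (or naming an existing directory) exclude whole subtrees, and other excluded entries must match a file exactly. A hypothetical usage sketch, assuming the module can be imported as jstests:

from jstests import PathOptions

opts = PathOptions("/path/to/js/src/tests", requested_paths=set(), excluded_paths={"wasm/"})
opts.should_run("non262/Array/sort.js")  # True: nothing requested, nothing excludes it
opts.should_run("wasm/binary.js")        # False: under the excluded "wasm" directory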
Index: js/src/tests/lib/tempfile.py
===================================================================
--- js/src/tests/lib/tempfile.py	(nonexistent)
+++ js/src/tests/lib/tempfile.py	(revision 228)
@@ -0,0 +1,5 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+from __future__ import absolute_import
+from tempfile import TemporaryDirectory
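As added here, lib/tempfile.py simply re-exports TemporaryDirectory from the standard library, which exists only on Python 3. If the harness still had to run under Python 2, a guarded fallback along these lines would be needed (a sketch, not part of the patch):

try:
    from tempfile import TemporaryDirectory
except ImportError:  # Python 2: no TemporaryDirectory in the stdlib
    import shutil
    import tempfile
    from contextlib import contextmanager

    @contextmanager
    def TemporaryDirectory(*args, **kwds):
        dirname = tempfile.mkdtemp(*args, **kwds)
        try:
            yield dirname
        finally:
            shutil.rmtree(dirname)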