if directory:
    llvm_config.with_environment('PATH', directory, append_path=True)
+# This test suite calls %{lit} to check lit's behavior for the sample test
+# suites in %{inputs}. The outer suite's results depend in part on %{lit}'s
+# textual output, which includes the output of FileCheck calls within
+# %{inputs}'s test suites. Thus, %{lit} clears environment variables that
+# can affect FileCheck's output.
config.substitutions.append(('%{inputs}', os.path.join(
    config.test_source_root, 'Inputs')))
-config.substitutions.append(('%{lit}', "%%{python} %s" % (
- os.path.join(lit_path, 'lit.py'),)))
+config.substitutions.append(('%{lit}',
+    "{env} %{{python}} {lit}".format(
+        env="env -u FILECHECK_OPTS -u FILECHECK_DUMP_INPUT_ON_FAILURE",
+        lit=os.path.join(lit_path, 'lit.py'))))
config.substitutions.append(('%{python}', '"%s"' % (sys.executable)))
# Enable coverage.py reporting, assuming the coverage module has been installed
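The `env -u` prefix baked into `%{lit}` above removes the two FileCheck variables from the child environment before lit starts. A minimal Python sketch of the same effect (the `run_lit` helper is hypothetical, not part of lit's API):

```python
import os
import subprocess
import sys

def run_lit(lit_py, *lit_args):
    # Hypothetical helper mirroring the "env -u FILECHECK_OPTS
    # -u FILECHECK_DUMP_INPUT_ON_FAILURE" prefix: drop the variables from
    # the child environment so FileCheck calls inside the sample suites
    # produce deterministic output.
    env = os.environ.copy()
    for var in ('FILECHECK_OPTS', 'FILECHECK_DUMP_INPUT_ON_FAILURE'):
        env.pop(var, None)  # like `env -u VAR`; a no-op if VAR is unset
    return subprocess.run([sys.executable, lit_py, *lit_args], env=env)
```

Clearing the variables in the substitution itself, rather than in each RUN line, means individual tests no longer need their own `env -u` prefix, as the hunk below shows.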
# Check that -vv makes the line number of the failing RUN command clear.
# (-v is actually sufficient in the case of the internal shell.)
#
-# RUN: env -u FILECHECK_OPTS not %{lit} -j 1 -vv %{inputs}/shtest-run-at-line > %t.out
+# RUN: not %{lit} -j 1 -vv %{inputs}/shtest-run-at-line > %t.out
# RUN: FileCheck --input-file %t.out %s
#
# END.
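For reference, a simplified sketch of how the RUN line above expands once the `%{lit}` and `%{inputs}` substitutions apply. The paths are placeholders, and this is plain string replacement; lit itself applies substitutions with regular expressions and expands `%{python}` as a separate substitution:

```python
# Illustrative only: placeholder paths, simplified substitution.
substitutions = [
    ('%{lit}',
     'env -u FILECHECK_OPTS -u FILECHECK_DUMP_INPUT_ON_FAILURE'
     ' "/usr/bin/python" /path/to/lit.py'),
    ('%{inputs}', '/path/to/tests/Inputs'),
]
cmd = 'not %{lit} -j 1 -vv %{inputs}/shtest-run-at-line'
for pattern, replacement in substitutions:
    cmd = cmd.replace(pattern, replacement)
print(cmd)
```

Because the environment scrubbing now lives in the substitution, the per-test `env -u FILECHECK_OPTS` prefix removed above is redundant rather than lost.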