+2005-09-30 hugo303
+
+ * src/check.c, src/check.h.in, src/check_impl.h, src/check_print.c,
+ src/check_run.c, src/check_str.c, tests/Makefile.am,
+ tests/check_check_fixture.c, tests/check_check_master.c,
+ tests/ex_xml_output.c, tests/test_log_output.sh, tests/test_output.sh,
+ tests/test_xml_output.sh:
+
+ Added a new kind of test, looping tests, which are called with a new
+ context for each loop iteration. This makes them ideal for table based
+ tests. Previously, with the loop in the test itself, only the first
+ error was caught and then the test would exit. Now all errors are
+ shown at once which should help in debugging.
+
2005-09-15 hugo303
* configure.in, tests/check_check_sub.c, tests/check_check.h,
+Added a new kind of test, looping tests, which are called with a new
+context for each loop iteration. This makes them ideal for table based
+tests. Previously, with the loop in the test itself, only the first
+error was caught and then the test would exit. Now all errors are
+shown at once which should help in debugging.
+
+Added possibility to turn off timeout tests in check's own unit tests
+through configure option --enable-timeout-tests=no.
+
+Added coverage analysis for check's own unit tests.
+
+
Thu, Aug 25, 2005: Released Check 0.9.3
Applied debian patches from debian maintainer.
+
Fixed documentation bug #1216502.
+
gcc 2.95.3 compatibility fixed (patch #1161654, bug #1211672).
-Messaging refactored to make it work with forking tests, and also with threading tests on linux 2.4. Added check_fork and check_waitpid_and_exit to be used for forking tests. (bug # 1233585)
+
+Messaging refactored to make it work with forking tests, and also with
+threading tests on linux 2.4. Added check_fork and check_waitpid_and_exit
+to be used for forking tests. (bug # 1233585)
+
Timeout error message changed (feature request #1121452, bug #1160305).
-fix check.spec for fc3 x86_64 (patch #1111782)
+
+Fix check.spec for fc3 x86_64 (patch #1111782)
Fri, Nov 12, 2004: Released Check 0.9.2
list_add_end (s->tclst, tc);
}
-void _tcase_add_test (TCase *tc, TFun fn, const char *name, int signal)
+void _tcase_add_test (TCase *tc, TFun fn, const char *name, int signal, int start, int end)
{
TF * tf;
if (tc == NULL || fn == NULL || name == NULL)
return;
tf = emalloc (sizeof(TF)); /* freed in tcase_free */
tf->fn = fn;
+ tf->loop_start = start;
+ tf->loop_end = end;
tf->signal = signal; /* 0 means no signal expected */
tf->name = name;
list_add_end (tc->tflst, tf);
typedef struct TCase TCase;
/* type for a test function */
-typedef void (*TFun) (void);
+typedef void (*TFun) (int);
/* type for a setup/teardown function */
typedef void (*SFun) (void);
/* Add a test function with signal handling to a test case (macro version) */
#define tcase_add_test_raise_signal(tc,tf,signal) \
- _tcase_add_test((tc),(tf),"" # tf "",(signal))
+ _tcase_add_test((tc),(tf),"" # tf "",(signal), 0, 1)
+/* Add a looping test function to a test case (macro version)
+
+ The test will be called in a for(i = s; i < e; i++) loop with each
+ iteration being executed in a new context. The loop variable 'i' is
+ available in the test.
+ */
+#define tcase_add_loop_test(tc,tf,s,e) \
+ _tcase_add_test((tc),(tf),"" # tf "",0,(s),(e))
+
/* Add a test function to a test case
(function version -- use this when the macro won't work
*/
-void _tcase_add_test (TCase *tc, TFun tf, const char *fname, int signal);
+void _tcase_add_test (TCase *tc, TFun tf, const char *fname, int signal, int start, int end);
/* Add unchecked fixture setup/teardown functions to a test case
One must use braces within a START_/END_ pair to declare new variables
*/
#define START_TEST(__testname)\
-static void __testname (void)\
+static void __testname (int i)\
{\
tcase_fn_start (""# __testname, __FILE__, __LINE__);
typedef struct TF {
TFun fn;
+ int loop_start;
+ int loop_end;
const char *name;
int signal;
} TF;
enum ck_result_ctx ctx; /* When the result occurred */
char *file; /* File where the test occured */
int line; /* Line number where the test occurred */
+ int iter; /* The iteration value for looping tests */
const char *tcname; /* Test case that generated the result */
const char *tname; /* Test that generated the result */
char *msg; /* Failure message */
fprintf(file, " <path>%s</path>\n", path_name);
fprintf(file, " <fn>%s:%d</fn>\n", file_name, tr->line);
fprintf(file, " <id>%s</id>\n", tr->tname);
+ fprintf(file, " <iteration>%d</iteration>\n", tr->iter);
fprintf(file, " <description>%s</description>\n", tr->tcname);
fprintf(file, " <message>%s</message>\n", tr->msg);
fprintf(file, " </test>\n");
static void tcase_run_checked_teardown (TCase *tc);
static void srunner_iterate_tcase_tfuns (SRunner *sr, TCase *tc);
static void srunner_add_failure (SRunner *sr, TestResult *tf);
-static TestResult *tcase_run_tfun_fork (SRunner *sr, TCase *tc, TF *tf);
-static TestResult *tcase_run_tfun_nofork (SRunner *sr, TCase *tc, TF *tf);
+static TestResult *tcase_run_tfun_fork (SRunner *sr, TCase *tc, TF *tf, int i);
+static TestResult *tcase_run_tfun_nofork (SRunner *sr, TCase *tc, TF *tf, int i);
static TestResult *receive_result_info_fork (const char *tcname,
const char *tname,
+ int iter,
int status, int expected_signal);
static TestResult *receive_result_info_nofork (const char *tcname,
- const char *tname);
+ const char *tname,
+ int iter);
static void set_fork_info (TestResult *tr, int status, int expected_signal);
static void set_nofork_info (TestResult *tr);
static char *signal_msg (int sig);
tfl = tc->tflst;
for (list_front(tfl); !list_at_end (tfl); list_advance (tfl)) {
+ int i;
tfun = list_val (tfl);
- switch (srunner_fork_status(sr)) {
- case CK_FORK:
- tr = tcase_run_tfun_fork (sr, tc, tfun);
- break;
- case CK_NOFORK:
- tr = tcase_run_tfun_nofork (sr, tc, tfun);
- break;
- default:
- eprintf("Bad fork status in SRunner", __FILE__, __LINE__);
+
+ for (i = tfun->loop_start; i < tfun->loop_end; i++)
+ {
+ switch (srunner_fork_status(sr)) {
+ case CK_FORK:
+ tr = tcase_run_tfun_fork (sr, tc, tfun, i);
+ break;
+ case CK_NOFORK:
+ tr = tcase_run_tfun_nofork (sr, tc, tfun, i);
+ break;
+ default:
+ eprintf("Bad fork status in SRunner", __FILE__, __LINE__);
+ }
+ srunner_add_failure (sr, tr);
+ log_test_end(sr, tr);
}
- srunner_add_failure (sr, tr);
- log_test_end(sr, tr);
}
}
f = list_val(l);
f->fun();
- tr = receive_result_info_nofork (tc->name, "unchecked_setup");
+ tr = receive_result_info_nofork (tc->name, "unchecked_setup", 0);
if (tr->rtype != CK_PASS) {
srunner_add_failure(sr, tr);
/* Stop the setup and return the failure if nofork mode. */
if (fstat == CK_NOFORK) {
- tr = receive_result_info_nofork (tc->name, "checked_setup");
+ tr = receive_result_info_nofork (tc->name, "checked_setup", 0);
if (tr->rtype != CK_PASS) {
break;
}
static TestResult *receive_result_info_fork (const char *tcname,
const char *tname,
+ int iter,
int status, int expected_signal)
{
TestResult *tr;
eprintf("Failed to receive test result", __FILE__, __LINE__);
tr->tcname = tcname;
tr->tname = tname;
+ tr->iter = iter;
set_fork_info(tr, status, expected_signal);
return tr;
}
static TestResult *receive_result_info_nofork (const char *tcname,
- const char *tname)
+ const char *tname,
+ int iter)
{
TestResult *tr;
eprintf("Failed to receive test result", __FILE__, __LINE__);
tr->tcname = tcname;
tr->tname = tname;
+ tr->iter = iter;
set_nofork_info(tr);
return tr;
}
}
-static TestResult *tcase_run_tfun_nofork (SRunner *sr, TCase *tc, TF *tfun)
+static TestResult *tcase_run_tfun_nofork (SRunner *sr, TCase *tc, TF *tfun, int i)
{
TestResult *tr;
tr = tcase_run_checked_setup(sr, tc);
if (tr == NULL) {
- tfun->fn();
+ tfun->fn(i);
tcase_run_checked_teardown(tc);
- return receive_result_info_nofork(tc->name, tfun->name);
+ return receive_result_info_nofork(tc->name, tfun->name, i);
}
return tr;
}
-static TestResult *tcase_run_tfun_fork (SRunner *sr, TCase *tc, TF *tfun)
+static TestResult *tcase_run_tfun_fork (SRunner *sr, TCase *tc, TF *tfun, int i)
{
pid_t pid_w;
pid_t pid;
int status = 0;
-
pid = fork();
if (pid == -1)
- eprintf("Unable to fork:",__FILE__,__LINE__);
+ eprintf("Unable to fork:",__FILE__,__LINE__);
if (pid == 0) {
setpgid(0, 0);
group_pid = getpgid(0);
tcase_run_checked_setup(sr, tc);
- tfun->fn();
+ tfun->fn(i);
tcase_run_checked_teardown(tc);
exit(EXIT_SUCCESS);
} else {
killpg(pid, SIGKILL); /* Kill remaining processes. */
- return receive_result_info_fork(tc->name, tfun->name, status, tfun->signal);
+ return receive_result_info_fork(tc->name, tfun->name, i, status, tfun->signal);
}
static char *signal_error_msg (int signal_received, int signal_expected)
exact_msg = (tr->rtype == CK_ERROR) ? "(after this point) ": "";
- rstr = ck_strdup_printf ("%s:%d:%s:%s:%s: %s%s",
+ rstr = ck_strdup_printf ("%s:%d:%s:%s:%s:%d: %s%s",
tr->file, tr->line,
- tr_type_str(tr), tr->tcname, tr->tname,
+ tr_type_str(tr), tr->tcname, tr->tname, tr->iter,
exact_msg, tr->msg);
return rstr;
INCLUDES = -I$(srcdir)/../src
LDADD = $(LIBCHECK_A)
-CLEANFILES = *~ *.log test_logfile
+CLEANFILES = *~ *.log *.xml test_logfile
$(LIBCHECK_A):
$(MAKE) -C ../src
{
TestResult **tra;
char *trm;
- const char *trmexp = "check_check_fixture.c:14:S:Core:unchecked_setup: Test failure in fixture";
+ const char *trmexp = "check_check_fixture.c:14:S:Core:unchecked_setup:0: Test failure in fixture";
tra = srunner_failures(fixture_sr);
trm = tr_str(tra[0]);
trm = tr_str(srunner_failures(sr)[0]);
if (strstr(trm,
- "check_check_fixture.c:127:S:Core:test_sub_fail: Failed setup")
+ "check_check_fixture.c:127:S:Core:test_sub_fail:0: Failed setup")
== 0) {
snprintf(errm, sizeof(errm),
"Bad failed checked setup tr msg (%s)", trm);
trm = tr_str(srunner_failures(sr)[0]);
if (strstr(trm,
- "check_check_fixture.c:137:S:Core:test_sub_fail: "
+ "check_check_fixture.c:137:S:Core:test_sub_fail:0: "
"(after this point) Received signal 8")
== 0) {
snprintf(errm, sizeof(errm),
trm = tr_str(srunner_failures(sr)[0]);
if (strstr(trm,
- "check_check_fixture.c:132:S:Core:test_sub_pass: Failed teardown")
+ "check_check_fixture.c:132:S:Core:test_sub_pass:0: Failed teardown")
== 0) {
snprintf(errm, sizeof(errm),
"Bad failed checked teardown tr msg (%s)", trm);
trm = tr_str(srunner_failures(sr)[0]);
if (strstr(trm,
- "check_check_fixture.c:143:S:Core:test_sub_pass: "
+ "check_check_fixture.c:143:S:Core:test_sub_pass:0: "
"(after this point) Received signal 8")
== 0) {
snprintf(errm, sizeof(errm),
{ "Core", -1, CK_FAILURE, "We failed" }
};
+static int nr_of_master_tests = sizeof master_tests /sizeof master_tests[0];
START_TEST(test_check_nfailures)
{
int i;
int failed = 0;
- for (i = 0; i < sizeof master_tests /sizeof master_tests[0]; i++) {
+ for (i = 0; i < nr_of_master_tests; i++) {
if (master_tests[i].failure_type != CK_PASS) {
failed++;
}
START_TEST(test_check_ntests_run)
{
- fail_unless (sub_ntests == (sizeof master_tests /sizeof master_tests[0]),
+ fail_unless (sub_ntests == nr_of_master_tests,
"Unexpected number of tests run, %d.", sub_ntests);
}
END_TEST
START_TEST(test_check_all_ftypes)
{
- int i;
- for (i = 0; i < sub_ntests; i++) {
- fail_unless(master_tests[i].failure_type == tr_rtype(tr_all_array[i]),
- "Failure type wrong for test %d", i);
- }
+ fail_unless(master_tests[i].failure_type == tr_rtype(tr_all_array[i]),
+ "Failure type wrong for test %d", i);
}
END_TEST
tcase_add_test (tc_core, test_check_failure_lfiles);
tcase_add_test (tc_core, test_check_tcnames);
tcase_add_test (tc_core, test_check_all_msgs);
- tcase_add_test (tc_core, test_check_all_ftypes);
+ tcase_add_loop_test (tc_core, test_check_all_ftypes, 0, nr_of_master_tests);
tcase_add_unchecked_fixture(tc_fixture, test_fixture_setup,
test_fixture_teardown);
/* add the test 3 times to make sure we adequately test
}
END_TEST
+START_TEST(test_loop)
+{
+ fail_unless (i==1, "Iteration %d failed", i);
+}
+END_TEST
+
static Suite *make_s1_suite (void)
{
Suite *s;
tc = tcase_create ("Core");
suite_add_tcase(s, tc);
tcase_add_test (tc, test_pass2);
+ tcase_add_loop_test(tc, test_loop, 0, 3);
return s;
}
fi
expected="Running suite S1
-${lsrc}ex_log_output.c:8:P:Core:test_pass: Passed
-${lsrc}ex_log_output.c:14:F:Core:test_fail: Failure
-${lsrc}ex_log_output.c:18:E:Core:test_exit: (after this point) Early exit with return value 1
+${lsrc}ex_log_output.c:8:P:Core:test_pass:0: Passed
+${lsrc}ex_log_output.c:14:F:Core:test_fail:0: Failure
+${lsrc}ex_log_output.c:18:E:Core:test_exit:0: (after this point) Early exit with return value 1
Running suite S2
-${lsrc}ex_log_output.c:26:P:Core:test_pass2: Passed
+${lsrc}ex_log_output.c:26:P:Core:test_pass2:0: Passed
Results for all suites run:
50%: Checks: 4, Failures: 1, Errors: 1"
33%: Checks: 3, Failures: 1, Errors: 1"
t2="xRunning suite(s): Master
33%: Checks: 3, Failures: 1, Errors: 1
-${lsrc}ex_output.c:14:F:Core:test_fail: Failure
-${lsrc}ex_output.c:18:E:Core:test_exit: (after this point) Early exit with return value 1"
+${lsrc}ex_output.c:14:F:Core:test_fail:0: Failure
+${lsrc}ex_output.c:18:E:Core:test_exit:0: (after this point) Early exit with return value 1"
t3="xRunning suite(s): Master
33%: Checks: 3, Failures: 1, Errors: 1
-${lsrc}ex_output.c:8:P:Core:test_pass: Passed
-${lsrc}ex_output.c:14:F:Core:test_fail: Failure
-${lsrc}ex_output.c:18:E:Core:test_exit: (after this point) Early exit with return value 1"
+${lsrc}ex_output.c:8:P:Core:test_pass:0: Passed
+${lsrc}ex_output.c:14:F:Core:test_fail:0: Failure
+${lsrc}ex_output.c:18:E:Core:test_exit:0: (after this point) Early exit with return value 1"
op0=`./ex_output CK_SILENT`
op1=`./ex_output CK_MINIMAL`
<test result=\"success\">
<fn>ex_xml_output.c:8</fn>
<id>test_pass</id>
+ <iteration>0</iteration>
<description>Core</description>
<message>Passed</message>
</test>
<test result=\"failure\">
<fn>ex_xml_output.c:14</fn>
<id>test_fail</id>
+ <iteration>0</iteration>
<description>Core</description>
<message>Failure</message>
</test>
<test result=\"error\">
<fn>ex_xml_output.c:18</fn>
<id>test_exit</id>
+ <iteration>0</iteration>
<description>Core</description>
<message>Early exit with return value 1</message>
</test>
<test result=\"success\">
<fn>ex_xml_output.c:26</fn>
<id>test_pass2</id>
+ <iteration>0</iteration>
<description>Core</description>
<message>Passed</message>
</test>
+ <test result=\"failure\">
+ <fn>ex_xml_output.c:32</fn>
+ <id>test_loop</id>
+ <iteration>0</iteration>
+ <description>Core</description>
+ <message>Iteration 0 failed</message>
+ </test>
+ <test result=\"success\">
+ <fn>ex_xml_output.c:32</fn>
+ <id>test_loop</id>
+ <iteration>1</iteration>
+ <description>Core</description>
+ <message>Passed</message>
+ </test>
+ <test result=\"failure\">
+ <fn>ex_xml_output.c:32</fn>
+ <id>test_loop</id>
+ <iteration>2</iteration>
+ <description>Core</description>
+ <message>Iteration 2 failed</message>
+ </test>
</suite>
</testsuites>"