X-Git-Url: https://git.openssl.org/?p=openssl.git;a=blobdiff_plain;f=test%2Ftestutil.h;h=1e18ea11a301bfa06b95b6bddc6ae10cbcd1bab6;hp=0975e2b779c3a0fd7ac077ed54429f891755f10d;hb=bdcacd93b14ed7381a922b41d74c481224ef9fa1;hpb=bd91e3c870402c4b10909c47082daece473d22ef

diff --git a/test/testutil.h b/test/testutil.h
index 0975e2b779..1e18ea11a3 100644
--- a/test/testutil.h
+++ b/test/testutil.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
+ * Copyright 2014-2017 The OpenSSL Project Authors. All Rights Reserved.
  *
  * Licensed under the OpenSSL license (the "License"). You may not use
  * this file except in compliance with the License. You can obtain a copy
@@ -17,34 +17,31 @@
 #include 

 /*-
- * Simple unit tests should implement register_tests().
+ * Simple unit tests should implement setup_tests().
+ * This function should return zero if the registration process fails.
  * To register tests, call ADD_TEST or ADD_ALL_TESTS:
  *
- * void register_tests(void)
+ * int setup_tests(void)
  * {
  *     ADD_TEST(test_foo);
  *     ADD_ALL_TESTS(test_bar, num_test_bar);
+ *     return 1;
  * }
  *
- * Tests that need to perform custom setup or read command-line arguments should
- * implement test_main():
+ * Tests that require cleanup after execution should implement:
  *
- * int test_main(int argc, char *argv[])
- * {
- *     int ret;
+ * void cleanup_tests(void);
  *
- *     // Custom setup ...
+ * The cleanup_tests function will be called even if setup_tests()
+ * returns failure.
  *
- *     ADD_TEST(test_foo);
- *     ADD_ALL_TESTS(test_bar, num_test_bar);
- *     // Add more tests ...
+ * In some cases, early initialization before the framework is set up
+ * may be needed. In such a case, this should be implemented:
  *
- *     ret = run_tests(argv[0]);
+ * int global_init(void);
  *
- *     // Custom teardown ...
- *
- *     return ret;
- * }
+ * This function should return zero if there is an unrecoverable error and
+ * non-zero if the initialization was successful.
  */

 /* Adds a simple test case. */
@@ -68,12 +65,13 @@
  * SETUP_TEST_FIXTURE will call set_up() to create a new TEST_FIXTURE_TYPE
  * object called "fixture". It will also allocate the "result" variable used
  * by EXECUTE_TEST. set_up() should take a const char* specifying the test
- * case name and return a TEST_FIXTURE_TYPE by value.
+ * case name and return a TEST_FIXTURE_TYPE by reference.
  *
- * EXECUTE_TEST will pass fixture to execute_func() by value, call
+ * EXECUTE_TEST will pass fixture to execute_func() by reference, call
  * tear_down(), and return the result of execute_func(). execute_func() should
- * take a TEST_FIXTURE_TYPE by value and return 1 on success and 0 on
- * failure.
+ * take a TEST_FIXTURE_TYPE by reference and return 1 on success and 0 on
+ * failure. The tear_down function is responsible for deallocating the
+ * fixture, if required.
  *
  * Unit tests can define their own SETUP_TEST_FIXTURE and EXECUTE_TEST
  * variations like so:
@@ -94,18 +92,14 @@
  * }
  */
 # define SETUP_TEST_FIXTURE(TEST_FIXTURE_TYPE, set_up)\
-    TEST_FIXTURE_TYPE fixture = set_up(TEST_CASE_NAME); \
+    TEST_FIXTURE_TYPE *fixture = set_up(TEST_CASE_NAME); \
     int result = 0

 # define EXECUTE_TEST(execute_func, tear_down)\
+    if (fixture != NULL) {\
         result = execute_func(fixture);\
         tear_down(fixture);\
-        return result
-
-/* Shorthand if tear_down does nothing. */
-# define EXECUTE_TEST_NO_TEARDOWN(execute_func)\
-    result = execute_func(fixture);\
-    return result
+    }

 /*
  * TEST_CASE_NAME is defined as the name of the test case function where
@@ -124,30 +118,38 @@
 # endif /* __STDC_VERSION__ */

 /*
- * Internal helpers. Test programs shouldn't use these directly, but should
- * rather link to one of the helper main() methods.
+ * Tests that need access to command line arguments should use the functions:
+ * test_get_argument(size_t n) to get the nth argument; the first argument is
+ * argument 0. This function returns NULL on error.
+ * test_get_argument_count() to get the count of the arguments.
+ * test_has_option(const char *) to check if the specified option was passed.
+ * test_get_option_argument(const char *) to get an option which includes an
+ * argument. NULL is returned if the option is not found.
+ * const char *test_get_program_name(void) returns the name of the test program
+ * being executed.
  */
+const char *test_get_program_name(void);
+char *test_get_argument(size_t n);
+size_t test_get_argument_count(void);
+int test_has_option(const char *option);
+const char *test_get_option_argument(const char *option);

-/* setup_test() should be called as the first thing in a test main(). */
-void setup_test(void);
 /*
- * finish_test() should be called as the last thing in a test main().
- * The result of run_tests() should be the input to finish_test().
+ * Internal helpers. Test programs shouldn't use these directly, but should
+ * rather link to one of the helper main() methods.
  */
-__owur int finish_test(int ret);

-void add_test(const char *test_case_name, int (*test_fn) ());
+void add_test(const char *test_case_name, int (*test_fn) (void));
 void add_all_tests(const char *test_case_name, int (*test_fn)(int idx),
                    int num, int subtest);
-__owur int run_tests(const char *test_prog_name);
-void set_test_title(const char *title);

 /*
- * Declarations for user defined functions
+ * Declarations for user defined functions.
+ * The first two return zero to indicate that the test should not proceed.
  */
-void register_tests(void);
-int test_main(int argc, char *argv[]);
-
+int global_init(void);
+int setup_tests(void);
+void cleanup_tests(void);

 /*
  * Test assumption verification helpers.
@@ -165,7 +167,7 @@ int test_main(int argc, char *argv[]);
 # endif
 #endif

-# define DECLARE_COMPARISON(type, name, opname) \
+# define DECLARE_COMPARISON(type, name, opname)                \
     int test_ ## name ## _ ## opname(const char *, int,        \
                                      const char *, const char *,\
                                      const type, const type);
@@ -233,8 +235,8 @@ int test_mem_ne(const char *, int, const char *, const char *,

 /*
  * Check a boolean result for being true or false.
- * They return 1 if the condition is true (i.e. the value is non-zro).
- * Otherwise, they return 0 and pretty-prints diagnostics using |desc|.
+ * They return 1 if the condition is true (i.e. the value is non-zero).
+ * Otherwise, they return 0 and pretty-print diagnostics using |s|.
  * These should not be called directly, use the TEST_xxx macros below instead.
  */
 int test_true(const char *file, int line, const char *s, int b);
@@ -271,7 +273,9 @@ void test_error_c90(const char *desc, ...) PRINTF_FORMAT(1, 2);
 void test_info(const char *file, int line, const char *desc, ...)
     PRINTF_FORMAT(3, 4);
 void test_info_c90(const char *desc, ...) PRINTF_FORMAT(1, 2);
+void test_note(const char *desc, ...) PRINTF_FORMAT(1, 2);
 void test_openssl_errors(void);
+void test_perror(const char *s);

 /*
  * The following macros provide wrapper calls to the test functions with
@@ -384,23 +388,20 @@ void test_openssl_errors(void);
 # define TEST_error(...) test_error(__FILE__, __LINE__, __VA_ARGS__)
 # define TEST_info(...) test_info(__FILE__, __LINE__, __VA_ARGS__)
 # endif
+# define TEST_note test_note
 # define TEST_openssl_errors test_openssl_errors
+# define TEST_perror test_perror
+
+extern BIO *bio_out;
+extern BIO *bio_err;

 /*
- * For "impossible" conditions such as malloc failures or bugs in test code,
- * where continuing the test would be meaningless. Note that OPENSSL_assert
- * is fatal, and is never compiled out.
+ * Formatted output for strings, memory and bignums.
  */
-# define TEST_check(condition) \
-    do { \
-        if (!(condition)) { \
-            TEST_openssl_errors(); \
-            OPENSSL_assert(!#condition); \
-        } \
-    } while (0)
+void test_output_string(const char *name, const char *m, size_t l);
+void test_output_bignum(const char *name, const BIGNUM *bn);
+void test_output_memory(const char *name, const unsigned char *m, size_t l);

-extern BIO *bio_out;
-extern BIO *bio_err;

 /*
  * Utilities to parse a test file.