testthat/0000755000176200001440000000000013570757323012126 5ustar liggesuserstestthat/NAMESPACE0000644000176200001440000001017213564563701013344 0ustar liggesusers# Generated by roxygen2: do not edit by hand S3method(as.data.frame,testthat_results) S3method(as.expectation,default) S3method(as.expectation,error) S3method(as.expectation,expectation) S3method(as.expectation,skip) S3method(as.expectation,warning) S3method(compare,POSIXt) S3method(compare,character) S3method(compare,default) S3method(compare,numeric) S3method(format,expectation) S3method(format,expectation_success) S3method(format,mismatch_character) S3method(format,mismatch_numeric) S3method(is_informative_error,default) S3method(output_replay,character) S3method(output_replay,error) S3method(output_replay,message) S3method(output_replay,recordedplot) S3method(output_replay,source) S3method(output_replay,warning) S3method(print,comparison) S3method(print,expectation) S3method(print,mismatch_character) S3method(print,mismatch_numeric) S3method(print,testthat_results) export("%>%") export(CheckReporter) export(DebugReporter) export(FailReporter) export(JunitReporter) export(ListReporter) export(LocationReporter) export(MinimalReporter) export(MultiReporter) export(ProgressReporter) export(Reporter) export(RstudioReporter) export(SilentReporter) export(StopReporter) export(SummaryReporter) export(TapReporter) export(TeamcityReporter) export(auto_test) export(auto_test_package) export(capture_condition) export(capture_error) export(capture_expectation) export(capture_message) export(capture_messages) export(capture_output) export(capture_output_lines) export(capture_warning) export(capture_warnings) export(check_reporter) export(compare) export(context) export(default_reporter) export(describe) export(equals) export(equals_reference) export(evaluate_promise) export(exp_signal) export(expect) export(expect_condition) export(expect_cpp_tests_pass) export(expect_equal) export(expect_equal_to_reference) export(expect_equivalent) export(expect_error) export(expect_failure) export(expect_false) export(expect_gt) export(expect_gte) export(expect_identical) export(expect_invisible) export(expect_is) export(expect_known_failure) export(expect_known_hash) export(expect_known_output) export(expect_known_value) export(expect_length) export(expect_less_than) export(expect_lt) export(expect_lte) export(expect_mapequal) export(expect_match) export(expect_message) export(expect_more_than) export(expect_named) export(expect_null) export(expect_output) export(expect_output_file) export(expect_reference) export(expect_s3_class) export(expect_s4_class) export(expect_setequal) export(expect_silent) export(expect_success) export(expect_that) export(expect_true) export(expect_type) export(expect_vector) export(expect_visible) export(expect_warning) export(expectation) export(fail) export(find_test_scripts) export(get_reporter) export(gives_warning) export(has_names) export(is.expectation) export(is_a) export(is_equivalent_to) export(is_false) export(is_identical_to) export(is_informative_error) export(is_less_than) export(is_more_than) export(is_null) export(is_testing) export(is_true) export(local_mock) export(make_expectation) export(matches) export(new_expectation) export(not) export(prints_text) export(quasi_label) export(set_reporter) export(setup) export(show_failure) export(shows_message) export(skip) export(skip_if) export(skip_if_not) export(skip_if_not_installed) export(skip_if_offline) export(skip_if_translated) export(skip_on_appveyor) 
export(skip_on_bioc) export(skip_on_ci) export(skip_on_covr) export(skip_on_cran) export(skip_on_os) export(skip_on_travis) export(source_dir) export(source_file) export(source_test_helpers) export(source_test_setup) export(source_test_teardown) export(succeed) export(takes_less_than) export(teardown) export(test_check) export(test_dir) export(test_env) export(test_example) export(test_examples) export(test_file) export(test_package) export(test_path) export(test_rd) export(test_that) export(testing_package) export(testthat_example) export(testthat_examples) export(testthat_tolerance) export(throws_error) export(try_again) export(use_catch) export(verify_output) export(watch) export(with_mock) export(with_reporter) import(rlang) importFrom(R6,R6Class) importFrom(magrittr,"%>%") useDynLib(testthat, .registration = TRUE) testthat/LICENSE0000644000176200001440000000007213521025554013120 0ustar liggesusersYEAR: 2013-2019 COPYRIGHT HOLDER: Hadley Wickham; RStudio testthat/README.md0000644000176200001440000000511313564563701013403 0ustar liggesusers # testthat [![CRAN status](https://www.r-pkg.org/badges/version/testthat)](https://cran.r-project.org/package=testthat) [![Travis build status](https://travis-ci.org/r-lib/testthat.svg?branch=master)](https://travis-ci.org/r-lib/testthat) ![R build status](https://github.com/r-lib/testthat/workflows/R-full/badge.svg) [![Codecov test coverage](https://codecov.io/gh/r-lib/testthat/branch/master/graph/badge.svg)](https://codecov.io/gh/r-lib/testthat?branch=master) ## Overview Testing your code can be painful and tedious, but it greatly increases the quality of your code. **testthat** tries to make testing as fun as possible, so that you get a visceral satisfaction from writing tests. Testing should be addictive, so you do it all the time. To make that happen, testthat: - Provides functions that make it easy to describe what you expect a function to do, including catching errors, warnings, and messages. - Easily integrates in your existing workflow, whether it’s informal testing on the command line, building test suites, or using R CMD check. - Displays test progress visually, showing a pass, fail, or error for every expectation. If you’re using the terminal or a recent version of RStudio, it’ll even colour the output. testthat draws inspiration from the xUnit family of testing packages, as well as from many of the innovative ruby testing libraries, like [rspec](http://rspec.info/), [testy](https://github.com/ahoward/testy), [bacon](https://github.com/chneukirchen/bacon) and [cucumber](https://cucumber.io). testthat is the most popular unit testing package for R and is used by thousands of CRAN packages. If you’re not familiar with testthat, the [testing chapter](http://r-pkgs.had.co.nz/tests.html) in [R packages](http://r-pkgs.had.co.nz/) gives a good overview, along with workflow advice and concrete examples. ## Installation ``` r # Install the released version from CRAN install.packages("testthat") # Or the development version from GitHub: # install.packages("devtools") devtools::install_github("r-lib/testthat") ``` ## Usage The easiest way to get started is with [usethis](https://github.com/r-lib/usethis). Assuming you’re in a package directory, just run `usethis::use_test("name")` to create a test file, and set up all the other infrastructure you need. If you’re using RStudio, press Cmd/Ctrl + Shift + T (or run `devtools::test()` if not) to run all the tests in a package. 
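As a rough sketch of what such a test file looks like (assuming a hypothetical `my_add()` function in your package; the file itself would live in `tests/testthat/`):

``` r
# tests/testthat/test-my-add.R
# Each test_that() block groups related expectations under one description.
test_that("my_add() adds two numbers", {
  expect_equal(my_add(1, 2), 3)        # value check
  expect_type(my_add(1, 2), "double")  # base type check
})

test_that("my_add() rejects non-numeric input", {
  # assumes the hypothetical my_add() signals an error for non-numeric arguments
  expect_error(my_add("a", 2))
})
```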
testthat/man/0000755000176200001440000000000013564563701012677 5ustar liggesuserstestthat/man/testthat-package.Rd0000644000176200001440000000273713216707011016414 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-that.R \docType{package} \name{testthat-package} \alias{testthat} \alias{testthat-package} \title{R package to make testing fun!} \description{ Try the example below. Have a look at the references and learn more from function documentation such as \code{\link[=expect_that]{expect_that()}}. } \section{Options}{ \itemize{ \item \code{testthat.use_colours}: Should the output be coloured? (Default: \code{TRUE}). \item \code{testthat.summary.max_reports}: The maximum number of detailed test reports printed for the summary reporter (default: 10). \item \code{testthat.summary.omit_dots}: Omit progress dots in the summary reporter (default: \code{FALSE}). } } \examples{ library(testthat) a <- 9 expect_that(a, is_less_than(10)) expect_lt(a, 10) } \references{ Wickham, H (2011). testthat: Get Started with Testing. \strong{The R Journal} \emph{3/1} 5-10. \url{https://journal.r-project.org/archive/2011-1/RJournal_2011-1_Wickham.pdf} \url{http://adv-r.had.co.nz/Testing.html} } \seealso{ Useful links: \itemize{ \item \url{http://testthat.r-lib.org} \item \url{https://github.com/r-lib/testthat} \item Report bugs at \url{https://github.com/r-lib/testthat/issues} } } \author{ \strong{Maintainer}: Hadley Wickham \email{hadley@rstudio.com} Other contributors: \itemize{ \item RStudio [copyright holder, funder] \item R Core team (Implementation of utils::recover()) [contributor] } } \keyword{internal} testthat/man/auto_test.Rd0000644000176200001440000000277413165405551015202 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/auto-test.R \name{auto_test} \alias{auto_test} \title{Watches code and tests for changes, rerunning tests as appropriate.} \usage{ auto_test(code_path, test_path, reporter = default_reporter(), env = test_env(), hash = TRUE) } \arguments{ \item{code_path}{path to directory containing code} \item{test_path}{path to directory containing tests} \item{reporter}{test reporter to use} \item{env}{environment in which to execute test suite.} \item{hash}{Passed on to \code{\link[=watch]{watch()}}. When FALSE, uses less accurate modification time stamps, but those are faster for large files.} } \description{ The idea behind \code{auto_test()} is that you just leave it running while you develop your code. Everytime you save a file it will be automatically tested and you can easily see if your changes have caused any test failures. } \details{ The current strategy for rerunning tests is as follows: \itemize{ \item if any code has changed, then those files are reloaded and all tests rerun \item otherwise, each new or modified test is run } In the future, \code{auto_test()} might implement one of the following more intelligent alternatives: \itemize{ \item Use codetools to build up dependency tree and then rerun tests only when a dependency changes. \item Mimic ruby's autotest and rerun only failing tests until they pass, and then rerun all tests. 
} } \seealso{ \code{\link[=auto_test_package]{auto_test_package()}} } \keyword{debugging} testthat/man/expect_setequal.Rd0000644000176200001440000000300513456034771016357 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-setequal.R \name{expect_setequal} \alias{expect_setequal} \alias{expect_mapequal} \title{Expectation: do two vectors contain the same values?} \usage{ expect_setequal(object, expected) expect_mapequal(object, expected) } \arguments{ \item{object}{Computation and value to compare it to. Both arguments supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{expected}{Computation and value to compare it to. Both arguments supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} } \description{ \itemize{ \item \code{expect_setequal(x, y)} tests that every element of \code{x} occurs in \code{y}, and that every element of \code{y} occurs in \code{x}. \item \code{expect_mapequal(x, y)} tests that \code{x} and \code{y} have the same names, and that \code{x[names(y)]} equals \code{x}. } } \details{ Note that \code{expect_setequal()} ignores names, and you will be warned if both \code{object} and \code{expected} have them. } \examples{ expect_setequal(letters, rev(letters)) show_failure(expect_setequal(letters[-1], rev(letters))) x <- list(b = 2, a = 1) expect_mapequal(x, list(a = 1, b = 2)) show_failure(expect_mapequal(x, list(a = 1))) show_failure(expect_mapequal(x, list(a = 1, b = "x"))) show_failure(expect_mapequal(x, list(a = 1, b = 2, c = 3))) } testthat/man/reporter-accessors.Rd0000644000176200001440000000217513456034771017020 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-zzz.R \name{reporter-accessors} \alias{reporter-accessors} \alias{set_reporter} \alias{get_reporter} \alias{with_reporter} \title{Get and set active reporter.} \usage{ set_reporter(reporter) get_reporter() with_reporter(reporter, code, start_end_reporter = TRUE) } \arguments{ \item{reporter}{Reporter to use to summarise output. Can be supplied as a string (e.g. "summary") or as an R6 object (e.g. \code{SummaryReporter$new()}). See \link{Reporter} for more details and a list of built-in reporters.} \item{code}{Code to execute.} \item{start_end_reporter}{Should the reporters \code{start_reporter()} and \code{end_reporter()} methods be called? For expert use only.} } \value{ \code{with_reporter()} invisible returns the reporter active when \code{code} was evaluated. } \description{ \code{get_reporter()} and \code{set_reporter()} access and modify the current "active" reporter. Generally, these functions should not be called directly; instead use \code{with_reporter()} to temporarily change, then reset, the active reporter. } \keyword{internal} testthat/man/expect_vector.Rd0000644000176200001440000000227513456034771016046 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-vector.R \name{expect_vector} \alias{expect_vector} \title{Expectation: does the object have vctr properties?} \usage{ expect_vector(object, ptype = NULL, size = NULL) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. 
See \link{quasi_label} for more details.}
\item{ptype}{(Optional) Vector prototype to test against. Should be a size-0 (empty) generalised vector.}
\item{size}{(Optional) Size to check for.}
}
\description{
\code{expect_vector()} is a thin wrapper around \code{\link[vctrs:vec_assert]{vctrs::vec_assert()}}, converting the results of that function into the expectations used by testthat. This means that it uses the vctrs notions of \code{ptype} (prototype) and \code{size}. See details in \url{https://vctrs.r-lib.org/articles/type-size.html}
}
\examples{
if (requireNamespace("vctrs") && packageVersion("vctrs") > "0.1.0.9002") {
  expect_vector(1:10, ptype = integer(), size = 10)
  show_failure(expect_vector(1:10, ptype = integer(), size = 5))
  show_failure(expect_vector(1:10, ptype = character(), size = 5))
}
}
testthat/man/quasi_label.Rd0000644000176200001440000000331513564563701015451 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quasi-label.R
\name{quasi_label}
\alias{quasi_label}
\title{Quasi-labelling}
\usage{
quasi_label(quo, label = NULL, arg = "quo")
}
\arguments{
\item{quo}{A quosure created by \code{rlang::enquo()}.}
\item{label}{An optional label to override the default. This is only provided for internal usage. Modern expectations should not include a \code{label} parameter.}
\item{arg}{Argument name shown in the error message if \code{quo} is missing.}
}
\value{
A list containing two elements:
\item{val}{The evaluated value of \code{quo}}
\item{lab}{The quasiquoted label generated from \code{quo}}
}
\description{
The first argument to every \code{expect_} function can use unquoting to construct better labels. This makes it easy to create informative labels when expectations are used inside a function or a for loop. \code{quasi_label()} wraps up the details, returning the expression and label.
}
\section{Limitations}{
Because all \code{expect_} functions use unquoting to generate more informative labels, you cannot use unquoting for other purposes. Instead, you'll need to perform all other unquoting outside of the expectation and only test the results.
}
\examples{
f <- function(i) if (i > 3) i * 9 else i * 10
i <- 10

# This sort of expression commonly occurs inside a for loop or function,
# and the failure isn't helpful because you can't see the value of i
# that caused the problem:
show_failure(expect_equal(f(i), i * 10))

# To overcome this issue, testthat allows you to unquote expressions using
# !!. This causes the failure message to show the value rather than the
# variable name:
show_failure(expect_equal(f(!!i), !!(i * 10)))
}
\keyword{internal}
testthat/man/watch.Rd0000644000176200001440000000203613173076020014262 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/watcher.R
\name{watch}
\alias{watch}
\title{Watch a directory for changes (additions, deletions & modifications).}
\usage{
watch(path, callback, pattern = NULL, hash = TRUE)
}
\arguments{
\item{path}{character vector of paths to watch. Omit trailing backslash.}
\item{callback}{function called every time a change occurs. It should have three parameters: added, deleted, modified, and should return TRUE to keep watching, or FALSE to stop.}
\item{pattern}{file pattern passed to \code{\link[=dir]{dir()}}}
\item{hash}{hashes are more accurate at detecting changes, but are slower for large files.
When FALSE, uses modification time stamps} } \description{ This is used to power the \code{\link[=auto_test]{auto_test()}} and \code{\link[=auto_test_package]{auto_test_package()}} functions which are used to rerun tests whenever source code changes. } \details{ Use Ctrl + break (windows), Esc (mac gui) or Ctrl + C (command line) to stop the watcher. } \keyword{internal} testthat/man/auto_test_package.Rd0000644000176200001440000000131113456034771016644 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/auto-test.R \name{auto_test_package} \alias{auto_test_package} \title{Watches a package for changes, rerunning tests as appropriate.} \usage{ auto_test_package(pkg = ".", reporter = default_reporter(), hash = TRUE) } \arguments{ \item{pkg}{path to package} \item{reporter}{test reporter to use} \item{hash}{Passed on to \code{\link[=watch]{watch()}}. When FALSE, uses less accurate modification time stamps, but those are faster for large files.} } \description{ Watches a package for changes, rerunning tests as appropriate. } \seealso{ \code{\link[=auto_test]{auto_test()}} for details on how method works } \keyword{debugging} testthat/man/test_that.Rd0000644000176200001440000000204013164532741015155 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-that.R \name{test_that} \alias{test_that} \title{Create a test.} \usage{ test_that(desc, code) } \arguments{ \item{desc}{test name. Names should be kept as brief as possible, as they are often used as line prefixes.} \item{code}{test code containing expectations} } \description{ A test encapsulates a series of expectations about small, self-contained set of functionality. Each test is contained in a \link{context} and contains multiple expectations. } \details{ Tests are evaluated in their own environments, and should not affect global state. When run from the command line, tests return \code{NULL} if all expectations are met, otherwise it raises an error. } \examples{ test_that("trigonometric functions match identities", { expect_equal(sin(pi / 4), 1 / sqrt(2)) expect_equal(cos(pi / 4), 1 / sqrt(2)) expect_equal(tan(pi / 4), 1) }) # Failing test: \dontrun{ test_that("trigonometric functions match identities", { expect_equal(sin(pi / 4), 1) }) } } testthat/man/SummaryReporter.Rd0000644000176200001440000000237413564523315016351 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-summary.R \docType{data} \name{SummaryReporter} \alias{SummaryReporter} \title{Test reporter: summary of errors.} \format{An object of class \code{R6ClassGenerator} of length 24.} \usage{ SummaryReporter } \description{ This is a reporter designed for interactive usage: it lets you know which tests have run successfully and as well as fully reporting information about failures and errors. } \details{ You can use the \code{max_reports} field to control the maximum number of detailed reports produced by this reporter. This is useful when running with \code{\link[=auto_test]{auto_test()}} As an additional benefit, this reporter will praise you from time-to-time if all your tests pass. 
} \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{Reporter}}, \code{\link{RstudioReporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} \keyword{datasets} testthat/man/expect_null.Rd0000644000176200001440000000251413564563701015512 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-null.R \name{expect_null} \alias{expect_null} \title{Expectation: is an object \code{NULL}?} \usage{ expect_null(object, info = NULL, label = NULL) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} } \description{ This is a special case because \code{NULL} is a singleton so it's possible check for it either with \code{expect_equal(x, NULL)} or \code{expect_type(x, "NULL")}. } \examples{ x <- NULL y <- 10 expect_null(x) show_failure(expect_null(y)) } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}}, \code{\link{expect_length}}, \code{\link{expect_match}}, \code{\link{expect_message}}, \code{\link{expect_named}}, \code{\link{expect_output}}, \code{\link{expect_silent}}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/expect_known_output.Rd0000644000176200001440000000661313564563701017320 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-known.R \name{expect_known_output} \alias{expect_known_output} \alias{expect_output_file} \alias{expect_known_value} \alias{expect_equal_to_reference} \alias{expect_known_hash} \title{Expectations: is the output or the value equal to a known good value?} \usage{ expect_known_output(object, file, update = TRUE, ..., info = NULL, label = NULL, print = FALSE, width = 80) expect_known_value(object, file, update = TRUE, ..., info = NULL, label = NULL, version = 2) expect_known_hash(object, hash = NULL) } \arguments{ \item{object}{Computation and value to compare it to. Both arguments supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{file}{File path where known value/output will be stored.} \item{update}{Should the file be updated? Defaults to \code{TRUE}, with the expectation that you'll notice changes because of the first failure, and then see the modified files in git.} \item{...}{For \code{expect_equal()} and \code{expect_equivalent()}, passed on \code{\link[=compare]{compare()}}, for \code{expect_identical()} passed on to \code{\link[=identical]{identical()}}. Used to control the details of the comparison.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. 
Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} \item{print}{If \code{TRUE} and the result of evaluating \code{code} is visible this will print the result, ensuring that the output of printing the object is included in the overall output} \item{width}{Number of characters per line of output. This does not inherit from \code{getOption("width")} so that tests always use the same output width, minimising spurious differences.} \item{version}{The serialization format version to use. The default, 2, was the default format from R 1.4.0 to 3.5.3. Version 3 became the default from R 3.6.0 and can only be read by R versions 3.5.0 and higher.} \item{hash}{Known hash value. Leave empty and you'll be informed what to use in the test output.} } \description{ For complex printed output and objects, it is often challenging to describe exactly what you expect to see. \code{expect_known_value()} and \code{expect_known_output()} provide a slightly weaker guarantee, simply asserting that the values have not changed since the last time that you ran them. } \details{ These expectations should be used in conjunction with git, as otherwise there is no way to revert to previous values. Git is particularly useful in conjunction with \code{expect_known_output()} as the diffs will show you exactly what has changed. Note that known values updates will only be updated when running tests interactively. \code{R CMD check} clones the package source so any changes to the reference files will occur in a temporary directory, and will not be synchronised back to the source package. } \examples{ tmp <- tempfile() # The first run always succeeds expect_known_output(mtcars[1:10, ], tmp, print = TRUE) # Subsequent runs will succeed only if the file is unchanged # This will succeed: expect_known_output(mtcars[1:10, ], tmp, print = TRUE) \dontrun{ # This will fail expect_known_output(mtcars[1:9, ], tmp, print = TRUE) } } \keyword{internal} testthat/man/expect_named.Rd0000644000176200001440000000375713564563701015636 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-named.R \name{expect_named} \alias{expect_named} \title{Expectation: does object have names?} \usage{ expect_named(object, expected, ignore.order = FALSE, ignore.case = FALSE, info = NULL, label = NULL) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{expected}{Character vector of expected names. Leave missing to match any names. Use \code{NULL} to check for absence of names.} \item{ignore.order}{If \code{TRUE}, sorts names before comparing to ignore the effect of order.} \item{ignore.case}{If \code{TRUE}, lowercases all names to ignore the effect of case.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} \item{...}{Other arguments passed on to \code{\link[=has_names]{has_names()}}.} } \description{ You can either check for the presence of names (leaving \code{expected} blank), specific names (by suppling a vector of names), or absence of names (with \code{NULL}). 
} \examples{ x <- c(a = 1, b = 2, c = 3) expect_named(x) expect_named(x, c("a", "b", "c")) # Use options to control sensitivity expect_named(x, c("B", "C", "A"), ignore.order = TRUE, ignore.case = TRUE) # Can also check for the absence of names with NULL z <- 1:4 expect_named(z, NULL) } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}}, \code{\link{expect_length}}, \code{\link{expect_match}}, \code{\link{expect_message}}, \code{\link{expect_null}}, \code{\link{expect_output}}, \code{\link{expect_silent}}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/fail.Rd0000644000176200001440000000141313521025554014070 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-that.R \name{fail} \alias{fail} \alias{succeed} \title{Default expectations that always succeed or fail.} \usage{ fail(message = "Failure has been forced", info = NULL) succeed(message = "Success has been forced", info = NULL) } \arguments{ \item{message}{a string to display.} \item{info}{Character vector continuing additional information. Included for backward compatibility only and new expectations should not use it.} } \description{ These allow you to manually trigger success or failure. Failure is particularly useful to a pre-condition or mark a test as not yet implemented. } \examples{ \dontrun{ test_that("this test fails", fail()) test_that("this test succeeds", succeed()) } } testthat/man/test_env.Rd0000644000176200001440000000074713164532741015021 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-files.R \name{test_env} \alias{test_env} \title{Generate default testing environment.} \usage{ test_env() } \description{ We use a new environment which inherits from \code{\link[=globalenv]{globalenv()}}. In an ideal world, we'd avoid putting the global environment on the search path for tests, but it's not currently possible without losing the ability to load packages in tests. } \keyword{internal} testthat/man/LocationReporter.Rd0000644000176200001440000000175713564523315016470 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-location.R \docType{data} \name{LocationReporter} \alias{LocationReporter} \title{Test reporter: location} \format{An object of class \code{R6ClassGenerator} of length 24.} \usage{ LocationReporter } \description{ This reporter simply prints the location of every expectation and error. 
This is useful if you're trying to figure out the source of a segfault, or which code triggers a C/C++ breakpoint.
}
\seealso{
Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{ListReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{Reporter}}, \code{\link{RstudioReporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}}
}
\concept{reporters}
\keyword{datasets}
testthat/man/StopReporter.Rd0000644000176200001440000000231613564523315015635 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reporter-stop.R
\docType{data}
\name{StopReporter}
\alias{StopReporter}
\title{Test reporter: stop on error.}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
StopReporter
}
\description{
The default reporter, executed when \code{expect_that()} is run interactively. It responds by \code{\link[=stop]{stop()}}ping on failures and doing nothing otherwise. This ensures that a failing test will raise an error.
}
\details{
This should be used when doing a quick and dirty test, or during the final automated testing of R CMD check. Otherwise, use a reporter that runs all tests and gives you more context about the problem.
}
\seealso{
Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{Reporter}}, \code{\link{RstudioReporter}}, \code{\link{SilentReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}}
}
\concept{reporters}
\keyword{datasets}
testthat/man/capture_condition.Rd0000644000176200001440000000312413564563701016677 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/capture-condition.R
\name{capture_condition}
\alias{capture_condition}
\alias{capture_error}
\alias{capture_expectation}
\alias{capture_message}
\alias{capture_warning}
\alias{capture_messages}
\alias{capture_warnings}
\title{Capture conditions, including messages, warnings, expectations, and errors.}
\usage{
capture_condition(code, entrace = FALSE)
capture_error(code, entrace = FALSE)
capture_expectation(code, entrace = FALSE)
capture_message(code, entrace = FALSE)
capture_warning(code, entrace = FALSE)
capture_messages(code)
capture_warnings(code)
}
\arguments{
\item{code}{Code to evaluate.}
\item{entrace}{Whether to add a \link[rlang:trace_back]{backtrace} to the captured condition.}
}
\value{
Singular functions (\code{capture_condition}, \code{capture_expectation}, etc.) return a condition object. \code{capture_messages()} and \code{capture_warnings()} return a character vector of message text.
}
\description{
These functions allow you to capture the side-effects of a function call, including printed output, messages, and warnings. They are used to evaluate code for \code{\link[=expect_output]{expect_output()}}, \code{\link[=expect_message]{expect_message()}}, \code{\link[=expect_warning]{expect_warning()}}, and \code{\link[=expect_silent]{expect_silent()}}.
} \examples{ f <- function() { message("First") warning("Second") message("Third") } capture_message(f()) capture_messages(f()) capture_warning(f()) capture_warnings(f()) # Condition will capture anything capture_condition(f()) } \keyword{internal} testthat/man/is_informative_error.Rd0000644000176200001440000000260513564563701017420 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-condition.R \name{is_informative_error} \alias{is_informative_error} \title{Is an error informative?} \usage{ is_informative_error(x, ...) } \arguments{ \item{x}{An error object.} \item{...}{These dots are for future extensions and must be empty.} } \description{ \code{is_informative_error()} is a generic predicate that indicates whether testthat users should explicitly test for an error class. When it returns \code{TRUE} (the default), and \code{expect_error()} does not check for the class, a warning is issued during tests. You can silence the warning by implementing \code{is_informative_error()}. The main use case for overriding this method is to introduce an experimental error class when you need more experience while developing an error hierarchy for your package. Override \code{is_informative_error()} to return \code{FALSE} to avoid encouraging users to depend on the experimental class in their tests. Since testthat should be a \code{Suggest} dependency, methods for \code{is_informative_error()} should typically be lazily registered, e.g. with \code{vctrs::s3_register()}. } \details{ A few classes are hard-coded as uninformative: \itemize{ \item \code{simpleError} \item \code{rlang_error} unless a subclass is detected \item \code{Rcpp::eval_error} \item \code{Rcpp::exception} } } \keyword{internal} testthat/man/expect_message.Rd0000644000176200001440000000656513564563701016176 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-messages.R \name{expect_message} \alias{expect_message} \alias{expect_warning} \title{Expectation: does code produce warnings or messages?} \usage{ expect_message(object, regexp = NULL, ..., all = FALSE, info = NULL, label = NULL) expect_warning(object, regexp = NULL, ..., all = FALSE, info = NULL, label = NULL) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{regexp}{Regular expression to test against. \itemize{ \item A character vector giving a regular expression that must match the message/warning \item If \code{NULL}, the default, asserts that there should be a messsage/warning, but doesn't test for a specific value. \item If \code{NA}, asserts that there shouldn't be any messages or warnings. }} \item{...}{Arguments passed on to \code{expect_match} \describe{ \item{all}{Should all elements of actual value match \code{regexp} (TRUE), or does only one need to match (FALSE)} \item{perl}{logical. Should Perl-compatible regexps be used?} \item{fixed}{logical. If \code{TRUE}, \code{pattern} is a string to be matched as is. Overrides all conflicting arguments.} }} \item{all}{Do messages/warnings need to match the \code{regexp} (\code{TRUE}), or does only one need to match (\code{FALSE})?} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. 
For expert use only.} } \value{ The first argument, invisibly. } \description{ Use \code{expect_message()} and \code{expect_warning()} to check if the messages or warnings match the given regular expression. } \examples{ # Messages ------------------------------------------------------------------ f <- function(x) { if (x < 0) { message("*x* is already negative") return(x) } -x } expect_message(f(-1)) expect_message(f(-1), "already negative") expect_message(f(1), NA) # To test message and output, store results to a variable expect_message(out <- f(-1), "already negative") expect_equal(out, -1) # You can use the arguments of grepl to control the matching expect_message(f(-1), "*x*", fixed = TRUE) expect_message(f(-1), "NEGATIVE", ignore.case = TRUE) # Warnings ------------------------------------------------------------------ f <- function(x) { if (x < 0) { warning("*x* is already negative") return(x) } -x } expect_warning(f(-1)) expect_warning(f(-1), "already negative") expect_warning(f(1), NA) # To test message and output, store results to a variable expect_warning(out <- f(-1), "already negative") expect_equal(out, -1) # You can use the arguments of grepl to control the matching expect_warning(f(-1), "*x*", fixed = TRUE) expect_warning(f(-1), "NEGATIVE", ignore.case = TRUE) } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}}, \code{\link{expect_length}}, \code{\link{expect_match}}, \code{\link{expect_named}}, \code{\link{expect_null}}, \code{\link{expect_output}}, \code{\link{expect_silent}}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/oldskool.Rd0000644000176200001440000000262113164532741015011 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/old-school.R \name{oldskool} \alias{oldskool} \alias{is_null} \alias{is_a} \alias{is_true} \alias{is_false} \alias{has_names} \alias{is_less_than} \alias{is_more_than} \alias{equals} \alias{is_equivalent_to} \alias{is_identical_to} \alias{equals_reference} \alias{shows_message} \alias{gives_warning} \alias{prints_text} \alias{throws_error} \alias{matches} \title{Old-style expectations.} \usage{ is_null() is_a(class) is_true() is_false() has_names(expected, ignore.order = FALSE, ignore.case = FALSE) is_less_than(expected, label = NULL, ...) is_more_than(expected, label = NULL, ...) equals(expected, label = NULL, ...) is_equivalent_to(expected, label = NULL) is_identical_to(expected, label = NULL) equals_reference(file, label = NULL, ...) shows_message(regexp = NULL, all = FALSE, ...) gives_warning(regexp = NULL, all = FALSE, ...) prints_text(regexp = NULL, ...) throws_error(regexp = NULL, ...) matches(regexp, all = TRUE, ...) } \description{ Initial testthat used a style of testing that looked like \code{expect_that(a, equals(b)))} this allowed expectations to read like English sentences, but was verbose and a bit too cutesy. This style will continue to work but has been soft-deprecated - it is no longer documented, and new expectations will only use the new style \code{expect_equal(a, b)}. 
} \keyword{internal} testthat/man/find_reporter.Rd0000644000176200001440000000077213164532741016032 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-zzz.R \name{find_reporter} \alias{find_reporter} \title{Find reporter object given name or object.} \usage{ find_reporter(reporter) } \arguments{ \item{reporter}{name of reporter(s), or reporter object(s)} } \description{ If not found, will return informative error message. Pass a character vector to create a \link{MultiReporter} composed of individual reporters. Will return null if given NULL. } \keyword{internal} testthat/man/CheckReporter.Rd0000644000176200001440000000166213564523315015730 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-check.R \docType{data} \name{CheckReporter} \alias{CheckReporter} \title{Check reporter: 13 line summary of problems} \format{An object of class \code{R6ClassGenerator} of length 24.} \usage{ CheckReporter } \description{ \code{R CMD check} displays only the last 13 lines of the result, so this report is design to ensure that you see something useful there. } \seealso{ Other reporters: \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{Reporter}}, \code{\link{RstudioReporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} \keyword{datasets} testthat/man/expect_match.Rd0000644000176200001440000000444013564563701015634 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expectations-matches.R \name{expect_match} \alias{expect_match} \title{Expectation: does string match a regular expression?} \usage{ expect_match(object, regexp, perl = FALSE, fixed = FALSE, ..., all = TRUE, info = NULL, label = NULL) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{regexp}{Regular expression to test against.} \item{perl}{logical. Should Perl-compatible regexps be used?} \item{fixed}{logical. If \code{TRUE}, \code{pattern} is a string to be matched as is. Overrides all conflicting arguments.} \item{...}{Arguments passed on to \code{base::grepl} \describe{ \item{ignore.case}{if \code{FALSE}, the pattern matching is \emph{case sensitive} and if \code{TRUE}, case is ignored during matching.} \item{useBytes}{logical. If \code{TRUE} the matching is done byte-by-byte rather than character-by-character. See \sQuote{Details}.} }} \item{all}{Should all elements of actual value match \code{regexp} (TRUE), or does only one need to match (FALSE)} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} } \description{ Expectation: does string match a regular expression? } \details{ \code{expect_match()} is a wrapper around \code{\link[=grepl]{grepl()}}. See its documentation for more detail about the individual arguments. 
} \examples{ expect_match("Testing is fun", "fun") expect_match("Testing is fun", "f.n") \dontrun{ expect_match("Testing is fun", "horrible") # Zero-length inputs always fail expect_match(character(), ".") } } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}}, \code{\link{expect_length}}, \code{\link{expect_message}}, \code{\link{expect_named}}, \code{\link{expect_null}}, \code{\link{expect_output}}, \code{\link{expect_silent}}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/MultiReporter.Rd0000644000176200001440000000166113564523315016004 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-multi.R \docType{data} \name{MultiReporter} \alias{MultiReporter} \title{Multi reporter: combine several reporters in one.} \format{An object of class \code{R6ClassGenerator} of length 24.} \usage{ MultiReporter } \description{ This reporter is useful to use several reporters at the same time, e.g. adding a custom reporter without removing the current one. } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{ProgressReporter}}, \code{\link{Reporter}}, \code{\link{RstudioReporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} \keyword{datasets} testthat/man/not.Rd0000644000176200001440000000066413173076020013761 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-that.R \name{not} \alias{not} \title{Negate an expectation} \usage{ not(f) } \arguments{ \item{f}{an existing expectation function} } \description{ This negates an expectation, making it possible to express that you want the opposite of a standard expectation. This function is deprecated and will be removed in a future version. } \keyword{internal} testthat/man/testthat_examples.Rd0000644000176200001440000000106013456034771016721 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/example.R \name{testthat_examples} \alias{testthat_examples} \alias{testthat_example} \title{Retrieve paths to built-in example test files} \usage{ testthat_examples() testthat_example(filename) } \arguments{ \item{filename}{Name of test file} } \description{ \code{testthat_examples()} retrieves path to directory of test files, \code{testthat_example()} retrieves path to a single test file. } \examples{ dir(testthat_examples()) testthat_example("success") } \keyword{internal} testthat/man/test_file.Rd0000644000176200001440000000363713564563701015155 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-files.R \name{test_file} \alias{test_file} \title{Run all tests in specified file} \usage{ test_file(path, reporter = default_reporter(), env = test_env(), start_end_reporter = TRUE, load_helpers = TRUE, encoding = "unknown", wrap = TRUE) } \arguments{ \item{path}{Path to file.} \item{reporter}{Reporter to use to summarise output. Can be supplied as a string (e.g. "summary") or as an R6 object (e.g. \code{SummaryReporter$new()}). 
See \link{Reporter} for more details and a list of built-in reporters.}
\item{env}{Environment in which to execute the tests. Expert use only.}
\item{start_end_reporter}{Should the reporter's \code{start_reporter()} and \code{end_reporter()} methods be called? For expert use only.}
\item{load_helpers}{Source helper files before running the tests? See \code{\link[=source_test_helpers]{source_test_helpers()}} for more details.}
\item{encoding}{Deprecated. All files now assumed to be UTF-8.}
\item{wrap}{Automatically wrap all code within \code{\link[=test_that]{test_that()}}? This ensures that all expectations are reported, even if outside a test block.}
}
\value{
Invisibly, a list with one element for each test.
}
\description{
Execute code in the specified file, displaying results using a \code{reporter}. Use this function when you want to run a single file's worth of tests. You are responsible for ensuring that the functions to test are available in the global environment.
}
\details{
Any errors that occur in code run outside of \code{test_that()} will generate a test failure and terminate execution of that test file.
}
\examples{
path <- testthat_example("success")
test_file(path, reporter = "minimal")

# test_file() invisibly returns a list, with one element for each test.
# This can be useful if you want to compute on your test results.
out <- test_file(path, reporter = "minimal")
str(out[[1]])
}
testthat/man/ProgressReporter.Rd0000644000176200001440000000227113564523315016514 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reporter-progress.R
\docType{data}
\name{ProgressReporter}
\alias{ProgressReporter}
\title{Test reporter: interactive progress bar of errors.}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
ProgressReporter
}
\description{
This reporter is a reimagining of \link{SummaryReporter} designed to make the most information available up front, while taking up less space overall. It is the default reporter used by \code{\link[=test_dir]{test_dir()}} and \code{\link[=test_file]{test_file()}}.
}
\details{
As an additional benefit, this reporter will praise you from time-to-time if all your tests pass.
} \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{Reporter}}, \code{\link{RstudioReporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} \keyword{datasets} testthat/man/source_file.Rd0000644000176200001440000000335313456034771015471 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/source.R \name{source_file} \alias{source_file} \alias{source_dir} \alias{source_test_helpers} \alias{source_test_setup} \alias{source_test_teardown} \title{Source a file, directory of files, or various important subsets} \usage{ source_file(path, env = test_env(), chdir = TRUE, encoding = "unknown", wrap = TRUE) source_dir(path, pattern = "\\\\.[rR]$", env = test_env(), chdir = TRUE, wrap = TRUE) source_test_helpers(path = "tests/testthat", env = test_env()) source_test_setup(path = "tests/testthat", env = test_env()) source_test_teardown(path = "tests/testthat", env = test_env()) } \arguments{ \item{path}{Path to files.} \item{env}{Environment in which to evaluate code.} \item{chdir}{Change working directory to \code{dirname(path)}?} \item{encoding}{Deprecated.} \item{wrap}{Automatically wrap all code within \code{\link[=test_that]{test_that()}}? This ensures that all expectations are reported, even if outside a test block.} \item{pattern}{Regular expression used to filter files.} } \description{ These are used by \code{\link[=test_dir]{test_dir()}} and friends } \section{Test files}{ For package code, tests should live in \code{tests/testthat}. There are four classes of \code{.R} files that have special behaviour: \itemize{ \item Test files start with \code{test} and are executed in alphabetical order. \item Helper files start with \code{helper} and are executed before tests are run and from \code{devtools::load_all()}. \item Setup files start with \code{setup} and are executed before tests, but not during \code{devtools::load_all()}. \item Teardown files start with \code{teardown} and are executed after the tests are run. } } \keyword{internal} testthat/man/capture_output.Rd0000644000176200001440000000233013456034771016247 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/capture-output.R \name{capture_output} \alias{capture_output} \alias{capture_output_lines} \title{Capture output to console} \usage{ capture_output(code, print = FALSE, width = 80) capture_output_lines(code, print = FALSE, width = 80) } \arguments{ \item{code}{Code to evaluate.} \item{print}{If \code{TRUE} and the result of evaluating \code{code} is visible this will print the result, ensuring that the output of printing the object is included in the overall output} \item{width}{Number of characters per line of output. This does not inherit from \code{getOption("width")} so that tests always use the same output width, minimising spurious differences.} } \value{ \code{capture_output()} returns a single string. \code{capture_output_lines()} returns a character vector with one entry for each line } \description{ Evaluates \code{code} in a special context in which all output is captured, similar to \code{\link[=capture.output]{capture.output()}}. 
} \examples{ capture_output({ cat("Hi!\\n") cat("Bye\\n") }) capture_output_lines({ cat("Hi!\\n") cat("Bye\\n") }) capture_output("Hi") capture_output("Hi", print = TRUE) } \keyword{internal} testthat/man/DebugReporter.Rd0000644000176200001440000000160713564523315015740 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-debug.R \docType{data} \name{DebugReporter} \alias{DebugReporter} \title{Test reporter: start recovery.} \format{An object of class \code{R6ClassGenerator} of length 24.} \usage{ DebugReporter } \description{ This reporter will call a modified version of \code{\link[=recover]{recover()}} on all broken expectations. } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{FailReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{Reporter}}, \code{\link{RstudioReporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} \keyword{datasets} testthat/man/test_dir.Rd0000644000176200001440000000771313564563701015013 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-directory.R \name{test_dir} \alias{test_dir} \alias{test_package} \alias{test_check} \alias{is_testing} \alias{testing_package} \title{Run all tests in directory or package} \usage{ test_dir(path, filter = NULL, reporter = default_reporter(), env = test_env(), ..., encoding = "unknown", load_helpers = TRUE, stop_on_failure = FALSE, stop_on_warning = FALSE, wrap = TRUE) test_package(package, filter = NULL, reporter = check_reporter(), ..., stop_on_failure = TRUE, stop_on_warning = FALSE) test_check(package, filter = NULL, reporter = check_reporter(), ..., stop_on_failure = TRUE, stop_on_warning = FALSE, wrap = TRUE) is_testing() testing_package() } \arguments{ \item{path}{Path to directory containing tests.} \item{filter}{If not \code{NULL}, only tests with file names matching this regular expression will be executed. Matching be performed on the file name after it has been stripped of \code{"test-"} and \code{".R"}.} \item{reporter}{Reporter to use to summarise output. Can be supplied as a string (e.g. "summary") or as an R6 object (e.g. \code{SummaryReporter$new()}). See \link{Reporter} for more details and a list of built-in reporters.} \item{env}{Environment in which to execute the tests. Expert use only.} \item{...}{Additional arguments passed to \code{\link[=grepl]{grepl()}} to control filtering.} \item{encoding}{Deprecated. All files now assumed to be UTF-8.} \item{load_helpers}{Source helper files before running the tests? See \code{\link[=source_test_helpers]{source_test_helpers()}} for more details.} \item{stop_on_failure}{If \code{TRUE}, throw an error if any tests fail. For historical reasons, the default value of \code{stop_on_failure} is \code{TRUE} for \code{test_package()} and \code{test_check()} but \code{FALSE} for \code{test_dir()}, so if you're calling \code{test_dir()} you may want to consider explicitly setting \code{stop_on_failure = TRUE}.} \item{stop_on_warning}{If \code{TRUE}, throw an error if any tests generate warnings.} \item{wrap}{Automatically wrap all code within \code{\link[=test_that]{test_that()}}? 
This ensures that all expectations are reported, even if outside a test block.} \item{package}{Name of installed package.} } \value{ A list of test results. } \description{ Use \code{test_dir()} for a collection of tests in a directory; use \code{test_package()} interactively at the console, and \code{test_check()} inside of \code{R CMD check}. In your own code, you can use \code{is_testing()} to determine if code is being run as part of a test and \code{testing_package()} to retrieve the name of the package being tested. You can also check the underlying env var directly \code{identical(Sys.getenv("TESTTHAT"), "true")} to avoid creating a run-time dependency on testthat. } \section{Test files}{ For package code, tests should live in \code{tests/testthat}. There are four classes of \code{.R} files that have special behaviour: \itemize{ \item Test files start with \code{test} and are executed in alphabetical order. \item Helper files start with \code{helper} and are executed before tests are run and from \code{devtools::load_all()}. \item Setup files start with \code{setup} and are executed before tests, but not during \code{devtools::load_all()}. \item Teardown files start with \code{teardown} and are executed after the tests are run. } } \section{Environments}{ Each test is run in a clean environment to keep tests as isolated as possible. For package tests, that environment that inherits from the package's namespace environment, so that tests can access internal functions and objects. } \section{\code{R CMD check}}{ To run testthat automatically from \code{R CMD check}, make sure you have a \code{tests/testthat.R} that contains:\preformatted{library(testthat) library(yourpackage) test_check("yourpackage") } } \examples{ test_dir(testthat_examples(), reporter = "summary") test_dir(testthat_examples(), reporter = "minimal") } testthat/man/TeamcityReporter.Rd0000644000176200001440000000175513564523315016475 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-teamcity.R \docType{data} \name{TeamcityReporter} \alias{TeamcityReporter} \title{Test reporter: Teamcity format.} \format{An object of class \code{R6ClassGenerator} of length 24.} \usage{ TeamcityReporter } \description{ This reporter will output results in the Teamcity message format. For more information about Teamcity messages, see http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{Reporter}}, \code{\link{RstudioReporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}} } \concept{reporters} \keyword{datasets} testthat/man/safe_digest.Rd0000644000176200001440000000071613164532741015443 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/watcher.R \name{safe_digest} \alias{safe_digest} \title{Compute a digest of a filename, returning NA if the file doesn't exist.} \usage{ safe_digest(path) } \arguments{ \item{filename}{filename to compute digest on} } \value{ a digest of the file, or NA if it doesn't exist. } \description{ Compute a digest of a filename, returning NA if the file doesn't exist. 
} \keyword{internal} testthat/man/equality-expectations.Rd0000644000176200001440000000565613564563701017543 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-equality.R \name{equality-expectations} \alias{equality-expectations} \alias{expect_equal} \alias{expect_equivalent} \alias{expect_identical} \alias{expect_reference} \title{Expectation: is the object equal to a value?} \usage{ expect_equal(object, expected, ..., info = NULL, label = NULL, expected.label = NULL) expect_equivalent(object, expected, ..., info = NULL, label = NULL, expected.label = NULL) expect_identical(object, expected, info = NULL, label = NULL, expected.label = NULL, ...) expect_reference(object, expected, info = NULL, label = NULL, expected.label = NULL) } \arguments{ \item{object, expected}{Computation and value to compare it to. Both arguments supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{...}{For \code{expect_equal()} and \code{expect_equivalent()}, passed on \code{\link[=compare]{compare()}}, for \code{expect_identical()} passed on to \code{\link[=identical]{identical()}}. Used to control the details of the comparison.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label, expected.label}{Used to customise failure messages. For expert use only.} } \description{ \itemize{ \item \code{expect_identical()} compares values with \code{\link[=identical]{identical()}}. \item \code{expect_equal()} compares values with \code{\link[=all.equal]{all.equal()}} \item \code{expect_equivalent()} compares values with \code{\link[=all.equal]{all.equal()}} and \code{check.attributes = FALSE} \item \code{expect_reference()} compares the underlying memory addresses. } } \examples{ a <- 10 expect_equal(a, 10) # Use expect_equal() when testing for numeric equality sqrt(2) ^ 2 - 1 expect_equal(sqrt(2) ^ 2, 2) # Neither of these forms take floating point representation errors into # account \dontrun{ expect_true(sqrt(2) ^ 2 == 2) expect_identical(sqrt(2) ^ 2, 2) } # You can pass on additional arguments to all.equal: \dontrun{ # Test the ABSOLUTE difference is within .002 expect_equal(10.01, 10, tolerance = .002, scale = 1) } # Test the RELATIVE difference is within .002 x <- 10 expect_equal(10.01, expected = x, tolerance = 0.002, scale = x) # expect_equivalent ignores attributes a <- b <- 1:3 names(b) <- letters[1:3] expect_equivalent(a, b) } \seealso{ \code{expect_setequal()} to test for set equality. 
Other expectations: \code{\link{comparison-expectations}}, \code{\link{expect_error}}, \code{\link{expect_length}}, \code{\link{expect_match}}, \code{\link{expect_message}}, \code{\link{expect_named}}, \code{\link{expect_null}}, \code{\link{expect_output}}, \code{\link{expect_silent}}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/inheritance-expectations.Rd0000644000176200001440000000451413564563701020167 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-inheritance.R \name{inheritance-expectations} \alias{inheritance-expectations} \alias{expect_type} \alias{expect_s3_class} \alias{expect_s4_class} \title{Expectation: does the object inherit from a S3 or S4 class, or is it a base type?} \usage{ expect_type(object, type) expect_s3_class(object, class, exact = FALSE) expect_s4_class(object, class) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{type}{String giving base type (as returned by \code{\link[=typeof]{typeof()}}).} \item{class}{character vector of class names} \item{exact}{If \code{FALSE}, the default, checks that \code{object} inherits from \code{class}. If \code{TRUE}, checks that object has a class that's identical to \code{class}.} } \description{ See \url{https://adv-r.hadley.nz/oo.html} for an overview of R's OO systems, and the vocabulary used here. \itemize{ \item \code{expect_type(x, type)} checks that \code{typeof(x)} is \code{type}. \item \code{expect_s3_class(x, class)} checks that \code{x} is an S3 object that \code{\link[=inherits]{inherits()}} from \code{class} \item \code{expect_s4_class(x, class)} checks that \code{x} is an S4 object that \code{\link[=is]{is()}} \code{class}. } } \examples{ x <- data.frame(x = 1:10, y = "x") # A data frame is an S3 object with class data.frame expect_s3_class(x, "data.frame") show_failure(expect_s4_class(x, "data.frame")) # A data frame is built from a list: expect_type(x, "list") # An integer vector is an atomic vector of type "integer" expect_type(x$x, "integer") # It is not an S3 object show_failure(expect_s3_class(x$x, "integer")) # By default data.frame() converts characters to factors: show_failure(expect_type(x$y, "character")) expect_s3_class(x$y, "factor") expect_type(x$y, "integer") } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}}, \code{\link{expect_length}}, \code{\link{expect_match}}, \code{\link{expect_message}}, \code{\link{expect_named}}, \code{\link{expect_null}}, \code{\link{expect_output}}, \code{\link{expect_silent}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/expect_is.Rd0000644000176200001440000000156613456034771015161 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-inheritance.R \name{expect_is} \alias{expect_is} \title{Expectation: does the object inherit from a given class?} \usage{ expect_is(object, class, info = NULL, label = NULL) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. 
See \link{quasi_label} for more details.} \item{class}{character vector of class names} } \description{ \code{expect_is()} is an older form that uses \code{\link[=inherits]{inherits()}} without checking whether \code{x} is S3, S4, or neither. Intead, I'd recommend using \code{\link[=expect_type]{expect_type()}}, \code{\link[=expect_s3_class]{expect_s3_class()}} or \code{\link[=expect_s4_class]{expect_s4_class()}} to more clearly convey your intent. } \keyword{internal} testthat/man/expect_success.Rd0000644000176200001440000000150313456034771016205 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-self-test.R \name{expect_success} \alias{expect_success} \alias{expect_failure} \alias{show_failure} \alias{expect_known_failure} \title{Tools for testing expectations} \usage{ expect_success(expr) expect_failure(expr, message = NULL, ...) show_failure(expr) expect_known_failure(path, expr) } \arguments{ \item{expr}{Expression that evaluates a single expectation.} \item{message}{Check that the failure message matches this regexp.} \item{...}{Other arguments passed on to \code{\link[=expect_match]{expect_match()}}.} \item{path}{Path to save failure output} } \description{ Use these expectations to test other expectations. Use \code{show_failure()} in examples to print the failure message without throwing an error. } \keyword{internal} testthat/man/MinimalReporter.Rd0000644000176200001440000000205513564523315016276 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-minimal.R \docType{data} \name{MinimalReporter} \alias{MinimalReporter} \title{Test reporter: minimal.} \format{An object of class \code{R6ClassGenerator} of length 24.} \usage{ MinimalReporter } \description{ The minimal test reporter provides the absolutely minimum amount of information: whether each expectation has succeeded, failed or experienced an error. If you want to find out what the failures and errors actually were, you'll need to run a more informative test reporter. 
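As a usage sketch, you might run a directory of tests with this reporter selected by name: \preformatted{
# Each expectation is reported only as ".", "F", or "E"
test_dir(testthat_examples(), reporter = "minimal")
}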
} \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{Reporter}}, \code{\link{RstudioReporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} \keyword{datasets} testthat/man/default_reporter.Rd0000644000176200001440000000076613456034771016545 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter.R \name{default_reporter} \alias{default_reporter} \alias{check_reporter} \title{Retrieve the default reporter} \usage{ default_reporter() check_reporter() } \description{ The defaults are: \itemize{ \item \link{ProgressReporter} for interactive; override with \code{testthat.default_reporter} \item \link{CheckReporter} for R CMD check; override with \code{testthat.default_check_reporter} } } \keyword{internal} testthat/man/FailReporter.Rd0000644000176200001440000000165113564523315015564 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-fail.R \docType{data} \name{FailReporter} \alias{FailReporter} \title{Test reporter: fail at end.} \format{An object of class \code{R6ClassGenerator} of length 24.} \usage{ FailReporter } \description{ This reporter will simply throw an error if any of the tests failed. It is best combined with another reporter, such as the \link{SummaryReporter}. } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{Reporter}}, \code{\link{RstudioReporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} \keyword{datasets} testthat/man/expect_that.Rd0000644000176200001440000000222613456034771015500 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-that.R \name{expect_that} \alias{expect_that} \title{Expect that a condition holds.} \usage{ expect_that(object, condition, info = NULL, label = NULL) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{condition, }{a function that returns whether or not the condition is met, and if not, an error message to display.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} } \value{ the (internal) expectation result as an invisible list } \description{ An old style of testing that's no longer encouraged. } \examples{ expect_that(5 * 2, equals(10)) expect_that(sqrt(2) ^ 2, equals(2)) \dontrun{ expect_that(sqrt(2) ^ 2, is_identical_to(2)) } } \seealso{ \code{\link[=fail]{fail()}} for an expectation that always fails. 
} \keyword{internal} testthat/man/use_catch.Rd0000644000176200001440000001123513173076020015113 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-compiled-code.R \name{use_catch} \alias{use_catch} \title{Use Catch for C++ Unit Testing} \usage{ use_catch(dir = getwd()) } \arguments{ \item{dir}{The directory containing an \R package.} } \description{ Add the necessary infrastructure to enable C++ unit testing in \R packages with \href{https://github.com/philsquared/Catch}{Catch} and \code{testthat}. } \details{ Calling \code{use_catch()} will: \enumerate{ \item Create a file \code{src/test-runner.cpp}, which ensures that the \code{testthat} package will understand how to run your package's unit tests, \item Create an example test file \code{src/test-example.cpp}, which showcases how you might use Catch to write a unit test, \item Add a test file \code{tests/testthat/test-cpp.R}, which ensures that \code{testthat} will run your compiled tests during invocations of \code{devtools::test()} or \code{R CMD check}, and \item Create a file \code{R/catch-routine-registration.R}, which ensures that \R will automatically register this routine when \code{tools::package_native_routine_registration_skeleton()} is invoked. } C++ unit tests can be added to C++ source files within the \code{src} directory of your package, with a format similar to \R code tested with \code{testthat}. Here's a simple example of a unit test written with \code{testthat} + Catch: \preformatted{ context("C++ Unit Test") { test_that("two plus two is four") { int result = 2 + 2; expect_true(result == 4); } } } When your package is compiled, unit tests alongside a harness for running these tests will be compiled into your \R package, with the C entry point \code{run_testthat_tests()}. \code{testthat} will use that entry point to run your unit tests when detected. } \section{Functions}{ All of the functions provided by Catch are available with the \code{CATCH_} prefix -- see \href{https://github.com/philsquared/Catch/blob/master/docs/assertions.md}{here} for a full list. \code{testthat} provides the following wrappers, to conform with \code{testthat}'s \R interface: \tabular{lll}{ \strong{Function} \tab \strong{Catch} \tab \strong{Description} \cr \code{context} \tab \code{CATCH_TEST_CASE} \tab The context of a set of tests. \cr \code{test_that} \tab \code{CATCH_SECTION} \tab A test section. \cr \code{expect_true} \tab \code{CATCH_CHECK} \tab Test that an expression evaluates to \code{true}. \cr \code{expect_false} \tab \code{CATCH_CHECK_FALSE} \tab Test that an expression evalutes to \code{false}. \cr \code{expect_error} \tab \code{CATCH_CHECK_THROWS} \tab Test that evaluation of an expression throws an exception. \cr \code{expect_error_as} \tab \code{CATCH_CHECK_THROWS_AS} \tab Test that evaluation of an expression throws an exception of a specific class. \cr } In general, you should prefer using the \code{testthat} wrappers, as \code{testthat} also does some work to ensure that any unit tests within will not be compiled or run when using the Solaris Studio compilers (as these are currently unsupported by Catch). This should make it easier to submit packages to CRAN that use Catch. } \section{Symbol Registration}{ If you've opted to disable dynamic symbol lookup in your package, then you'll need to explicitly export a symbol in your package that \code{testthat} can use to run your unit tests. 
\code{testthat} will look for a routine with one of the names: \preformatted{ C_run_testthat_tests c_run_testthat_tests run_testthat_tests } See \href{https://cran.r-project.org/doc/manuals/r-release/R-exts.html#Controlling-visibility}{Controlling Visibility} and \href{https://cran.r-project.org/doc/manuals/r-release/R-exts.html#Registering-symbols}{Registering Symbols} in the \strong{Writing R Extensions} manual for more information. } \section{Advanced Usage}{ If you'd like to write your own Catch test runner, you can instead use the \code{testthat::catchSession()} object in a file with the form: \preformatted{ #define TESTTHAT_TEST_RUNNER #include <testthat.h> void run() { Catch::Session& session = testthat::catchSession(); // interact with the session object as desired } } This can be useful if you'd like to run your unit tests with custom arguments passed to the Catch session. } \section{Standalone Usage}{ If you'd like to use the C++ unit testing facilities provided by Catch, but would prefer not to use the regular \code{testthat} \R testing infrastructure, you can manually run the unit tests by inserting a call to: \preformatted{ .Call("run_testthat_tests", PACKAGE = <package name>) } as necessary within your unit test suite. } \seealso{ \href{https://github.com/philsquared/Catch}{Catch}, the library used to enable C++ unit testing. } testthat/man/comparison-expectations.Rd0000644000176200001440000000354613564563701020054 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-comparison.R \name{comparison-expectations} \alias{comparison-expectations} \alias{expect_lt} \alias{expect_lte} \alias{expect_gt} \alias{expect_gte} \alias{expect_less_than} \alias{expect_more_than} \title{Expectation: is returned value less or greater than specified value?} \usage{ expect_lt(object, expected, label = NULL, expected.label = NULL) expect_lte(object, expected, label = NULL, expected.label = NULL) expect_gt(object, expected, label = NULL, expected.label = NULL) expect_gte(object, expected, label = NULL, expected.label = NULL) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{expected}{Single numeric value to compare.} \item{label}{Used to customise failure messages. For expert use only.} \item{expected.label}{Used to customise failure messages. For expert use only.} \item{...}{For \code{expect_equal()} and \code{expect_equivalent()}, passed on to \code{\link[=compare]{compare()}}, for \code{expect_identical()} passed on to \code{\link[=identical]{identical()}}. Used to control the details of the comparison.} } \description{ Expectation: is returned value less or greater than specified value?
} \examples{ a <- 9 expect_lt(a, 10) \dontrun{ expect_lt(11, 10) } a <- 11 expect_gt(a, 10) \dontrun{ expect_gt(9, 10) } } \seealso{ Other expectations: \code{\link{equality-expectations}}, \code{\link{expect_error}}, \code{\link{expect_length}}, \code{\link{expect_match}}, \code{\link{expect_message}}, \code{\link{expect_named}}, \code{\link{expect_null}}, \code{\link{expect_output}}, \code{\link{expect_silent}}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/with_mock.Rd0000644000176200001440000000401013456034771015145 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mock.R \name{with_mock} \alias{with_mock} \alias{local_mock} \title{Mock functions in a package.} \usage{ with_mock(..., .env = topenv()) local_mock(..., .env = topenv(), .local_envir = parent.frame()) } \arguments{ \item{...}{named parameters redefine mocked functions, unnamed parameters will be evaluated after mocking the functions} \item{.env}{the environment in which to patch the functions, defaults to the top-level environment. A character is interpreted as package name.} \item{.local_env}{Environment in which to add exit hander. For expert use only.} } \value{ The result of the last unnamed parameter } \description{ Executes code after temporarily substituting implementations of package functions. This is useful for testing code that relies on functions that are slow, have unintended side effects or access resources that may not be available when testing. } \details{ This works by using some C code to temporarily modify the mocked function \emph{in place}. On exit (regular or error), all functions are restored to their previous state. This is somewhat abusive of R's internals, and is still experimental, so use with care. Functions in base packages cannot be mocked, but this can be worked around easily by defining a wrapper function. } \examples{ add_one <- function(x) x + 1 expect_equal(add_one(2), 3) with_mock( add_one = function(x) x - 1, expect_equal(add_one(2), 1) ) square_add_one <- function(x) add_one(x)^2 expect_equal(square_add_one(2), 9) expect_equal( with_mock( add_one = function(x) x - 1, square_add_one(2) ), 1 ) # local_mock() ------------------------------- plus <- function(x, y) x + y test_that("plus(1, 1) == 2", { expect_equal(plus(1, 1), 2) }) test_that("plus(1, 1) == 3", { local_mock(plus = function(x, y) 3) expect_equal(plus(1, 1), 3) }) } \references{ Suraj Gupta (2012): \href{http://obeautifulcode.com/R/How-R-Searches-And-Finds-Stuff}{How R Searches And Finds Stuff} } \keyword{internal} testthat/man/figures/0000755000176200001440000000000013173620207014332 5ustar liggesuserstestthat/man/figures/logo.png0000644000176200001440000003600313173076020016000 0ustar liggesusersPNG  IHDRxb]esRGB pHYs  iTXtXML:com.adobe.xmp Adobe ImageReady 1 ).=9IDATx} |\U{LM4ItP AED! " -%-ZPThQ (RUtI&i&;L2s?Ϲsdnrsg=sy/(|$ueIْNr>L{%zur߶)[u ;ܽ3NՑD׏cx}Âvp!4%ղ*@ګL܇D,nX_pn(dt;N(`YƊS?`錓ݻ`B䠠8dں4˜myhqq很}}P# X]Z]P`NW͝o GDq1-lFu#Ve.yY9Sמհ6߾"N|AƔ c*ũ]n8{Rmc?JǟD|OI8,v˟ &uBoP+:+Y Xǭ[&09G/Bbd7kmiƞʖLQ8 { M= qt4;Aqc;yxtV^m^۾{-l`0D~!BZˎ-B@? 6#[iZzeSMO+9Jds.k*M ;tev.D1r)iH1"ȓ&(S <_?!1Ny kF+Cǜ~Z;LGnŜ4^l{5K ,JKKbUgGΟ K]X]׏E<u Uu2q?) 
testthat/man/reexports.Rd0000644000176200001440000000061213164532741015214 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \docType{import} \name{reexports} \alias{reexports} \alias{\%>\%} \title{Objects exported from other packages} \keyword{internal} \description{ These objects are imported from other packages. Follow the links below to see their documentation. \describe{ \item{magrittr}{\code{\link[magrittr]{\%>\%}}} }} testthat/man/expect_length.Rd0000644000176200001440000000201513564563701016015 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-length.R \name{expect_length} \alias{expect_length} \title{Expectation: does a vector have the specified length?} \usage{ expect_length(object, n) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{n}{Expected length.} } \description{ Expectation: does a vector have the specified length? } \examples{ expect_length(1, 1) expect_length(1:10, 10) \dontrun{ expect_length(1:10, 1) } } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}}, \code{\link{expect_match}}, \code{\link{expect_message}}, \code{\link{expect_named}}, \code{\link{expect_null}}, \code{\link{expect_output}}, \code{\link{expect_silent}}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/takes_less_than.Rd0000644000176200001440000000061613164532741016334 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/old-school.R \name{takes_less_than} \alias{takes_less_than} \title{Expectation: does expression take less than a fixed amount of time to run?} \usage{ takes_less_than(amount) } \arguments{ \item{amount}{maximum duration in seconds} } \description{ This is useful for performance regression testing.
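A usage sketch in the older \code{expect_that()} style (timings are machine-dependent, so treat this as illustrative only): \preformatted{
# Check that an expression completes within one second
expect_that(Sys.sleep(0.1), takes_less_than(1))
}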
} \keyword{internal} testthat/man/compare_state.Rd0000644000176200001440000000070713164532741016014 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/watcher.R \name{compare_state} \alias{compare_state} \title{Compare two directory states.} \usage{ compare_state(old, new) } \arguments{ \item{old}{previous state} \item{new}{current state} } \value{ list containing number of changes and files which have been \code{added}, \code{deleted} and \code{modified} } \description{ Compare two directory states. } \keyword{internal} testthat/man/find_test_scripts.Rd0000644000176200001440000000112013164532741016702 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-files.R \name{find_test_scripts} \alias{find_test_scripts} \title{Find the test files.} \usage{ find_test_scripts(path, filter = NULL, invert = FALSE, ...) } \arguments{ \item{path}{path to tests} \item{filter}{cf \code{\link[=test_dir]{test_dir()}}} \item{invert}{If \sQuote{TRUE} return files which do \emph{not} match.} \item{...}{Additional arguments passed to \code{\link[=grepl]{grepl()}} to control filtering.} } \value{ the test file paths } \description{ Find the test files. } \keyword{internal} testthat/man/expectation.Rd0000644000176200001440000000263513564563701015517 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expectation.R \name{expectation} \alias{expectation} \alias{new_expectation} \alias{exp_signal} \alias{is.expectation} \title{Construct an expectation object} \usage{ expectation(type, message, srcref = NULL, trace = NULL) new_expectation(type, message, ..., srcref = NULL, trace = NULL, .subclass = NULL) exp_signal(exp) is.expectation(x) } \arguments{ \item{type}{Expectation type. Must be one of "success", "failure", "error", "skip", "warning".} \item{message}{Message describing test failure} \item{srcref}{Optional \code{srcref} giving location of test.} \item{trace}{An optional backtrace created by \code{\link[rlang:trace_back]{rlang::trace_back()}}. When supplied, the expectation is displayed with the backtrace.} \item{...}{Additional attributes for the expectation object.} \item{.subclass}{An optional subclass for the expectation object.} \item{exp}{An expectation object, as created by \code{\link[=new_expectation]{new_expectation()}}.} \item{x}{object to test for class membership} } \description{ For advanced use only. If you are creating your own expectation, you should call \code{\link[=expect]{expect()}} instead. See \code{vignette("custom-expectation")} for more details. } \details{ Create an expectation with \code{expectation()} or \code{new_expectation()} and signal it with \code{exp_signal()}. } \keyword{internal} testthat/man/test_examples.Rd0000644000176200001440000000166613521025554016044 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-example.R \name{test_examples} \alias{test_examples} \alias{test_rd} \alias{test_example} \title{Test package examples} \usage{ test_examples(path = "../..") test_rd(rd, title = attr(rd, "Rdfile")) test_example(path, title = path) } \arguments{ \item{path}{For \code{test_examples()}, path to directory containing Rd files. For \code{test_example()}, path to a single Rd file. 
Remember the working directory for tests is \code{tests/testthat}.} \item{rd}{A parsed Rd object, obtained from \code{\link[tools:Rd_db]{tools::Rd_db()}} or otherwise.} \item{title}{Test title to use} } \description{ These helper functions make it easier to test the examples in a package. Each example counts as one test, and it succeeds if the code runs without an error. Generally, this is redundant with R CMD check, and is not recommended in routine practice. } \keyword{internal} testthat/man/expect_silent.Rd0000644000176200001440000000207213564563701016035 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-silent.R \name{expect_silent} \alias{expect_silent} \title{Expectation: is the code silent?} \usage{ expect_silent(object) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} } \value{ The first argument, invisibly. } \description{ Checks that the code produces no output, messages, or warnings. } \examples{ expect_silent("123") f <- function() { message("Hi!") warning("Hey!!") print("OY!!!") } \dontrun{ expect_silent(f()) } } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}}, \code{\link{expect_length}}, \code{\link{expect_match}}, \code{\link{expect_message}}, \code{\link{expect_named}}, \code{\link{expect_null}}, \code{\link{expect_output}}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/dir_state.Rd0000644000176200001440000000073713164532741015147 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/watcher.R \name{dir_state} \alias{dir_state} \title{Capture the state of a directory.} \usage{ dir_state(path, pattern = NULL, hash = TRUE) } \arguments{ \item{path}{path to directory} \item{pattern}{regular expression with which to filter files} \item{hash}{use hash (slow but accurate) or time stamp (fast but less accurate)} } \description{ Capture the state of a directory. } \keyword{internal} testthat/man/test_path.Rd0000644000176200001440000000067513164532741015165 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-path.R \name{test_path} \alias{test_path} \title{Locate file in testing directory.} \usage{ test_path(...) } \arguments{ \item{...}{Character vectors giving path component.} } \value{ A character vector giving the path. } \description{ This function is designed to work both interatively and during tests, locating files in the \code{tests/testthat} directory } testthat/man/expect.Rd0000644000176200001440000000343013564563701014456 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expectation.R \name{expect} \alias{expect} \title{The building block of all \code{expect_} functions} \usage{ expect(ok, failure_message, info = NULL, srcref = NULL, trace = NULL) } \arguments{ \item{ok}{\code{TRUE} or \code{FALSE} indicating if the expectation was successful.} \item{failure_message}{Message to show if the expectation failed.} \item{info}{Character vector continuing additional information. Included for backward compatibility only and new expectations should not use it.} \item{srcref}{Location of the failure. 
Should only needed to be explicitly supplied when you need to forward a srcref captured elsewhere.} \item{trace}{An optional backtrace created by \code{\link[rlang:trace_back]{rlang::trace_back()}}. When supplied, the expectation is displayed with the backtrace.} } \value{ An expectation object. Signals the expectation condition with a \code{continue_test} restart. } \description{ Call \code{expect()} when writing your own expectations. See \code{vignette("custom-expectation")} for details. } \details{ While \code{expect()} creates and signals an expectation in one go, \code{exp_signal()} separately signals an expectation that you have manually created with \code{\link[=new_expectation]{new_expectation()}}. Expectations are signalled with the following protocol: \itemize{ \item If the expectation is a failure or an error, it is signalled with \code{\link[base:stop]{base::stop()}}. Otherwise, it is signalled with \code{\link[base:signalCondition]{base::signalCondition()}}. \item The \code{continue_test} restart is registered. When invoked, failing expectations are ignored and normal control flow is resumed to run the other tests. } } \seealso{ \code{\link[=exp_signal]{exp_signal()}} } testthat/man/skip.Rd0000644000176200001440000000705013564563701014136 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/skip.R \name{skip} \alias{skip} \alias{skip_if_not} \alias{skip_if} \alias{skip_if_not_installed} \alias{skip_if_offline} \alias{skip_on_cran} \alias{skip_on_os} \alias{skip_on_travis} \alias{skip_on_appveyor} \alias{skip_on_ci} \alias{skip_on_covr} \alias{skip_on_bioc} \alias{skip_if_translated} \title{Skip a test.} \usage{ skip(message) skip_if_not(condition, message = deparse(substitute(condition))) skip_if(condition, message = NULL) skip_if_not_installed(pkg, minimum_version = NULL) skip_if_offline(host = "r-project.org") skip_on_cran() skip_on_os(os) skip_on_travis() skip_on_appveyor() skip_on_ci() skip_on_covr() skip_on_bioc() skip_if_translated(msgid = "'\%s' not found") } \arguments{ \item{message}{A message describing why the test was skipped.} \item{condition}{Boolean condition to check. \code{skip_if_not()} will skip if \code{FALSE}, \code{skip_if()} will skip if \code{TRUE}.} \item{pkg}{Name of package to check for} \item{minimum_version}{Minimum required version for the package} \item{host}{A string with a hostname to lookup} \item{os}{Character vector of system names. Supported values are \code{"windows"}, \code{"mac"}, \code{"linux"} and \code{"solaris"}.} \item{msgid}{R message identifier used to check for translation: the default uses a message included in most translation packs. See the complete list in \href{https://github.com/wch/r-source/blob/master/src/library/base/po/R-base.pot}{R-base.pot}.} } \description{ This function allows you to skip a test if it's not currently available. This will produce an informative message, but will not cause the test suite to fail. } \details{ \code{skip*} functions are intended for use within \code{\link[=test_that]{test_that()}} blocks. All expectations following the \code{skip*} statement within the same \code{test_that} block will be skipped. Test summaries that report skip counts are reporting how many \code{test_that} blocks triggered a \code{skip*} statement, not how many expectations were skipped. } \section{Helpers}{ \code{skip_if_not()} works like \code{\link[=stopifnot]{stopifnot()}}, generating a message automatically based on the first argument. 
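For example (an illustrative sketch; the condition is arbitrary): \preformatted{
test_that("feature needing a recent R version", {
  skip_if_not(getRversion() >= "3.5.0")
  expect_equal(1 + 1, 2)
})
}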
\code{skip_if_offline()} skips tests if an internet connection is not available using \code{\link[curl:nslookup]{curl::nslookup()}}. \code{skip_on_cran()} skips tests on CRAN, using the \code{NOT_CRAN} environment variable set by devtools. \code{skip_on_travis()} skips tests on Travis CI by inspecting the \code{TRAVIS} environment variable. \code{skip_on_appveyor()} skips tests on AppVeyor by inspecting the \code{APPVEYOR} environment variable. \code{skip_on_ci()} skips tests on continuous integration systems by inspecting the \code{CI} environment variable. \code{skip_on_covr()} skips tests when covr is running by inspecting the \code{R_COVR} environment variable \code{skip_on_bioc()} skips tests on Bioconductor by inspecting the \code{BBS_HOME} environment variable. \code{skip_if_not_installed()} skips a tests if a package is not installed or cannot be loaded (useful for suggested packages). It loads the package as a side effect, because the package is likely to be used anyway. } \examples{ if (FALSE) skip("No internet connection") ## The following are only meaningful when put in test files and ## run with `test_file`, `test_dir`, `test_check`, etc. test_that("skip example", { expect_equal(1, 1L) # this expectation runs skip('skip') expect_equal(1, 2) # this one skipped expect_equal(1, 3) # this one is also skipped }) } testthat/man/make_expectation.Rd0000644000176200001440000000121313173076020016470 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/make-expectation.R \name{make_expectation} \alias{make_expectation} \title{Make an equality test.} \usage{ make_expectation(x, expectation = "equals") } \arguments{ \item{x}{a vector of values} \item{expectation}{the type of equality you want to test for (\code{"equals"}, \code{"is_equivalent_to"}, \code{"is_identical_to"})} } \description{ This a convenience function to make a expectation that checks that input stays the same. } \examples{ x <- 1:10 make_expectation(x) make_expectation(mtcars$mpg) df <- data.frame(x = 2) make_expectation(df) } \keyword{internal} testthat/man/logical-expectations.Rd0000644000176200001440000000335413564563701017311 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-logical.R \name{logical-expectations} \alias{logical-expectations} \alias{expect_true} \alias{expect_false} \title{Expectation: is the object true/false?} \usage{ expect_true(object, info = NULL, label = NULL) expect_false(object, info = NULL, label = NULL) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} } \description{ These are fall-back expectations that you can use when none of the other more specific expectations apply. The disadvantage is that you may get a less informative error message. } \details{ Attributes are ignored. 
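For example (illustrative sketch), a named logical value still passes because attributes are dropped before the comparison: \preformatted{
expect_true(c(ok = TRUE))
}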
} \examples{ expect_true(2 == 2) # Failed expectations will throw an error \dontrun{ expect_true(2 != 2) } expect_true(!(2 != 2)) # or better: expect_false(2 != 2) a <- 1:3 expect_true(length(a) == 3) # but better to use more specific expectation, if available expect_equal(length(a), 3) } \seealso{ \code{\link[=is_false]{is_false()}} for complement Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}}, \code{\link{expect_length}}, \code{\link{expect_match}}, \code{\link{expect_message}}, \code{\link{expect_named}}, \code{\link{expect_null}}, \code{\link{expect_output}}, \code{\link{expect_silent}}, \code{\link{inheritance-expectations}} } \concept{expectations} testthat/man/SilentReporter.Rd0000644000176200001440000000200213564523315016136 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-silent.R \docType{data} \name{SilentReporter} \alias{SilentReporter} \title{Test reporter: gather all errors silently.} \format{An object of class \code{R6ClassGenerator} of length 24.} \usage{ SilentReporter } \description{ This reporter quietly runs all tests, simply gathering all expectations. This is helpful for programmatically inspecting errors after a test run. You can retrieve the results with the \code{expectations()} method. } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{Reporter}}, \code{\link{RstudioReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} \keyword{datasets} testthat/man/try_again.Rd0000644000176200001440000000106113173076020015126 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/try-again.R \name{try_again} \alias{try_again} \title{Try evaluating an expressing multiple times until it succeeds.} \usage{ try_again(times, code) } \arguments{ \item{times}{Maximum number of attempts.} \item{code}{Code to evaluate} } \description{ Try evaluating an expressing multiple times until it succeeds. } \examples{ third_try <- local({ i <- 3 function() { i <<- i - 1 if (i > 0) fail(paste0("i is ", i)) } }) try_again(3, third_try()) } \keyword{internal} testthat/man/teardown.Rd0000644000176200001440000000137313165405551015010 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/teardown.R \name{teardown} \alias{teardown} \alias{setup} \title{Run code on setup/teardown} \usage{ teardown(code, env = parent.frame()) setup(code, env = parent.frame()) } \arguments{ \item{code}{Code to evaluate} \item{env}{Environment in which code will be evaluted. For expert use only.} } \description{ Code in a \code{setup()} block is run immediately in a clean environment. Code in a \code{teardown()} block is run upon completion of a test file, even if it exits with an error. Multiple calls to \code{teardown()} will be executed in the order they were created. 
} \examples{ \dontrun{ tmp <- tempfile() setup(writeLines("some test data", tmp)) teardown(unlink(tmp)) } } testthat/man/expect_cpp_tests_pass.Rd0000644000176200001440000000113413564563701017567 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-compiled-code.R \name{expect_cpp_tests_pass} \alias{expect_cpp_tests_pass} \title{Expectation: do C++ tests pass?} \usage{ expect_cpp_tests_pass(package) } \arguments{ \item{package}{The name of the package to test.} } \description{ Test compiled code in the package \code{package}. A call to this function will automatically be generated for you in \code{tests/testthat/test-cpp.R} after calling \code{\link[=use_catch]{use_catch()}}; you should not need to manually call this expectation yourself. } \keyword{internal} testthat/man/ListReporter.Rd0000644000176200001440000000174513564523315015630 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-list.R \docType{data} \name{ListReporter} \alias{ListReporter} \title{List reporter: gather all test results along with elapsed time and file information.} \format{An object of class \code{R6ClassGenerator} of length 24.} \usage{ ListReporter } \description{ This reporter gathers all results, adding additional information such as test elapsed time, and test filename if available. Very useful for reporting. } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{Reporter}}, \code{\link{RstudioReporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} \keyword{datasets} testthat/man/Reporter.Rd0000644000176200001440000000326313564563701014774 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter.R \docType{data} \name{Reporter} \alias{Reporter} \title{Manage test reporting} \format{An object of class \code{R6ClassGenerator} of length 24.} \usage{ Reporter } \description{ The job of a reporter is to aggregate the results from files, tests, and expectations and display them in an informative way. Every testthat function that runs multiple tests provides a \code{reporter} argument which you can use to override the default (which is selected by \code{\link[=default_reporter]{default_reporter()}}). } \details{ You only need to use this \code{Reporter} object directly if you are creating a new reporter. Currently, creating new Reporters is undocumented, so if you want to create your own, you'll need to make sure that you're familiar with \href{https://adv-r.hadley.nz/R6.html}{R6} and then read the source code for a few. } \examples{ path <- testthat_example("success") # The default reporter - doesn't display well in examples because # it's designed to work in an interactive console.
test_file(path) # Override the default by supplying the name of a reporter test_file(path, reporter = "minimal") } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RstudioReporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} \keyword{internal} testthat/man/TapReporter.Rd0000644000176200001440000000173713564523315015442 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-tap.R \docType{data} \name{TapReporter} \alias{TapReporter} \title{Test reporter: TAP format.} \format{An object of class \code{R6ClassGenerator} of length 24.} \usage{ TapReporter } \description{ This reporter will output results in the Test Anything Protocol (TAP), a simple text-based interface between testing modules in a test harness. For more information about TAP, see http://testanything.org } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{Reporter}}, \code{\link{RstudioReporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} \keyword{datasets} testthat/man/expect_error.Rd0000644000176200001440000001051113564563701015665 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-condition.R \name{expect_error} \alias{expect_error} \alias{expect_condition} \title{Expectation: does code throw error or other condition?} \usage{ expect_error(object, regexp = NULL, class = NULL, ..., info = NULL, label = NULL) expect_condition(object, regexp = NULL, class = NULL, ..., info = NULL, label = NULL) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{regexp}{Regular expression to test against. \itemize{ \item A character vector giving a regular expression that must match the error message. \item If \code{NULL}, the default, asserts that there should be a error, but doesn't test for a specific value. \item If \code{NA}, asserts that there should be no errors. }} \item{class}{Instead of supplying a regular expression, you can also supply a class name. This is useful for "classed" conditions.} \item{...}{Arguments passed on to \code{expect_match} \describe{ \item{all}{Should all elements of actual value match \code{regexp} (TRUE), or does only one need to match (FALSE)} \item{perl}{logical. Should Perl-compatible regexps be used?} \item{fixed}{logical. If \code{TRUE}, \code{pattern} is a string to be matched as is. Overrides all conflicting arguments.} }} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} } \value{ If \code{regexp = NA}, the value of the first argument; otherwise the captured condition. 
} \description{ \code{expect_error()} and \code{expect_condition()} check that code throws an error or condition with a message that matches \code{regexp}, or a class that inherits from \code{class}. See below for more details. } \section{Testing \code{message} vs \code{class}}{ When checking that code generates an error, it's important to check that the error is the one you expect. There are two ways to do this. The first way is the simplest: you just provide a \code{regexp} that matches some fragment of the error message. This is easy, but fragile, because the test will fail if the error message changes (even if it's the same error). A more robust way is to test for the class of the error, if it has one. You can learn more about custom conditions at \url{https://adv-r.hadley.nz/conditions.html#custom-conditions}, but in short, errors are S3 classes and you can generate a custom class and check for it using \code{class} instead of \code{regexp}. Because this is a more reliable check, \code{expect_error()} will warn if the error has a custom class but you are testing the message. Eliminate the warning by using \code{class} instead of \code{regexp}. Alternatively, if you think the warning is a false positive, use \code{class = "error"} to suppress it for any input. If you are using \code{expect_error()} to check that an error message is formatted in such a way that it makes sense to a human, we now recommend using \code{\link[=verify_output]{verify_output()}} instead. } \examples{ f <- function() stop("My error!") expect_error(f()) expect_error(f(), "My error!") # You can use the arguments of grepl to control the matching expect_error(f(), "my error!", ignore.case = TRUE) # If you are working with classed conditions, it's better to test for # the class name, rather than the error message (which may change over time) custom_err <- function(var) { rlang::abort("A special error", var = var, .subclass = "testthat_special") } expect_error(custom_err("a"), class = "testthat_special") # Note that `expect_error()` returns the error object so you can test # its components if needed err <- expect_error(custom_err("a"), class = "testthat_special") expect_equal(err$var, "a") } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_length}}, \code{\link{expect_match}}, \code{\link{expect_message}}, \code{\link{expect_named}}, \code{\link{expect_null}}, \code{\link{expect_output}}, \code{\link{expect_silent}}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/RstudioReporter.Rd0000644000176200001440000000157113564523315016343 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-rstudio.R \docType{data} \name{RstudioReporter} \alias{RstudioReporter} \title{Test reporter: RStudio} \format{An object of class \code{R6ClassGenerator} of length 24.} \usage{ RstudioReporter } \description{ This reporter is designed for output to RStudio. It produces results in an easily parsed form.
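A usage sketch (the reporter can be selected by its lowercase name): \preformatted{
test_file(testthat_example("success"), reporter = "rstudio")
}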
} \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} \keyword{datasets} testthat/man/verify_output.Rd0000644000176200001440000000563613564563701016114 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/verify-output.R \name{verify_output} \alias{verify_output} \title{Verify output} \usage{ verify_output(path, code, width = 80, crayon = FALSE, unicode = FALSE, env = caller_env()) } \arguments{ \item{path}{Path to record results. This should usually be a call to \code{\link[=test_path]{test_path()}} to ensure that the same path is used when run interactively (when the working directory is typically the project root), and when run as an automated test (when the working directory will be \code{tests/testthat}).} \item{code}{Code to execute. This will usually be a multiline expression contained within \code{{}} (similarly to \code{test_that()} calls).} \item{width}{Width of console output} \item{crayon}{Enable crayon package colouring?} \item{unicode}{Enable cli package UTF-8 symbols? If you set this to \code{TRUE}, call \code{skip_if(!cli::is_utf8_output())} to disable the test on your CI platforms that don't support UTF-8 (e.g. Windows).} \item{env}{The environment to evaluate \code{code} in.} } \description{ This is a regression test that records interwoven code and output into a file, in a similar way to knitting an \code{.Rmd} (but see caveats below). \code{verify_output()} is designed particularly for testing print methods and error messages, where the primary goal is to ensure that the output is helpful to a human. Obviously, you can't test that with code, so the best you can do is make the results explicit by saving them to a text file. This makes the output easy to see in code reviews, and ensures that you don't change the output accidentally. \code{verify_output()} is designed to be used with git: to see what has changed from the previous run, you'll need to use \code{git diff} or similar. } \section{Syntax}{ \code{verify_output()} can only capture the abstract syntax tree, losing all whitespace and comments. To mildly offset this limitation: \itemize{ \item Strings are converted to R comments in the output. \item Strings starting with \code{# } are converted to headers in the output. } } \section{CRAN}{ On CRAN, \code{verify_output()} will never fail, even if the output changes. This avoids false positives because tests of print methods and error messages are often fragile due to implicit dependencies on other packages, and failure does not imply incorrect computation, just a change in presentation.
} \examples{ # The first argument would usually be `test_path("informative-name.txt"`) # but that is not permitted in examples path <- tempfile() verify_output(path, { head(mtcars) log(-10) "a" * 3 }) writeLines(readLines(path)) # Use strings to create comments in the output verify_output(tempfile(), { "Print method" head(mtcars) "Warning" log(-10) "Error" "a" * 3 }) # Use strings starting with # to create headings verify_output(tempfile(), { "# Base functions" head(mtcars) log(-10) "a" * 3 }) } testthat/man/context.Rd0000644000176200001440000000140113456034771014646 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/context.R \name{context} \alias{context} \title{Describe the context of a set of tests.} \usage{ context(desc) } \arguments{ \item{desc}{description of context. Should start with a capital letter.} } \description{ Use of \code{context()} is no longer recommend. Instead omit it, and messages will use the name of the file instead. This ensures that the context and test file name are always in sync. } \details{ A context defines a set of tests that test related functionality. Usually you will have one context per file, but you may have multiple contexts in a single file if you so choose. } \examples{ context("String processing") context("Remote procedure calls") } \keyword{internal} testthat/man/expect_output.Rd0000644000176200001440000000477213564563701016110 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-output.R \name{expect_output} \alias{expect_output} \title{Expectation: does code print output to the console?} \usage{ expect_output(object, regexp = NULL, ..., info = NULL, label = NULL, width = 80) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{regexp}{Regular expression to test against. \itemize{ \item A character vector giving a regular expression that must match the output. \item If \code{NULL}, the default, asserts that there should output, but doesn't check for a specific value. \item If \code{NA}, asserts that there should be no output. }} \item{...}{Arguments passed on to \code{expect_match} \describe{ \item{all}{Should all elements of actual value match \code{regexp} (TRUE), or does only one need to match (FALSE)} \item{perl}{logical. Should Perl-compatible regexps be used?} \item{fixed}{logical. If \code{TRUE}, \code{pattern} is a string to be matched as is. Overrides all conflicting arguments.} }} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} \item{width}{Number of characters per line of output. This does not inherit from \code{getOption("width")} so that tests always use the same output width, minimising spurious differences.} } \value{ The first argument, invisibly. } \description{ Test for output produced by \code{print()} or \code{cat()}. This is best used for very simple output; for more complex cases use \code{\link[=verify_output]{verify_output()}}. 
} \examples{ str(mtcars) expect_output(str(mtcars), "32 obs") expect_output(str(mtcars), "11 variables") # You can use the arguments of grepl to control the matching expect_output(str(mtcars), "11 VARIABLES", ignore.case = TRUE) expect_output(str(mtcars), "$ mpg", fixed = TRUE) } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}}, \code{\link{expect_length}}, \code{\link{expect_match}}, \code{\link{expect_message}}, \code{\link{expect_named}}, \code{\link{expect_null}}, \code{\link{expect_silent}}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/expect_invisible.Rd0000644000176200001440000000176313456034771016531 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-invisible.R \name{expect_invisible} \alias{expect_invisible} \alias{expect_visible} \title{Expectation: does expression return visibly or invisibly?} \usage{ expect_invisible(call, label = NULL) expect_visible(call, label = NULL) } \arguments{ \item{call}{A function call.} \item{label}{Used to customise failure messages. For expert use only.} } \value{ The evaluated \code{call}, invisibly. } \description{ Use this to test whether a function returns a visible or invisible output. Typically you'll use this to check that functions called primarily for their side-effects return their data argument invisibly. } \examples{ expect_invisible(x <- 10) expect_visible(x) # Typically you'll assign the result of the expectation so you can # also check that the value is as you expect. greet <- function(name) { message("Hi ", name) invisible(name) } out <- expect_invisible(greet("Hadley")) expect_equal(out, "Hadley") } testthat/man/compare.Rd0000644000176200001440000000474413564563701014625 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compare.R, R/compare-character.R, % R/compare-numeric.R, R/compare-time.R \name{compare} \alias{compare} \alias{compare.default} \alias{compare.character} \alias{compare.numeric} \alias{testthat_tolerance} \alias{compare.POSIXt} \title{Provide human-readable comparison of two objects} \usage{ compare(x, y, ...) \method{compare}{default}(x, y, ..., max_diffs = 9) \method{compare}{character}(x, y, check.attributes = TRUE, ..., max_diffs = 5, max_lines = 5, width = cli::console_width()) \method{compare}{numeric}(x, y, tolerance = testthat_tolerance(), check.attributes = TRUE, ..., max_diffs = 9) testthat_tolerance(x) \method{compare}{POSIXt}(x, y, tolerance = 0.001, ..., max_diffs = 9) } \arguments{ \item{x, y}{Objects to compare} \item{...}{Additional arguments used to control specifics of comparison} \item{max_diffs}{Maximum number of differences to show} \item{check.attributes}{If \code{TRUE}, also checks values of attributes.} \item{max_lines}{Maximum number of lines to show from each difference} \item{width}{Width of output device} \item{tolerance}{Numerical tolerance: any differences smaller than this value will be ignored. The default tolerance is \code{sqrt(.Machine$double.eps)}, unless long doubles are not available, in which case the test is skipped.} } \description{ \code{compare} is similar to \code{\link[base:all.equal]{base::all.equal()}}, but shows you examples of where the failures occurred.
} \examples{ # Character ----------------------------------------------------------------- x <- c("abc", "def", "jih") compare(x, x) y <- paste0(x, "y") compare(x, y) compare(letters, paste0(letters, "-")) x <- "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis cursus tincidunt auctor. Vestibulum ac metus bibendum, facilisis nisi non, pulvinar dolor. Donec pretium iaculis nulla, ut interdum sapien ultricies a. " y <- "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis cursus tincidunt auctor. Vestibulum ac metus1 bibendum, facilisis nisi non, pulvinar dolor. Donec pretium iaculis nulla, ut interdum sapien ultricies a. " compare(x, y) compare(c(x, x), c(y, y)) # Numeric ------------------------------------------------------------------- x <- y <- runif(100) y[sample(100, 10)] <- 5 compare(x, y) x <- y <- 1:10 x[5] <- NA x[6] <- 6.5 compare(x, y) # Compare ignores minor numeric differences in the same way # as all.equal. compare(x, x + 1e-9) } \keyword{internal} testthat/man/testthat_results.Rd0000644000176200001440000000111413173076020016595 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-list.R \name{testthat_results} \alias{testthat_results} \title{Create a \code{testthat_results} object from the test results as stored in the ListReporter results field.} \usage{ testthat_results(results) } \arguments{ \item{results}{a list as stored in ListReporter} } \value{ its list argument as a \code{testthat_results} object } \description{ Create a \code{testthat_results} object from the test results as stored in the ListReporter results field. } \seealso{ ListReporter } \keyword{internal} testthat/man/describe.Rd0000644000176200001440000000403513164532741014744 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/describe.R \name{describe} \alias{describe} \title{describe: a BDD testing language} \usage{ describe(description, code) } \arguments{ \item{description}{description of the feature} \item{code}{test code containing the specs} } \description{ A simple BDD DSL for writing tests. The language is similar to RSpec for Ruby or Mocha for JavaScript. BDD tests read like sentences and it should thus be easier to understand what the specification of a function/component is. } \details{ Tests using the \code{describe} syntax not only verify the tested code, but also document its intended behaviour. Each \code{describe} block specifies a larger component or function and contains a set of specifications. A specification is defined by an \code{it} block. Each \code{it} block functions as a test and is evaluated in its own environment. You can also have nested \code{describe} blocks. This test syntax helps to test the intended behaviour of your code. For example: you want to write a new function for your package. Try to describe the specification first using \code{describe}, before you write any code. After that, you start to implement the tests for each specification (i.e. the \code{it} block). Use \code{describe} to verify that you implement the right things and use \code{\link[=test_that]{test_that()}} to ensure you do the things right.
} \examples{ describe("matrix()", { it("can be multiplied by a scalar", { m1 <- matrix(1:4, 2, 2) m2 <- m1 * 2 expect_equivalent(matrix(1:4 * 2, 2, 2), m2) }) it("can have not yet tested specs") }) # Nested specs: ## code addition <- function(a, b) a + b division <- function(a, b) a / b ## specs describe("math library", { describe("addition()", { it("can add two numbers", { expect_equivalent(1 + 1, addition(1, 1)) }) }) describe("division()", { it("can divide two numbers", { expect_equivalent(10 / 2, division(10, 2)) }) it("can handle division by 0") #not yet implemented }) }) } testthat/man/JunitReporter.Rd0000644000176200001440000000217513564523315016004 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-junit.R \docType{data} \name{JunitReporter} \alias{JunitReporter} \title{Test reporter: summary of errors in jUnit XML format.} \format{An object of class \code{R6ClassGenerator} of length 24.} \usage{ JunitReporter } \description{ This reporter includes detailed results about each test and summaries, written to a file (or stdout) in jUnit XML format. This can be read by the Jenkins Continuous Integration System to report on a dashboard etc. Requires the \emph{xml2} package. } \details{ To fit into the jUnit structure, context() becomes the \code{<testsuite>} name as well as the base of the \code{<testcase> classname}. The test_that() name becomes the rest of the \code{<testcase> classname}. The deparsed expect_that() call becomes the \code{<testcase>} name. On failure, the message goes into the \code{<failure>} node message argument (first line only) and into its text content (full message). Execution time and some other details are also recorded. References for the jUnit XML format: \url{http://llg.cubic.org/docs/junit/} } \keyword{datasets} testthat/man/evaluate_promise.Rd0000644000176200001440000000136013173076020016521 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/evaluate-promise.R \name{evaluate_promise} \alias{evaluate_promise} \title{Evaluate a promise, capturing all types of output.} \usage{ evaluate_promise(code, print = FALSE) } \arguments{ \item{code}{Code to evaluate.} } \value{ A list containing \item{result}{The result of the function} \item{output}{A string containing all the output from the function} \item{warnings}{A character vector containing the text from each warning} \item{messages}{A character vector containing the text from each message} } \description{ Evaluate a promise, capturing all types of output. } \examples{ evaluate_promise({ print("1") message("2") warning("3") 4 }) } \keyword{internal} testthat/DESCRIPTION0000644000176200001440000000516413570757323013642 0ustar liggesusersPackage: testthat Title: Unit Testing for R Version: 2.3.1 Authors@R: c( person("Hadley", "Wickham", , "hadley@rstudio.com", c("aut", "cre")), person("RStudio", role = c("cph", "fnd")), person("R Core team", role = "ctb", comment = "Implementation of utils::recover()") ) Description: Software testing is important, but, in part because it is frustrating and boring, many of us avoid it. 'testthat' is a testing framework for R that is easy to learn and use, and integrates with your existing 'workflow'.
License: MIT + file LICENSE URL: http://testthat.r-lib.org, https://github.com/r-lib/testthat BugReports: https://github.com/r-lib/testthat/issues Depends: R (>= 3.1) Imports: cli, crayon (>= 1.3.4), digest, ellipsis, evaluate, magrittr, methods, pkgload, praise, R6 (>= 2.2.0), rlang (>= 0.4.1), withr (>= 2.0.0) Suggests: covr, curl (>= 0.9.5), devtools, knitr, rmarkdown, usethis, vctrs (>= 0.1.0), xml2 VignetteBuilder: knitr Encoding: UTF-8 RoxygenNote: 6.1.1 Collate: 'auto-test.R' 'capture-condition.R' 'capture-output.R' 'colour-text.R' 'compare.R' 'compare-character.R' 'compare-numeric.R' 'compare-time.R' 'context.R' 'describe.R' 'evaluate-promise.R' 'example.R' 'expect-comparison.R' 'expect-condition.R' 'expect-equality.R' 'expect-inheritance.R' 'expect-invisible.R' 'expect-known.R' 'expect-length.R' 'expect-logical.R' 'expect-messages.R' 'expect-named.R' 'expect-null.R' 'expect-output.R' 'reporter.R' 'expect-self-test.R' 'expect-setequal.R' 'expect-silent.R' 'expect-that.R' 'expect-vector.R' 'expectation.R' 'expectations-matches.R' 'make-expectation.R' 'mock.R' 'old-school.R' 'praise.R' 'quasi-label.R' 'recover.R' 'reporter-check.R' 'reporter-debug.R' 'reporter-fail.R' 'reporter-junit.R' 'reporter-list.R' 'reporter-location.R' 'reporter-minimal.R' 'reporter-multi.R' 'stack.R' 'reporter-progress.R' 'reporter-rstudio.R' 'reporter-silent.R' 'reporter-stop.R' 'reporter-summary.R' 'reporter-tap.R' 'reporter-teamcity.R' 'reporter-zzz.R' 'skip.R' 'source.R' 'teardown.R' 'test-compiled-code.R' 'test-directory.R' 'test-example.R' 'test-files.R' 'test-path.R' 'test-that.R' 'try-again.R' 'utils-io.R' 'utils.R' 'verify-output.R' 'watcher.R' NeedsCompilation: yes Packaged: 2019-11-19 12:37:52 UTC; lionel Author: Hadley Wickham [aut, cre], RStudio [cph, fnd], R Core team [ctb] (Implementation of utils::recover()) Maintainer: Hadley Wickham Repository: CRAN Date/Publication: 2019-12-01 15:40:03 UTC testthat/build/0000755000176200001440000000000013564761037013225 5ustar liggesuserstestthat/build/vignette.rds0000644000176200001440000000032413564761037015563 0ustar liggesusersb```b`fcd`b2 1# 'K.-.M(HM.I, MAS% VM82JrsД`LaZ `aBÚnKjAj^ HvѴpxVaaqIY0AAn0Ez0?"Ht&${+%$Q/n]Etestthat/tests/0000755000176200001440000000000013564761040013262 5ustar liggesuserstestthat/tests/test-catch.R0000644000176200001440000000414213456034771015451 0ustar liggesuserslibrary(testthat) local({ # Disable test on Windows, pending devtools # compatibility with new toolchain isWindows <- Sys.info()[["sysname"]] == "Windows" if (isWindows) return() # Disable tests on Solaris, because we don't use Catch there. 
isSolaris <- Sys.info()[["sysname"]] == "SunOS" if (isSolaris) return() if (!requireNamespace("devtools", quietly = TRUE)) return() if (!requireNamespace("usethis", quietly = TRUE)) return() devel <- try(devtools::has_devel(), silent = TRUE) if (!isTRUE(devel)) return() quietly <- function(expr) { suppressMessages(capture_output(result <- expr)) result } perform_test <- function(pkgName, catchEnabled) { owd <- setwd(tempdir()) on.exit(setwd(owd), add = TRUE) pkgPath <- file.path(tempdir(), pkgName) libPath <- file.path(tempdir(), "rlib") if (!utils::file_test("-d", libPath)) dir.create(libPath) .libPaths(c(libPath, .libPaths())) on.exit({ unlink(pkgPath, recursive = TRUE) unlink(libPath, recursive = TRUE) }, add = TRUE) quietly(usethis::create_package(pkgPath)) quietly(testthat::use_catch(pkgPath)) cat("LinkingTo: testthat", file = file.path(pkgPath, "DESCRIPTION"), append = TRUE, sep = "\n") cat( sprintf("useDynLib(%s, .registration=TRUE)", pkgName), file = file.path(pkgPath, "NAMESPACE"), append = TRUE, sep = "\n" ) if (!catchEnabled) { makevarsPath <- file.path( pkgPath, "src", if (isWindows) "Makevars.win" else "Makevars" ) cat( "PKG_CPPFLAGS = -DTESTTHAT_DISABLED", file = makevarsPath, sep = "\n" ) } devtools::install(pkgPath, quick = TRUE, quiet = FALSE) library(pkgName, character.only = TRUE) stopifnot(.Call("run_testthat_tests", PACKAGE = pkgName)) devtools::unload(pkgName) } withr::with_envvar(c(R_TESTS = ''), perform_test("testthatclient1", TRUE)) withr::with_envvar(c(R_TESTS = ''), perform_test("testthatclient2", FALSE)) }) testthat/tests/testthat/0000755000176200001440000000000013570757322015127 5ustar liggesuserstestthat/tests/testthat/test-expect-null.R0000644000176200001440000000022713547563465020477 0ustar liggesuserstest_that("expect_null works", { expect_success(expect_null(NULL)) expect_failure(expect_null(1L)) expect_failure(expect_null(environment())) }) testthat/tests/testthat/test-source_dir.R0000644000176200001440000000104613547563465020375 0ustar liggesuserstest_that("source_dir()", { res <- source_dir("test_dir", pattern = "hello", chdir = TRUE, wrap = FALSE) expect_equal(res[[1]](), "Hello World") res <- source_dir(normalizePath("test_dir"), pattern = "hello", chdir = TRUE, wrap = FALSE) expect_equal(res[[1]](), "Hello World") res <- source_dir("test_dir", pattern = "hello", chdir = FALSE, wrap = FALSE) expect_equal(res[[1]](), "Hello World") res <- source_dir(normalizePath("test_dir"), pattern = "hello", chdir = FALSE, wrap = FALSE) expect_equal(res[[1]](), "Hello World") }) testthat/tests/testthat/test-bare.R0000644000176200001440000000037613547563465017155 0ustar liggesusersexpect_equal(2, 2) expect_true(TRUE) expect_error(stop("!")) stopifnot( tryCatch( expect_true(TRUE), expectation_failure = function(e) FALSE ) ) stopifnot( tryCatch( expect_true(FALSE), expectation_failure = function(e) TRUE ) ) testthat/tests/testthat/test-expect-named.R0000644000176200001440000000105213547563465020606 0ustar liggesuserstest_that("expected_named verifies presence of names", { expect_success(expect_named(c(a = 1))) expect_failure(expect_named(1:10)) }) test_that("expected_named verifies actual of names", { expect_success(expect_named(c(a = 1), "a")) expect_failure(expect_named(c(a = 1), "b")) }) test_that("expected_named optionally ignores case", { expect_success(expect_named(c(a = 1), "A", ignore.case = TRUE)) }) test_that("expected_named optionally ignores order", { expect_success(expect_named(c(a = 1, b = 2), c("b", "a"), ignore.order = TRUE)) }) 
testthat/tests/testthat/test-expect-self-test.R0000644000176200001440000000124113547563465021430 0ustar liggesuserstest_that("fail always fails", { expect_failure(fail()) expect_failure(fail("abc"), "abc") }) test_that("succeed always succeeds", { expect_success(succeed()) }) test_that("expect_success errors if null", { expect_error(expect_success(NULL)) }) test_that("expect_success errors with msg", { expect_error(expect_success(stop("asdf")), 'asdf') }) test_that("expect_failure errors if null", { expect_error(expect_failure(NULL)) }) test_that("expect_failure errors if no failure", { expect_error(expect_failure(TRUE)) }) test_that("show_failure", { expect_null(show_failure(NULL)) expect_output(show_failure(expect_true(FALSE)), "FALSE isn't true.") }) testthat/tests/testthat/test-expect-match.R0000644000176200001440000000205113547563465020616 0ustar liggesuserstest_that("extra arguments to matches passed onto grepl", { expect_success(expect_match("te*st", "e*", fixed = TRUE)) expect_success(expect_match("test", "TEST", ignore.case = TRUE)) }) test_that("special regex characters are escaped in output", { error <- tryCatch(expect_match("f() test", "f() test"), expectation = function(e) e$message) expect_equal(error, "\"f\\(\\) test\" does not match \"f() test\".\nActual value: \"f\\(\\) test\"") }) test_that("correct reporting of expected label", { expect_failure(expect_match("[a]", "[b]"), escape_regex("[a]"), fixed = TRUE) expect_failure(expect_match("[a]", "[b]", fixed = TRUE), "[a]", fixed = TRUE) }) test_that("errors if obj is empty str", { x <- character(0) err <- expect_error( expect_match(x, 'asdf'), class = "expectation_failure" ) expect_match(err$message, 'is empty') }) test_that("prints multiple unmatched values", { err <- expect_error( expect_match(letters[1:10], 'asdf'), class = "expectation_failure" ) expect_match(err$message, "does not match") }) testthat/tests/testthat/test-compare-numeric.R0000644000176200001440000000437713547563465021337 0ustar liggesusers# Metadata ---------------------------------------------------------------- test_that("numeric types are compatible", { expect_true(compare(1, 1L)$equal) expect_true(compare(1L, 1)$equal) }) test_that("non-numeric types are not compatible", { expect_match(compare(1, "a")$message, "double is not character") }) test_that("base lengths must be identical", { expect_match(compare(1, c(1, 2))$message, "1 is not 2") }) test_that("classes must be identical", { f1 <- factor("a") f2 <- factor("a", ordered = TRUE) expect_match(compare(1L, f1)$message, "integer is not factor") expect_match(compare(1L, f2)$message, "integer is not ordered/factor") }) test_that("attributes must be identical", { x1 <- 1L x2 <- c(a = 1L) x3 <- c(b = 1L) x4 <- structure(1L, a = 1) x5 <- structure(1L, b = 1) expect_match(compare(x1, x2)$message, "names for current") expect_match(compare(x2, x3)$message, "Names: 1 string mismatch") expect_match(compare(x1, x4)$message, "target is NULL") expect_match(compare(x4, x5)$message, "Names: 1 string mismatch") }) test_that("unless check.attributes is FALSE", { x1 <- 1L x2 <- c(a = 1L) x3 <- structure(1L, a = 1) expect_equal(compare(x1, x2, check.attributes = FALSE)$message, "Equal") expect_equal(compare(x1, x3, check.attributes = FALSE)$message, "Equal") expect_equal(compare(x2, x3, check.attributes = FALSE)$message, "Equal") }) # Values ------------------------------------------------------------------ test_that("two identical vectors are the same", { expect_true(compare(1:10, 1:10)$equal) }) test_that("unnamed 
arguments to all.equal passed through correctly", { expect_equal(415, 416, 0.01) }) test_that("named arguments to all.equal passed through", { expect_equal(415, 416, tolerance = 0.01) }) test_that("tolerance used for individual comparisons", { x1 <- 1:3 x2 <- x1 + c(0, 0, 0.1) expect_false(compare(x1, x2)$equal) expect_true(compare(x1, x2, tolerance = 0.1)$equal) }) # Mismatch table ---------------------------------------------------------- test_that("mismatch_numeric truncates diffs", { x <- mismatch_numeric(1:11, 11:1) expect_equal(x$n, 11) expect_equal(x$n_diff, 10) lines <- strsplit(format(x, max_diffs = 5), "\n")[[1]] expect_equal(length(lines), 5 + 2) }) testthat/tests/testthat/test-test-that.R0000644000176200001440000000766613564563701020162 0ustar liggesuserscontext("test_that") test_that("messages are suppressed", { message("YOU SHOULDN'T SEE ME") succeed() }) test_that("errors are captured", { f <- function() g() g <- function() stop("I made a mistake", call. = FALSE) reporter <- with_reporter("silent", { test_that("", f()) }) expect_equal(length(reporter$expectations()), 1) }) test_that("errors captured even when looking for messages", { reporter <- with_reporter("silent", { test_that("", expect_message(stop("a"))) }) expect_equal(length(reporter$expectations()), 1) expect_true(expectation_error(reporter$expectations()[[1L]])) }) test_that("errors captured even when looking for warnings", { reporter <- with_reporter("silent", { test_that("", expect_warning(stop())) }) expect_equal(length(reporter$expectations()), 1) expect_true(expectation_error(reporter$expectations()[[1L]])) }) test_that("failures are errors", { f <- function() { expect_true(FALSE) expect_false(TRUE) } expect_error(f(), "isn't true", class = "expectation_failure") }) test_that("infinite recursion is captured", { f <- function() f() reporter <- with_reporter("silent", { withr::with_options( list(expressions = sys.nframe() + 100), test_that("", f()) ) }) expect_equal(length(reporter$expectations()), 1) }) test_that("return value from test_that", { with_reporter("", success <- test_that("success", succeed())) expect_true(success) with_reporter("", success <- test_that("success", expect(TRUE, "Yes!"))) expect_true(success) with_reporter("", error <- test_that("error", barf)) expect_false(error) with_reporter("", failure <- test_that("failure", expect_true(FALSE))) expect_false(failure) with_reporter("", failure <- test_that("failure", fail())) expect_false(failure) with_reporter("", success <- test_that("failure", expect(FALSE, "No!"))) expect_false(failure) with_reporter("", skip <- test_that("skip", skip("skipping"))) expect_false(skip) # No tests = automatically generated skip with_reporter("", skip <- test_that("success", {})) expect_false(success) }) # Line numbering ---------------------------------------------------------- expectation_lines <- function(code) { srcref <- attr(substitute(code), "srcref") if (!is.list(srcref)) { stop("code doesn't have srcref", call. 
= FALSE) } results <- with_reporter("silent", code)$expectations() unlist(lapply(results, function(x) x$srcref[1])) - srcref[[1]][1] } test_that("line numbers captured in simple case", { lines <- expectation_lines({ context("testing testFile") # line 1 test_that("simple", { # line 2 expect_true(FALSE) # line 3 }) # line 4 }) expect_equal(lines, 3) }) test_that("line numbers captured inside another function", { lines <- expectation_lines({ test_that("simple", { # line 1 suppressMessages(expect_true(FALSE)) # line 2 }) }) expect_equal(lines, 2) }) test_that("line numbers captured inside a loop", { lines <- expectation_lines({ test_that("simple", { # line 1 for (i in 1:4) expect_true(TRUE) # line 2 }) }) expect_equal(lines, rep(2, 4)) }) test_that("line numbers captured for skip()s", { lines <- expectation_lines({ test_that("simple", { # line 1 skip("Not this time") # line 2 }) # line 3 }) expect_equal(lines, 2) }) test_that("line numbers captured for stop()s", { lines <- expectation_lines({ test_that("simple", { # line 1 skip("Not this time") # line 2 }) # line 3 }) expect_equal(lines, 2) }) test_that("can signal warnings and messages without restart", { expect_null(signalCondition(message_cnd("foo"))) return("Skipping following test because it verbosely registers the warning") expect_null(signalCondition(warning_cnd("foo"))) }) testthat/tests/testthat/test-verify-conditions-lines.txt0000644000176200001440000000032713564563701023431 0ustar liggesusers> message("First.\nSecond.") Message: First. Second. > warning("First.\nSecond.") Warning in eval(expr, envir, enclos): First. Second. > stop("First.\nSecond.") Error in eval(expr, envir, enclos): First. Second. testthat/tests/testthat/test-compare-time.R0000644000176200001440000000237113547563465020623 0ustar liggesusers# Metadata ---------------------------------------------------------------- test_that("both POSIXt classes are compatible", { x1 <- Sys.time() x2 <- as.POSIXlt(x1) expect_true(compare(x1, x2)$equal) expect_true(compare(x2, x1)$equal) }) test_that("other classes are not", { expect_match(compare(Sys.time(), 1)$message, "POSIXct/POSIXt is not numeric") }) test_that("base lengths must be identical", { x1 <- Sys.time() x2 <- c(x1, x1 - 3600) expect_match(compare(x1, x2)$message, "1 is not 2") }) test_that("tzones must be identical", { t1 <- ISOdatetime(2016, 2, 29, 12, 13, 14, "EST") t2 <- ISOdatetime(2016, 2, 29, 12, 13, 14, "US/Eastern") expect_match(compare(t1, t2)$message, '"tzone": 1 string mismatch') }) # Values ------------------------------------------------------------------ test_that("two identical vectors are the same", { x <- Sys.time() expect_true(compare(x, x)$equal) }) test_that("two different values are not the same", { x1 <- Sys.time() x2 <- x1 + 3600 expect_false(compare(x1, x2)$equal) }) test_that("uses all.equal tolerance", { x1 <- structure(1457284588.83749, class = c("POSIXct", "POSIXt")) x2 <- structure(1457284588.837, class = c("POSIXct", "POSIXt")) expect_true(compare(x1, x2)$equal) }) testthat/tests/testthat/test-old-school.R0000644000176200001440000000256613547563465020312 0ustar liggesuserstest_that("old school logical works", { expect_warning( expect_success(expect_that(TRUE, is_true())), "deprecated") expect_warning( expect_success(expect_that(FALSE, is_false())), "deprecated") }) test_that("old school types still work", { expect_success(expect_that(1L, is_a("integer"))) }) test_that("tidyverse conflicts throw warnings", { expect_warning( expect_that(NULL, is_null()), "deprecated" ) expect_warning( 
expect_that("te*st", matches("e*", fixed = TRUE)), "deprecated" ) expect_warning( expect_that("test", matches("TEST", ignore.case = TRUE)), "deprecated" ) }) test_that("old school names still work", { expect_success(expect_that("a", has_names(NULL))) }) test_that("old school comparisons still work", { expect_success(expect_that(10, is_less_than(11))) expect_failure(expect_that(10, is_more_than(11))) }) test_that("old school equality tests still work", { expect_success(expect_that(10, equals(10))) expect_success(expect_that(10, is_identical_to(10))) expect_success(expect_that(10, is_equivalent_to(10))) }) test_that("old school output tests still work", { expect_success(expect_that(stop("!"), throws_error())) expect_success(expect_that(warning("!"), gives_warning())) expect_success(expect_that(message("!"), shows_message())) expect_success(expect_that(print("!"), prints_text())) }) testthat/tests/testthat/test-verify-constructed-calls.txt0000644000176200001440000000015613564563701023601 0ustar liggesusers> expr(foo(!!c("bar", "baz"))) foo(c("bar", "baz")) > binding <- quote(foo) > expr(foo(!!binding)) foo(foo) testthat/tests/testthat/test-examples.R0000644000176200001440000000055313547563465020057 0ustar liggesuserstest_that("test_examples works with installed packages", { env_test$package <- "testthat" on.exit(env_test$package <- NULL) with_mock( test_rd = identity, { res <- test_examples() } ) expect_true(length(res) > 1) }) test_that("test_examples fails if no examples", { expect_error(test_examples("asdf"), "Could not find examples") }) testthat/tests/testthat/test-catch.R0000644000176200001440000000043113547563465017316 0ustar liggesuserstest_that("get_routine() finds own 'run_testthat_tests'", { routine <- get_routine("testthat", "run_testthat_tests") expect_is(routine, "NativeSymbolInfo") }) test_that("get_routine() fails when no routine exists", { expect_error(get_routine("utils", "no_such_routine")) }) testthat/tests/testthat/test-source.R0000644000176200001440000000226713547563465017545 0ustar liggesuserstest_that("source_file always uses UTF-8 encoding", { has_locale <- function(l) { has <- TRUE tryCatch( withr::with_locale(c(LC_CTYPE = l), "foobar"), warning = function(w) has <<- FALSE, error = function(e) has <<- FALSE ) has } ## Some text in UTF-8 tmp <- tempfile() on.exit(unlink(tmp), add = TRUE) utf8 <- as.raw(c( 0xc3, 0xa1, 0x72, 0x76, 0xc3, 0xad, 0x7a, 0x74, 0xc5, 0xb1, 0x72, 0xc5, 0x91, 0x20, 0x74, 0xc3, 0xbc, 0x6b, 0xc3, 0xb6, 0x72, 0x66, 0xc3, 0xba, 0x72, 0xc3, 0xb3, 0x67, 0xc3, 0xa9, 0x70 )) writeBin(c(charToRaw("x <- \""), utf8, charToRaw("\"\n")), tmp) run_test <- function(locale) { if (has_locale(locale)) { env <- new.env() withr::with_locale( c(LC_CTYPE = locale), source_file(tmp, env = env, wrap = FALSE) ) expect_equal(Encoding(env$x), "UTF-8") expect_equal(charToRaw(env$x), utf8) } } ## Try to read it in latin1 and UTF-8 locales ## They have diffefent names on Unix and Windows run_test("en_US.ISO8859-1") run_test("en_US.UTF-8") run_test("English_United States.1252") run_test("German_Germany.1252") run_test(Sys.getlocale("LC_CTYPE")) }) testthat/tests/testthat/test-expect-messages-warning.txt0000644000176200001440000000154113564563701023405 0ustar liggesusers── 1. Failure: (@test-expect-messages.R#85) ────────────────────────────────── `null()` did not produce any warnings. ── 2. Failure: (@test-expect-messages.R#86) ────────────────────────────────── `foo()` generated warnings: * xxx * yyy ── 3. 
Failure: (@test-expect-messages.R#87) ────────────────────────────────── `foo()` produced unexpected warnings. Expected match: zzz Actual values: * xxx * yyy ── 4. Failure: (@test-expect-messages.R#88) ────────────────────────────────── `foo()` produced unexpected warnings. Expected match: xxx Actual values: * xxx * yyy testthat/tests/testthat/test-warning/0000755000176200001440000000000013456035067017547 5ustar liggesuserstestthat/tests/testthat/test-warning/test-warning.R0000644000176200001440000000012113456034771022307 0ustar liggesuserstest_that("warning emitted", { warning("This is not a test", call. = FALSE) }) testthat/tests/testthat/test-list-reporter/0000755000176200001440000000000013564563701020716 5ustar liggesuserstestthat/tests/testthat/test-list-reporter/test-exception-outside-tests.R0000644000176200001440000000040313564563701026623 0ustar liggesuserscontext('exception outside test') # the objective is to test what happens if some code fails outside of tests # i.e. not inside a test_that() call. test_that("before", expect_true(TRUE)) stop('dying outside of tests') test_that("after", expect_true(TRUE)) testthat/tests/testthat/test-list-reporter/test-bare-expectations.R0000644000176200001440000000023413564563701025432 0ustar liggesuserscontext('bare expectations') test_that("before", expect_true(TRUE)) # this is a bare expectation expect_true(TRUE) test_that("after", expect_true(TRUE)) testthat/tests/testthat/test-list-reporter/test-exercise-list-reporter.R0000644000176200001440000000043713564563701026442 0ustar liggesuserscontext("context1") test_that("dummy1", expect_true(TRUE)) test_that("dummy2", expect_true(TRUE)) context("context2") test_that("A passing test", expect_true(TRUE)) test_that("A failing test", expect_true(FALSE)) test_that("A crashing test", { stop('argh') expect_true(TRUE) }) testthat/tests/testthat/test-list-reporter/test-only-error.R0000644000176200001440000000003713564563701024126 0ustar liggesusersstop('dying outside of tests') testthat/tests/testthat/test-teardown-1.R0000644000176200001440000000135313547563465020221 0ustar liggesuserstest_that("teardown adds to queue", { on.exit(teardown_reset()) expect_length(teardown_env$queue, 0) teardown({}) expect_length(teardown_env$queue, 1) teardown({}) expect_length(teardown_env$queue, 2) }) test_that("teardowns runs in order", { on.exit(teardown_reset()) a <- 1 teardown(a <<- 2) teardown(a <<- 3) expect_length(teardown_env$queue, 2) teardown_run() expect_equal(a, 3) expect_length(teardown_env$queue, 0) }) # Cross-test check -------------------------------------------------------- test_that("file is created", { # test-testthat-2 confirms that this is deleted. 
write_lines("test", "teardown.txt") expect_true(file.exists("teardown.txt")) }) teardown({ file.remove("teardown.txt") }) testthat/tests/testthat/test-cpp.R0000644000176200001440000000011413547563465017014 0ustar liggesuserstest_that("Catch unit tests pass", { expect_cpp_tests_pass("testthat") }) testthat/tests/testthat/test-reporter-zzz.R0000644000176200001440000000110013547563465020723 0ustar liggesuserstest_that("can locate reporter from name", { expect_equal(find_reporter("minimal"), MinimalReporter$new()) expect_equal(find_reporter("summary"), SummaryReporter$new()) }) test_that("useful error message if can't find reporter", { expect_error( find_reporter(c("summary", "blah")), "Can not find test reporter blah" ) }) test_that("character vector yields multi reporter", { expect_equal( find_reporter(c("summary", "stop")), MultiReporter$new( reporters = list( SummaryReporter$new(), StopReporter$new() ) ) ) }) testthat/tests/testthat/test-expect_that.R0000644000176200001440000000015313547563465020545 0ustar liggesuserstest_that("expect_that returns the input value", { res <- expect_true(TRUE) expect_equal(res, TRUE) }) testthat/tests/testthat/test-make-expectation.R0000644000176200001440000000040213547563465021470 0ustar liggesuserscontext("make_expectation") test_that("make_expectation returns and prints expectation", { x <- 1:5 out <- capture_output( expect_equal(make_expectation(x), quote(expect_equal(x, 1:5))) ) expect_equal( out, "expect_equal(x, 1:5)" ) }) testthat/tests/testthat/test-expect-length.R0000644000176200001440000000110613547563465021003 0ustar liggesuserstest_that("length computed correctly", { expect_success(expect_length(1, 1)) expect_failure(expect_length(1, 2), "has length 1, not length 2.") expect_success(expect_length(1:10, 10)) expect_success(expect_length(letters[1:5], 5)) }) test_that("uses S4 length method", { A <- setClass("ExpectLengthA", slots = c(x = "numeric", y = "numeric")) setMethod("length", "ExpectLengthA", function(x) 5L) expect_success(expect_length(A(x = 1:9, y = 3), 5)) }) test_that("returns input", { x <- list(1:10, letters) out <- expect_length(x, 2) expect_identical(out, x) }) testthat/tests/testthat/test-environment.R0000644000176200001440000000054413547563465020605 0ustar liggesusersenv <- new.env() setClass("MyClass", where = env) test_that("Cannot create S4 class without special behaviour", { expect_error(setClass("MyClass2", where = env), NA) }) test_that("can't access variables from other tests (1)", { a <- 10 expect_true(TRUE) }) test_that("can't access variables from other tests (2)", { expect_false(exists("a")) }) testthat/tests/testthat/test-error/0000755000176200001440000000000013456035017017226 5ustar liggesuserstestthat/tests/testthat/test-error/test-error.R0000644000176200001440000000006713456034771021470 0ustar liggesuserstest_that("should fail", { expect_equal(1 + 1, 3) }) testthat/tests/testthat/test-helpers.R0000644000176200001440000000013713547563465017701 0ustar liggesusers# See helper-assign.R test_that("helpers run before tests", { expect_equal(abcdefghi, 10) }) testthat/tests/testthat/test-expect-logical.R0000644000176200001440000000103313547563465021133 0ustar liggesuserstest_that("logical tests act as expected", { expect_success(expect_true(TRUE)) expect_success(expect_false(FALSE)) expect_failure(expect_true(FALSE), "FALSE isn't true") expect_failure(expect_false(TRUE), "TRUE isn't false") }) test_that("logical tests ignore attributes", { expect_success(expect_true(c(a = TRUE))) expect_success(expect_false(c(a = FALSE))) 
}) test_that("additional info returned in message", { expect_failure(expect_true(FALSE, "NOPE"), "\nNOPE") expect_failure(expect_false(TRUE, "YUP"), "\nYUP") }) testthat/tests/testthat/test-test_dir.txt0000644000176200001440000000366713564563701020475 0ustar liggesusers> print(df) file context test nb failed skipped error warning passed 1 test-basic.R Basic logical tests act as expected 4 0 FALSE FALSE 2 2 2 test-basic.R Basic logical tests ignore attributes 4 0 FALSE FALSE 2 2 3 test-basic.R Basic equality holds 2 0 FALSE FALSE 0 2 4 test-basic.R Basic can't access variables from other tests 2 1 0 TRUE FALSE 0 0 5 test-basic.R Basic can't access variables from other tests 1 2 0 FALSE FALSE 1 1 6 test-empty.R empty empty test 1 0 TRUE FALSE 0 0 7 test-empty.R empty empty test with error 0 0 FALSE TRUE 0 0 8 test-errors.R error simple 0 0 FALSE TRUE 0 0 9 test-errors.R error after one success 1 0 FALSE TRUE 0 1 10 test-errors.R error after one failure 1 1 FALSE TRUE 0 0 11 test-errors.R error in the test 0 0 FALSE TRUE 0 0 12 test-errors.R error in expect_error 1 0 FALSE FALSE 0 1 13 test-failures.R failures just one failure 1 1 FALSE FALSE 0 0 14 test-failures.R failures one failure on two 2 1 FALSE FALSE 0 1 15 test-failures.R failures no failure 2 0 FALSE FALSE 0 2 16 test-helper.R helper helper test 1 0 FALSE FALSE 0 1 17 test-skip.R skip Skips skip 1 0 TRUE FALSE 0 0 testthat/tests/testthat/test-watcher.R0000644000176200001440000000514213564563701017665 0ustar liggesuserstest_that("compare state works correctly", { loc <- tempfile("watcher") dir.create(loc) empty <- dir_state(loc) expect_that(length(empty), equals(0)) file.create(file.path(loc, "test-1.txt")) one <- dir_state(loc) expect_that(length(one), equals(1)) expect_that(basename(names(one)), equals("test-1.txt")) diff <- compare_state(empty, one) expect_that(diff$n, equals(1)) expect_that(basename(diff$added), equals("test-1.txt")) write.table(mtcars, file.path(loc, "test-1.txt")) diff <- compare_state(one, dir_state(loc)) expect_that(diff$n, equals(1)) expect_that(basename(diff$modified), equals("test-1.txt")) file.rename(file.path(loc, "test-1.txt"), file.path(loc, "test-2.txt")) diff <- compare_state(one, dir_state(loc)) expect_that(diff$n, equals(2)) expect_that(basename(diff$deleted), equals("test-1.txt")) expect_that(basename(diff$added), equals("test-2.txt")) diff <- compare_state( c(file1 = "62da2", file2 = "e14a6", file3 = "6e6dd"), c(file1 = "62da2", file2 = "e14a6", file21 = "532fa", file3 = "3f4sa") ) expect_that(diff$n, equals(2)) expect_that(basename(diff$added), equals("file21")) expect_that(basename(diff$modified), equals("file3")) }) test_that("watcher works correctly", { skip_on_ci() skip_on_cran() if (Sys.which("bash") == "") { skip("bash not available") } if (system("bash -c 'which touch'", ignore.stdout = TRUE) != 0L) { skip("touch (or which) not available") } loc <- tempfile("watcher", tmpdir = "/tmp") dir.create(loc) code_path <- file.path(loc, "R") test_path <- file.path(loc, "tests") dir.create(code_path) dir.create(test_path) delayed.bash.cmd <- function(command) { system(paste0("bash -c 'sleep 1;", command, "'"), wait = FALSE) } add.code.file <- function(file.name) { delayed.bash.cmd(paste0("touch ", file.path(code_path, file.name))) } remove.code.file <- function(file.name) { delayed.bash.cmd(paste0("rm ", file.path(code_path, file.name))) } test.added <- function(added, deleted, modified) { expect_that(length(added), equals(1)) expect_true(grepl("test1.R", added)) expect_that(length(deleted), 
equals(0)) expect_that(length(modified), equals(0)) FALSE } test.removed <- function(added, deleted, modified) { expect_that(length(added), equals(0)) expect_that(length(deleted), equals(1)) expect_true(grepl("test1.R", deleted)) expect_that(length(modified), equals(0)) FALSE } add.code.file("test1.R") watch(c(code_path, test_path), test.added) remove.code.file("test1.R") watch(c(code_path, test_path), test.removed) }) testthat/tests/testthat/test-label.R0000644000176200001440000000046413547563465017321 0ustar liggesuserstest_that("labelling compound {} expression gives single string", { out <- expr_label(quote({ 1 + 2 })) expect_length(out, 1) expect_type(out, "character") }) test_that("can label multiline functions", { expect_equal( expr_label(quote(function(x, y) {})), "function(x, y) ..." ) }) testthat/tests/testthat/test-evaluate-promise.R0000644000176200001440000000166613547563465021531 0ustar liggesuserstest_that("captures warnings, messages and output", { out <- evaluate_promise({ message("m", appendLF = FALSE) warning("w") cat("out") }) expect_equal(out$output, "out") expect_equal(out$messages, "m") expect_equal(out$warnings, "w") }) test_that("capture_warnings captures warnings", { out <- capture_warnings({ warning("a") warning("b") }) expect_equal(out, c("a", "b")) }) test_that("capture_messages captures messages", { out <- capture_messages({ message("a") message("b") }) expect_equal(out, c("a\n", "b\n")) # message adds LF by default }) test_that("capture output captures output", { out1 <- capture_output(print(1:5)) out2 <- capture_output(1:5, print = TRUE) expect_equal(out1, "[1] 1 2 3 4 5") expect_equal(out2, "[1] 1 2 3 4 5") }) test_that("capture output doesn't print invisible things", { out <- capture_output(invisible(1), print = TRUE) expect_equal(out, "") }) testthat/tests/testthat/setup.R0000644000176200001440000000007613165405551016407 0ustar liggesuserswrite_lines("I should be automatically deleted", "DELETE-ME") testthat/tests/testthat/test-path-missing/0000755000176200001440000000000012673600534020502 5ustar liggesuserstestthat/tests/testthat/test-path-missing/empty0000644000176200001440000000000012666267031021554 0ustar liggesuserstestthat/tests/testthat/test-expect-messages.R0000644000176200001440000000460513564563701021330 0ustar liggesusers# Messages ---------------------------------------------------------------- test_that("inputs evaluated in correct scope", { expect_message({ message("a") x <- 10 }) expect_identical(x, 10) }) test_that("regexp = NULL checks for presence of message", { expect_success(expect_message(message("!"))) expect_failure(expect_message(null()), "did not produce any messages") }) test_that("regexp = NA checks for absence of message", { expect_success(expect_message(null(), NA)) expect_failure(expect_message(message("!"), NA)) }) test_that("regexp = string matches _any_ message", { f <- function() { message("a") message("b") } expect_success(expect_message(f())) expect_success(expect_message(f(), "a")) expect_success(expect_message(f(), "b")) expect_failure(expect_message(f(), "c")) expect_failure(expect_message("", "c"), "did not produce any messages") }) test_that("... 
passed on to grepl", { expect_success(expect_message(message("X"), "x", ignore.case = TRUE)) }) test_that("returns first argument", { expect_equal(expect_message(1, NA), 1) }) # Warnings ---------------------------------------------------------------- test_that("regexp = NULL checks for presence of warning", { expect_success(expect_warning(warning("!"))) expect_failure(expect_warning(null()), "did not produce any warnings") }) test_that("regexp = NA checks for absence of warning", { expect_success(expect_warning(null(), NA)) expect_failure(expect_warning(warning("!"), NA)) }) test_that("regexp = string matches _any_ warning", { f <- function() { warning("a") warning("b") } expect_success(expect_warning(f())) expect_success(expect_warning(f(), "a")) expect_success(expect_warning(f(), "b")) expect_failure(expect_warning(f(), "c")) expect_failure(expect_warning("", "c"), "did not produce any warnings") }) test_that("... passed on to grepl", { expect_success(expect_warning(warning("X"), "x", ignore.case = TRUE)) }) test_that("returns first argument", { expect_equal(expect_warning(1, NA), 1) }) test_that("generates informative failures", { skip_if_not(l10n_info()$`UTF-8`) expect_known_failure("test-expect-messages-warning.txt", { foo <- function() { warning("xxx") warning("yyy") } expect_warning(null()) expect_warning(foo(), NA) expect_warning(foo(), "zzz") expect_warning(foo(), "xxx", all = TRUE) }) }) testthat/tests/testthat/test-reporter-list.R0000644000176200001440000000562213564563701021046 0ustar liggesusers # regression test: test_file() used to crash with a NULL reporter test_that("ListReporter with test_file and NULL reporter", { test_file_path <- 'test-list-reporter/test-exercise-list-reporter.R' expect_error(test_file(test_path(test_file_path), reporter = NULL), NA) }) # regression: check that an exception is reported if it is raised in the test file outside # of a test (test_that() call). # N.B: the exception here happens between two tests: "before" and "after" test_that("ListReporter - exception outside of test_that()", { test_file_path <- 'test-list-reporter/test-exception-outside-tests.R' res <- test_file(test_path(test_file_path), reporter = NULL) expect_true(is.list(res)) # 2 results: first test "before" + the exception. 
N.B: the 2nd test "after" is not reported expect_length(res, 2) df <- as.data.frame(res) # the first result should be the results of test "before", that was successful expect_identical(df$test[1], 'before') expect_equal(df$passed[1], 1) expect_false(df$error[1]) # the 2nd result should be the exception expect_true(is.na(df$test[2])) # no test name expect_true(df$error[2]) # it was an error expect_identical(conditionMessage(res[[2]]$results[[1]]), 'dying outside of tests') }) test_that("captures error if only thing in file", { test_file_path <- 'test-list-reporter/test-only-error.R' res <- test_file(test_path(test_file_path), reporter = NULL) expect_length(res, 1) expect_s3_class(res[[1]]$results[[1]], "expectation_error") }) # ListReporter on a "standard" test file: 2 contexts, passing, failing and crashing tests test_that("exercise ListReporter", { test_file_path <- 'test-list-reporter/test-exercise-list-reporter.R' res <- test_file(test_path(test_file_path), reporter = NULL) expect_is(res, "testthat_results") expect_length(res, 5) # 5 tests # we convert the results to data frame for convenience df <- as.data.frame(res) expect_identical(unique(df$context), c("context1", "context2")) expect_identical(unique(df$file), basename(test_file_path)) # test "A failing test" is the only failing test failed_idx <- which(df$failed != 0) expect_identical(df$test[failed_idx], "A failing test") failed_test <- res[[failed_idx]] expect_identical(expectation_type(failed_test$results[[1]]), "failure") # test "A crashing test" is the only crashing test crashed_idx <- which(df$error) expect_identical(df$test[crashed_idx], "A crashing test") crashed_test <- res[[crashed_idx]] expect_identical(expectation_type(crashed_test$results[[1]]), "error") }) # bare expectations are ignored test_that("ListReporter and bare expectations", { test_file_path <- 'test-list-reporter/test-bare-expectations.R' res <- test_file(test_path(test_file_path), reporter = NULL) df <- as.data.frame(res) # 2 tests, "before" and "after". 
no result for the bare expectation expect_identical(df$test, c("before", "after")) }) testthat/tests/testthat/test-expect-setequal.R0000644000176200001440000000446113547563465021354 0ustar liggesusers# setequal ---------------------------------------------------------------- test_that("ignores order and duplicates", { expect_success(expect_setequal(letters, rev(letters))) expect_success(expect_setequal(c("a", "a", "b"), c("b", "b", "a"))) }) test_that("checks both directions of containment", { expect_failure(expect_setequal(letters, letters[-1])) expect_failure(expect_setequal(letters[-1], letters)) }) test_that("truncates long differences", { cnd <- catch_cnd(expect_setequal("a", letters)) expect_match(cnd$message, "...") }) test_that("warns if both inputs are named", { expect_warning(expect_setequal(c(a = 1), c(b = 1)), "ignores names") }) test_that("error for non-vectors", { expect_error(expect_setequal(sum, sum), "be vectors") }) # mapequal ---------------------------------------------------------------- test_that("ignores order", { expect_success(expect_mapequal(list(a = 1, b = 2), list(b = 2, a = 1))) }) test_that("error if any names are duplicated", { expect_error(expect_mapequal(list(a = 1, b = 2, b = 3), list(b = 2, a = 1))) expect_error(expect_mapequal(list(a = 1, b = 2), list(b = 3, b = 2, a = 1))) expect_error(expect_mapequal(list(a = 1, b = 2, b = 3), list(b = 3, b = 2, a = 1))) }) test_that("handling NULLs", { expect_success(expect_mapequal(list(a = 1, b = NULL), list(b = NULL, a = 1))) }) test_that("fail if names don't match", { expect_failure(expect_mapequal(list(a = 1, b = 2), list(a = 1))) expect_failure(expect_mapequal(list(a = 1), list(a = 1, b = 2))) }) test_that("fails if values don't match", { expect_failure(expect_mapequal(list(a = 1, b = 2), list(a = 1, b = 3))) }) test_that("error for non-vectors", { expect_error(expect_mapequal(sum, sum), "be vectors") expect_error(expect_mapequal(NULL, NULL), "be vectors") }) test_that("error if any unnamed values", { expect_error(expect_mapequal(list(1, b = 2), list(1, b = 2))) expect_error(expect_mapequal(list(1, b = 2), list(b = 2, 1))) }) test_that("succeeds if comparing empty named and unnamed vectors", { x1 <- list() x2 <- setNames(list(), character()) expect_warning(expect_success(expect_mapequal(x1, x1))) expect_warning(expect_success(expect_mapequal(x1, x2))) expect_warning(expect_success(expect_mapequal(x2, x1))) expect_warning(expect_success(expect_mapequal(x2, x2))) }) testthat/tests/testthat/one.rds0000644000176200001440000000005412666267031016417 0ustar liggesusersb```b`fdd`b2Ctestthat/tests/testthat/test-expectation.R0000644000176200001440000000272413564563701020556 0ustar liggesuserscontext("test-expectation") test_that("expectation contains failure message even when successful", { e <- expect(TRUE, "I failed") expect_equal(e$message, "I failed") }) test_that("expect warns if no `failure_message`", { expect_warning(expect(TRUE), "missing, with no default") }) test_that("info only evaluated on failure", { expect_error(expect(TRUE, "fail", info = stop("!")), NA) }) test_that("can subclass expectation", { exp <- new_expectation("failure", "didn't work", .subclass = "foo", bar = "baz") expect_true(inherits_all(exp, c("foo", "expectation_failure", "expectation", "error", "condition"))) expect_identical(attr(exp, "bar"), "baz") }) test_that("`expect()` and `exp_signal()` signal expectations", { expect_error(expect(TRUE, ""), regexp = NA) expect_error(expect(FALSE, ""), class = "expectation_failure") 
expect_error(exp_signal(new_expectation("success", "")), regexp = NA) expect_error(exp_signal(new_expectation("failure", "")), class = "expectation_failure") }) test_that("conditionMessage() is called during conversion", { local_methods(conditionMessage.foobar = function(...) "dispatched") wrn <- warning_cnd("foobar", message = "wrong") expect_identical(as.expectation(wrn)$message, "dispatched") err <- error_cnd("foobar", message = "wrong") expect_identical(as.expectation(err)$message, "dispatched") err <- cnd(c("foobar", "skip"), message = "wrong") expect_identical(as.expectation(err)$message, "dispatched") }) testthat/tests/testthat/too-many-failures.R0000644000176200001440000000050113547563465020630 0ustar liggesuserscontext("too many failures") test_that("SummaryReport gives up if too many errors", { expect_equal(Inf, 1) expect_equal(Inf, 2) expect_equal(Inf, 3) expect_equal(Inf, 4) expect_equal(Inf, 5) expect_equal(Inf, 6) expect_equal(Inf, 7) expect_equal(Inf, 8) expect_equal(Inf, 9) expect_equal(Inf, 10) }) testthat/tests/testthat/test-expect-invisible.R0000644000176200001440000000071013547563465021506 0ustar liggesuserscontext("test-expect-invisible") test_that("basically principles of visibilty hold", { expect_success(expect_invisible(x <- 10)) expect_failure(expect_invisible(x)) expect_success(expect_visible(x)) expect_failure(expect_visible(x <- 1)) }) test_that("invisibly returns evaluated value", { out <- expect_invisible(expect_invisible(x <- 2 + 2)) expect_equal(out, 4) out <- expect_invisible(expect_visible(2 + 2)) expect_equal(out, 4) }) testthat/tests/testthat/test-test_dir.R0000644000176200001440000000375513564563701020055 0ustar liggesuserstest_that("R_TESTS envar is unset", { expect_equal(Sys.getenv("R_TESTS"), "") }) test_that("TESTHAT env var set to true", { expect_true(is_testing()) }) # https://github.com/r-lib/devtools/issues/2015 # test_that("TESTHAT_PKG env var set to the package being tested", { # expect_equal(testing_package(), "testthat") # }) test_that("test_dir()", { res <- test_dir(test_path("test_dir"), reporter = "silent") df <- as.data.frame(res) df$user <- df$system <- df$real <- df$result <- NULL verify_output(test_path("test-test_dir.txt"), print(df), width = 200) }) test_that("test_dir() filter", { res <- test_dir("test_dir", reporter = "silent", filter = "basic|empty") df <- as.data.frame(res) expect_identical(unique(df$context), c("Basic", "empty")) }) test_that("test_dir() helpers", { res <- test_dir("test_dir", reporter = "silent", filter = "helper") df <- as.data.frame(res) expect_true(all(!df$error & df$failed == 0)) }) test_that("filter_test_scripts() with tricky names", { files <- c( "test-basic.R", "test-blah.really.Rtrick.R", "test-hello.rtest.R" ) expect_equal(filter_test_scripts(files, filter = "basic|Rtrick|rtest"), files) expect_equal(filter_test_scripts(files, filter = "Rtrick|rtest"), files[2:3]) expect_equal( filter_test_scripts(files, filter = "Rtrick|rtest", invert = TRUE), files[1] ) }) # errors ------------------------------------------------------------------ test_that("can control if failures generate errors", { test_error <- function(...) { test_dir(test_path("test-error"), reporter = "silent", ...) } expect_error(test_error(stop_on_failure = TRUE), "Test failures") expect_error(test_error(stop_on_failure = FALSE), NA) }) test_that("can control if warnings errors", { test_warning <- function(...) { test_dir(test_path("test-warning"), reporter = "silent", ...) 
} expect_error(test_warning(stop_on_warning = TRUE), "Tests generated warnings") expect_error(test_warning(stop_on_warning = FALSE), NA) }) testthat/tests/testthat/teardown.R0000644000176200001440000000002413165405551017063 0ustar liggesusersunlink("DELETE-ME") testthat/tests/testthat/test-path-installed/0000755000176200001440000000000013165647525021020 5ustar liggesuserstestthat/tests/testthat/test-path-installed/testthat-tests/0000755000176200001440000000000013165647525024020 5ustar liggesuserstestthat/tests/testthat/test-path-installed/testthat-tests/testthat/0000755000176200001440000000000013165647525025660 5ustar liggesuserstestthat/tests/testthat/test-path-installed/testthat-tests/testthat/empty0000644000176200001440000000000013164532741026717 0ustar liggesuserstestthat/tests/testthat/test-expect-known-value.R0000644000176200001440000000302213547563465021767 0ustar liggesuserstest_that("correctly matches to a file", { x <- 1 expect_success(expect_known_value(x, "one.rds")) x <- 2 expect_failure(expect_known_value(x, "one.rds", update = FALSE)) }) test_that("first run is successful", { expect_success( expect_warning( expect_known_value(2, "two.rds"), "Creating reference" ) ) unlink("two.rds") }) test_that("equal_to_ref does not overwrite existing", { tmp_rds <- tempfile(fileext=".rds") on.exit(unlink(tmp_rds)) ref_obj1 <- 1:3 ref_obj2 <- 2:4 saveRDS(ref_obj1, tmp_rds) expect_success(expect_equal_to_reference(ref_obj1, tmp_rds)) # Failure does not update object expect_failure(expect_equal_to_reference(ref_obj2, tmp_rds)) expect_equal(readRDS(tmp_rds), ref_obj1) # Now failure does update object expect_failure(expect_equal_to_reference(ref_obj2, tmp_rds, update=TRUE)) expect_success(expect_equal_to_reference(ref_obj2, tmp_rds)) }) test_that("serializes to version 2 by default", { skip_if(getRversion() < 3.5) tmp_rds <- tempfile(fileext = ".rds") on.exit(unlink(tmp_rds)) expect_warning( expect_known_value("a", tmp_rds), "Creating reference" ) expect_identical(tools:::get_serialization_version(tmp_rds)[[1]], 2L) }) test_that("version 3 is possible", { skip_if(getRversion() < 3.5) tmp_rds <- tempfile(fileext = ".rds") on.exit(unlink(tmp_rds)) expect_warning( expect_known_value("a", tmp_rds, version = 3), "Creating reference" ) expect_identical(tools:::get_serialization_version(tmp_rds)[[1]], 3L) }) testthat/tests/testthat/test-expect-known-hash.R0000644000176200001440000000047713547563465021611 0ustar liggesuserstest_that("empty hash succeeds with warning", { expect_success( expect_warning( expect_known_hash(1:10), "No recorded hash" ) ) }) test_that("only succeeds if hash is correct", { expect_success(expect_known_hash(1:10, "c08951d2c2")) expect_failure(expect_known_hash(1:10, "c08951d2c3")) }) testthat/tests/testthat/utf8.R0000644000176200001440000000011713547563465016146 0ustar liggesuserstest_that("sourced with correct encoding", { expect_equal("ä", "\u00e4") }) testthat/tests/testthat/test-teardown-2.R0000644000176200001440000000014513547563465020220 0ustar liggesuserstest_that("teardown is completed by previous test", { expect_false(file.exists("teardown.txt")) }) testthat/tests/testthat/test-compare-character.R0000644000176200001440000000444513547563465021625 0ustar liggesusers# Metadata ---------------------------------------------------------------- test_that("types must be the same", { expect_match(compare("a", 1L)$message, "character is not integer") }) test_that("base lengths must be identical", { expect_match(compare("a", letters)$message, "1 is not 26") }) test_that("classes 
must be identical", { c1 <- "a" c2 <- structure("a", class = "mycharacter") expect_match(compare(c1, c2)$message, "character is not mycharacter") }) test_that("attributes must be identical", { x1 <- "a" x2 <- c(a = "a") x3 <- c(b = "a") x4 <- structure("a", a = 1) x5 <- structure("a", b = 1) expect_match(compare(x1, x2)$message, "names for current") expect_match(compare(x2, x3)$message, "Names: 1 string mismatch") expect_match(compare(x1, x4)$message, "target is NULL") expect_match(compare(x4, x5)$message, "Names: 1 string mismatch") }) # Values ------------------------------------------------------------------ test_that("two identical vectors are the same", { expect_true(compare(letters, letters)$equal) }) test_that("equal if both missing or both the same (multiple values)", { expect_true(compare(c("ABC", NA), c("ABC", NA))$equal) expect_false(compare(c(NA, NA), c("ABC", NA))$equal) expect_false(compare(c("AB", NA), c("ABC", NA))$equal) expect_false(compare(c("AB", "AB"), c("ABC", "AB"))$equal) }) # Output ------------------------------------------------------------------ test_that("computes correct number of mismatches", { x <- mismatch_character(c("a", "b", "c"), c("c", "d", "e")) expect_equal(x$n, 3) }) test_that("only differences are shown", { x <- mismatch_character(letters, c(letters[-26], "a")) lines <- strsplit(format(x), "\n")[[1]] expect_equal(lines[1], "1/26 mismatches") expect_equal(lines[2], 'x[26]: "z"') }) test_that("not all lines are shown", { a <- "1234567890" b <- paste(rep(a, 10), collapse = "") x <- mismatch_character(a, b) lines <- strsplit(format(x, width = 16), "\n")[[1]] expect_equal(lines[1], "1/1 mismatches") expect_equal(length(lines), 8) }) test_that("vectors longer than `max_diffs` (#513)", { comp <- compare(letters[1:2], LETTERS[1:2], max_diffs = 1) expect_is(comp, "comparison") expect_false(comp$equal) expect_equal(comp$message, "2/2 mismatches\nx[1]: \"a\"\ny[1]: \"A\"") }) testthat/tests/testthat/test-try-again.R0000644000176200001440000000046413547563465020135 0ustar liggesuserssucceed_after <- function(i) { function() { i <<- i - 1 if (i > 0) fail(paste0("i is ", i)) } } test_that("tries multiple times", { third_try <- succeed_after(3) expect_true(try_again(3, third_try())) third_try <- succeed_after(3) expect_failure(try_again(2, third_try()), "i is 1") }) testthat/tests/testthat/test-path-present/0000755000176200001440000000000012673600534020511 5ustar liggesuserstestthat/tests/testthat/test-path-present/tests/0000755000176200001440000000000012673601414021651 5ustar liggesuserstestthat/tests/testthat/test-path-present/tests/testthat/0000755000176200001440000000000012673601636023517 5ustar liggesuserstestthat/tests/testthat/test-path-present/tests/testthat/empty0000644000176200001440000000000012666267031024565 0ustar liggesuserstestthat/tests/testthat/test-expect-known-output.R0000644000176200001440000000331513547563465022220 0ustar liggesuserstest_that("uses specified width", { old <- options(width = 20) on.exit(options(old), add = TRUE) x <- 1:100 expect_known_output(print(x), "width-80.txt") }) test_that("creates file on first run", { file <- tempfile() expect_success( expect_warning( expect_known_output(cat("ok!\n"), file), "Creating reference" ) ) expect_true(file.exists(file)) }) test_that("igores incomplete last line", { file <- tempfile() write_lines("Hi!", file) expect_success(expect_known_output(cat("Hi!"), file)) expect_success(expect_known_output(cat("Hi!\n"), file)) expect_failure(expect_known_output(cat("Hi!\n\n"), file)) 
expect_failure(expect_known_output(cat("oops"), file)) }) test_that("updates by default", { file <- tempfile() write_lines("Hi!", file) expect_failure(expect_known_output(cat("oops"), file, update = FALSE)) expect_equal(read_lines(file), "Hi!") expect_failure(expect_known_output(cat("oops"), file, update = TRUE)) expect_success(expect_known_output(cat("oops"), file)) }) test_that("works in non-UTF-8 locale", { text <- c("\u00fc", "\u2a5d", "\u6211", "\u0438") file <- tempfile() write_lines(text, file) expect_success(expect_known_output(cat(text, sep = "\n"), file, update = FALSE)) withr::with_locale( c(LC_CTYPE = "C"), { expect_false(l10n_info()$`UTF-8`) expect_success(expect_known_output(cat(text, sep = "\n"), file, update = FALSE)) } ) }) test_that("Warning for non-UTF-8 reference files", { x <- "\xe9\xe1\xed\xf6\xfc" Encoding(x) <- "latin1" tmp <- tempfile() on.exit(unlink(tmp), add = TRUE) writeBin(x, tmp) expect_failure( expect_warning(expect_known_output("foobar", tmp, update = FALSE)) ) }) testthat/tests/testthat/test-compare.R0000644000176200001440000000133413547563465017665 0ustar liggesuserstest_that("list comparison truncates to max_diffs", { x <- as.list(as.character(1:1e3)) y <- lapply(x, paste0, ".") lines1 <- strsplit(compare(x, y)$message, "\n")[[1]] expect_length(lines1, 10) lines2 <- strsplit(compare(x, y, max_diffs = 99)$message, "\n")[[1]] expect_length(lines2, 100) }) test_that("no diff", { expect_equal(compare(1,1), no_difference()) }) test_that("vector_equal_tol handles infinity", { expect_true(vector_equal_tol(Inf, Inf)) expect_true(vector_equal_tol(-Inf, -Inf)) expect_false(vector_equal_tol(Inf, -Inf)) expect_false(vector_equal_tol(Inf, 0)) }) test_that("vector_equal_tol handles na", { expect_true(vector_equal_tol(NA, NA)) expect_false(vector_equal_tol(NA, 0)) }) testthat/tests/testthat/test-reporter-junit.R0000644000176200001440000000025013547563465021224 0ustar liggesuserscontext("test-reporter-junit") test_that("permit Java-style class names", { class <- "package_name_or_domain.ClassName" expect_equal(classnameOK(class), class) }) testthat/tests/testthat/test-verify-output.R0000644000176200001440000000367113564563701021077 0ustar liggesuserstest_that("can record all types of output", { verify_output(test_path("test-verify-output.txt"), { "Output" 1 + 2 invisible(1:10) 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 "# Header" "Other output" letters }) }) test_that("can record all types of output", { scoped_bindings( .env = global_env(), conditionMessage.foobar = function(cnd) { paste("Dispatched!", cnd$message) } ) verify_output(test_path("test-verify-conditions.txt"), { message("Message") "With calls" warning("Warning") stop("Error") "Without calls" warning("Warning", call. = FALSE) stop("Error", call. 
= FALSE) "With `conditionMessage()` method" cnd_signal(message_cnd("foobar", message = "Message")) cnd_signal(warning_cnd("foobar", message = "Warning")) cnd_signal(error_cnd("foobar", message = "Error")) }) }) test_that("can't record plots", { skip_if(interactive()) expect_error(verify_output(tempfile(), plot(1:10)), "Plots") }) test_that("verify_output() splits condition messages on newlines", { verify_output(test_path("test-verify-conditions-lines.txt"), { message("First.\nSecond.") warning("First.\nSecond.") stop("First.\nSecond.") }) }) test_that("can use constructed calls in verify_output() (#945)", { verify_output(test_path("test-verify-constructed-calls.txt"), { expr(foo(!!c("bar", "baz"))) # Can unquote local objects binding <- quote(foo) expr(foo(!!binding)) }) }) test_that("verify_output() doesn't use cli unicode by default", { verify_output( test_path("test-verify-unicode-false.txt"), { cat(cli::symbol$info, cli::symbol$cross, "\n") } ) skip_if(!cli::is_utf8_output()) verify_output( test_path("test-verify-unicode-true.txt"), unicode = TRUE, { cat(cli::symbol$info, cli::symbol$cross, "\n") }) }) testthat/tests/testthat/test-quasi-label.R0000644000176200001440000000105213564563701020423 0ustar liggesuserstest_that("atomic scalars deparsed to single values", { expect_equal(expr_label(NULL), "NULL") expect_equal(expr_label(TRUE), "TRUE") expect_equal(expr_label(1L), "1L") expect_equal(expr_label(1), "1") expect_equal(expr_label("a"), '"a"') }) test_that("atomic vectors deparsed to c()", { expect_equal(expr_label(c(1, 2, 3)), "c(1, 2, 3)") }) test_that("long vectors get ...", { long <- "123456789_123456789_123456789_123456789_123456789_123456789_" expect_equal( expr_label(c(long, long)), paste0('c("', long, '", ...)') ) }) testthat/tests/testthat/test-verify-conditions.txt0000644000176200001440000000111413564563701022314 0ustar liggesusers> message("Message") Message: Message > # With calls > warning("Warning") Warning in eval(expr, envir, enclos): Warning > stop("Error") Error in eval(expr, envir, enclos): Error > # Without calls > warning("Warning", call. = FALSE) Warning: Warning > stop("Error", call. = FALSE) Error: Error > # With `conditionMessage()` method > cnd_signal(message_cnd("foobar", message = "Message")) Message: Dispatched! Message > cnd_signal(warning_cnd("foobar", message = "Warning")) Warning: Dispatched! Warning > cnd_signal(error_cnd("foobar", message = "Error")) Error: Dispatched! 
Error testthat/tests/testthat/test-colour.R0000644000176200001440000000075113547563465017534 0ustar liggesuserstest_that("can suppress colours", { op <- options( crayon.enabled = TRUE, testthat.use_colours = TRUE ) check <- crayon::has_style(colourise("X")) # Must restore original options before expectation is triggered options(op) expect_true(check) }) test_that("We don't have colours if we don't want to", { op <- options( crayon.enabled = TRUE, testthat.use_colours = FALSE ) check <- crayon::has_style(colourise("X")) options(op) expect_false(check) }) testthat/tests/testthat/test-skip.R0000644000176200001440000000263313547563465017210 0ustar liggesusersexpect_skip <- function(code) { tryCatch( { skipped <- TRUE code skipped <- FALSE }, skip = function(e) NULL ) expect(skipped, "skip not active") } test_that("Package checks", { expect_skip(skip_if_not_installed("testthat", "9999.9999.999")) expect_skip(skip_if_not(FALSE)) expect_skip(skip_if(TRUE)) }) test_that("Skip env vars", { expect_skip_with_env <- function(new, skip_fun) { withr::with_envvar(new, expect_skip(skip_fun())) } expect_skip_with_env(c("NOT_CRAN" = "false"), skip_on_cran) expect_skip_with_env(c("TRAVIS" = "true"), skip_on_travis) expect_skip_with_env(c("APPVEYOR" = "True"), skip_on_appveyor) expect_skip_with_env(c("R_COVR" = "true"), skip_on_covr) expect_skip_with_env(c("BBS_HOME" = "asdf"), skip_on_bioc) }) test_that("autogenerated message is always single line", { a_very_long_argument_name <- FALSE cnd <- capture_condition(skip_if_not( a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name )) expect_length(cnd$message, 1) }) testthat/tests/testthat/test-expect-equality.R0000644000176200001440000000263413547563465021366 0ustar liggesuserstest_that("basic principles of equality hold", { expect_success(expect_equal(1, 1)) expect_failure(expect_equal(1, 2)) }) test_that("default labels use unquoting", { x <- 2 expect_failure(expect_equal(1, !! x), "1 not equal to 2") }) test_that("expect_equivalent ignores attributes for integers", { x1 <- 1:10 x2 <- structure(x1, a = 1, b = 2) expect_failure(expect_equal(x1, x2)) expect_success(expect_equivalent(x1, x2)) }) test_that("expect_equivalent ignores attributes for characters", { x1 <- letters x2 <- structure(x1, a = 1, b = 2) expect_failure(expect_equal(x1, x2)) expect_success(expect_equivalent(x1, x2)) }) test_that("expect_equal and expect_equivalent pass on ... 
to compare", { x1 <- 1 x2 <- x1 + 1e-6 expect_success(expect_equal(x1, x2, tol = 1e-5)) expect_success(expect_equivalent(x1, x2, tol = 1e-5)) }) test_that("useful message if objects equal but not identical", { f <- function() x g <- function() x environment(g) <- globalenv() expect_failure(expect_identical(f, g), "not identical") }) test_that("% is not treated as sprintf format specifier (#445)", { expect_failure(expect_equal("+", "%")) expect_failure(expect_equal("%", "+")) expect_equal("%", "%") }) test_that("attributes for object (#452)", { oops <- structure(0, oops = "oops") expect_equal(oops, oops) expect_failure(expect_equal(oops, 0)) expect_equal(as.numeric(oops), 0) }) testthat/tests/testthat/test-describe.R0000644000176200001440000000260513547563465020021 0ustar liggesuserssomeExternalVariable <- 1 describe("describe", { it("can contain nested describe blocks", { describe("addition", { it("should be able to add two numbers", { expect_equivalent(2, 1 + 1) }) describe("sub feature", { it("should also work", { expect_equivalent(2, 1 + 1) }) }) }) }) it("can have not yet implemented specs", { describe("Millennium Prize Problems", { it("can be shown that P != NP") }) }) it("has to have a description for the block", { expect_that(describe({ }), throws_error()) expect_that(describe("", { }), throws_error()) expect_that(describe("test", { it() }), throws_error()) expect_that(describe("test", { it("") }), throws_error()) }) it("has to have a description of length 1", { expect_that(describe(c("a", "b"), {}), throws_error()) expect_that(describe("test", { it(c("a", "b")) }), throws_error()) }) someInternalVariable <- 1 it("should be possible to use variables from outer environments", { expect_equivalent(1, someExternalVariable) expect_equivalent(1, someInternalVariable) }) it("should not be possible to access variables from other specs (1)", { some_test_var <- 5 }) it("should not be possible to access variables from other specs (2)", { expect_false(exists("some_test_var")) }) }) testthat/tests/testthat/test-expect-silent.R0000644000176200001440000000045313547563465021024 0ustar liggesuserstest_that("checks for any type of output", { expect_failure(expect_silent(warning("!"))) expect_failure(expect_silent(message("!"))) expect_failure(expect_silent(print("!"))) expect_success(expect_silent("")) }) test_that("returns first argument", { expect_equal(expect_silent(1), 1) }) testthat/tests/testthat/test-expect-condition-custom.txt0000644000176200001440000000050713564563701023432 0ustar liggesusers> expect_error(custom_err(), "an error") Warning: `custom_err()` generated an S3 error and you are testing the error message. * The error has class = c("custom_err", "rlang_error", "error", "condition") * Testing with `class` is more robust than testing with `regexp`. * Do you want `expect_error(..., class = "custom_err")`? 
testthat/tests/testthat/test-debug-reporter.R0000644000176200001440000001064513547563465021172 0ustar liggesusersget_vars_from_debug_reporter <- function(choice, fun, envir = parent.frame()) { frame <- get_frame_from_debug_reporter(choice, fun, envir) ls(frame) } get_frame_from_debug_reporter <- function(choice, fun, envir = parent.frame()) { force(choice) test_debug_reporter_parent_frame <- NULL with_mock( show_menu = function(choices, title = NULL) { # if (choice > 0) print(choices) my_choice <- choice choice <<- 0L my_choice }, browse_frame = function(frame, skip) { test_debug_reporter_parent_frame <<- frame }, sink_number = function() 0L, with_reporter( "debug", test_that("debug_reporter_test", fun()) ) ) test_debug_reporter_parent_frame } success_fun <- function() { aa <- 1 expect_true(TRUE) } test_that("debug reporter is not called for successes", { expect_null(get_frame_from_debug_reporter(2, success_fun)) }) test_that("browser() is called for the correct frame for failures", { fun_1 <- function() { aa <- 1 expect_true(FALSE) } fun_2 <- function() { f <- function() expect_true(FALSE) f() } fun_3 <- function() { f <- function() { g <- function() expect_true(FALSE) g() } f() } expect_equal(get_vars_from_debug_reporter(1, fun_1), character()) expect_equal(get_vars_from_debug_reporter(2, fun_1), "aa") expect_equal(get_vars_from_debug_reporter(1, fun_2), character()) expect_equal(get_vars_from_debug_reporter(2, fun_2), "f") expect_equal(get_vars_from_debug_reporter(3, fun_2), character()) expect_equal(get_vars_from_debug_reporter(1, fun_3), character()) expect_equal(get_vars_from_debug_reporter(2, fun_3), "f") expect_equal(get_vars_from_debug_reporter(3, fun_3), "g") expect_equal(get_vars_from_debug_reporter(4, fun_3), character()) }) test_that("browser() is called for the correct frame for warnings", { fun_1 <- function() { aa <- 1 warning("warn") } fun_2 <- function() { f <- function() warning("warn") f() } fun_3 <- function() { f <- function() { g <- function() warning("warn") g() } f() } expect_equal(get_vars_from_debug_reporter(1, fun_1), character()) expect_equal(get_vars_from_debug_reporter(2, fun_1), "aa") expect_equal(get_vars_from_debug_reporter(1, fun_2), character()) expect_equal(get_vars_from_debug_reporter(2, fun_2), "f") expect_equal(get_vars_from_debug_reporter(3, fun_2), character()) expect_equal(get_vars_from_debug_reporter(1, fun_3), character()) expect_equal(get_vars_from_debug_reporter(2, fun_3), "f") expect_equal(get_vars_from_debug_reporter(3, fun_3), "g") expect_equal(get_vars_from_debug_reporter(4, fun_3), character()) }) test_that("browser() is called for the correct frame for errors", { fun_1 <- function() { aa <- 1 stop("error") } fun_2 <- function() { f <- function() stop("error") f() } fun_3 <- function() { f <- function() { g <- function() stop("error") g() } f() } expect_equal(get_vars_from_debug_reporter(1, fun_1), character()) expect_equal(get_vars_from_debug_reporter(2, fun_1), "aa") expect_equal(get_vars_from_debug_reporter(1, fun_2), character()) expect_equal(get_vars_from_debug_reporter(2, fun_2), "f") expect_equal(get_vars_from_debug_reporter(3, fun_2), character()) expect_equal(get_vars_from_debug_reporter(1, fun_3), character()) expect_equal(get_vars_from_debug_reporter(2, fun_3), "f") expect_equal(get_vars_from_debug_reporter(3, fun_3), "g") expect_equal(get_vars_from_debug_reporter(4, fun_3), character()) }) test_that("browser() is called for the correct frame for skips", { fun_1 <- function() { aa <- 1 skip("skip") } fun_2 <- function() { f <- 
function() skip("skip") f() } fun_3 <- function() { f <- function() { g <- function() skip("skip") g() } f() } expect_equal(get_vars_from_debug_reporter(1, fun_1), character()) expect_equal(get_vars_from_debug_reporter(2, fun_1), "aa") expect_equal(get_vars_from_debug_reporter(1, fun_2), character()) expect_equal(get_vars_from_debug_reporter(2, fun_2), "f") expect_equal(get_vars_from_debug_reporter(3, fun_2), character()) expect_equal(get_vars_from_debug_reporter(1, fun_3), character()) expect_equal(get_vars_from_debug_reporter(2, fun_3), "f") expect_equal(get_vars_from_debug_reporter(3, fun_3), "g") expect_equal(get_vars_from_debug_reporter(4, fun_3), character()) }) testthat/tests/testthat/test-test-path.R0000644000176200001440000000171513547563465020153 0ustar liggesuserstest_that("returns local path when called in tests", { expect_equal(test_path("test-test-path.R"), "test-test-path.R") # even if path doesn't (yet) exists expect_equal(test_path("xxxx"), "xxxx") }) test_that("returns local path when called from tools::testInstalledPackages", { old <- setwd("test-path-installed/testthat-tests/testthat") on.exit(setwd(old)) expect_true(in_testing_dir(".")) expect_equal(test_path("test-test-path.R"), "test-test-path.R") expect_equal(test_path("xxxx"), "xxxx") }) test_that("returns full path when called outside tests", { old <- setwd("test-path-present") on.exit(setwd(old)) expect_equal(test_path("empty"), "tests/testthat/empty") # even when file doesn't exist expect_equal(test_path("xxx"), "tests/testthat/xxx") }) test_that("throws error if can't find tests/testthat", { old <- setwd("test-path-missing") on.exit(setwd(old)) expect_error(test_path("empty"), "Can't find `tests/testthat`") }) testthat/tests/testthat/helper-reporter.R0000644000176200001440000000104213564563701020365 0ustar liggesusersexpect_report_unchanged <- function(name, reporter = find_reporter(name), file = "reporters/tests.R") { path <- test_path("reporters", paste0(name, ".txt")) withr::local_options(c(cli.unicode = TRUE)) expect_known_output( test_file(test_path(file), reporter, wrap = FALSE), path ) } expect_report_to_file <- function(reporter, ...) { path <- tempfile() on.exit(unlink(path)) reporter <- reporter$new(file = path, ...) 
test_file(test_path("reporters/tests.R"), reporter, wrap = FALSE) expect_true(file.exists(path)) } testthat/tests/testthat/context.R0000644000176200001440000000053712666267031016741 0ustar liggesuserscontext("First context.") test_that("Logical equivalence", { x <- TRUE expect_that(x, equals(TRUE)) }) test_that("Numerical equivalence", { x <- 1 expect_that(x, equals(1)) }) context("Second context.") test_that("A passing test", { expect_that(TRUE, equals(TRUE)) }) test_that("A failing test", { expect_that(TRUE, equals(FALSE)) }) testthat/tests/testthat/width-80.txt0000644000176200001440000000066413564530570017237 0ustar liggesusers [1] 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 [19] 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 [37] 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 [55] 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 [73] 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 [91] 91 92 93 94 95 96 97 98 99 100 testthat/tests/testthat/test-verify-output.txt0000644000176200001440000000052413564563701021507 0ustar liggesusers> # Output > 1 + 2 [1] 3 > invisible(1:10) > 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + + 12345678 + 12345678 + 12345678 + 12345678 [1] 135802458 Header ====== > # Other output > letters [1] "a" "b" "c" "d" "e" "f" "g" "h" "i" "j" "k" "l" "m" "n" "o" "p" "q" "r" "s" [20] "t" "u" "v" "w" "x" "y" "z" testthat/tests/testthat/test-expect-output.R0000644000176200001440000000234713564563701021062 0ustar liggesusersf <- function() NULL g <- function() cat("!") test_that("expect = NA checks for no output", { expect_success(expect_output(f(), NA)) expect_failure(expect_output(g(), NA), "produced output") }) test_that("expect = NULL checks for some output", { expect_failure(expect_output(f(), NULL), "produced no output") expect_success(expect_output(g(), NULL)) }) test_that("expect = string checks for match", { expect_success(expect_output(g(), "!")) expect_failure(expect_output(g(), "x"), 'does not match "x"') expect_failure(expect_output("a", "x"), "produced no output") }) test_that("multiline outputs captures and matches", { expect_success(expect_output(cat("1\n2"), "1\n2")) }) test_that("expect_output sets width", { x <- expect_output(getOption("width"), NA) expect_equal(x, 80) x <- expect_output(getOption("width"), NA, width = 20) expect_equal(x, 20) }) test_that("... 
passed on to grepl", { expect_success(expect_output(print("X"), "x", ignore.case = TRUE)) }) test_that("returns first argument", { expect_equal(expect_output(1, NA), 1) }) test_that("uses unicode characters in output where available", { skip_if_not(l10n_info()$`UTF-8`) bar <- "\u2551" expect_success(expect_output(cat(bar), "\u2551")) }) testthat/tests/testthat/helper-junitmock.R0000644000176200001440000000126113456034771020531 0ustar liggesusers# Fix components of JunitReporter that otherwise vary from run-to-run # # The following functions need to be mocked out to run a unit test # against static contents of reporters/junit.txt: # - proctime - originally wrapper for proc.time() # - timestamp - originally wrapper for toString(Sys.time()) # - hostname - originally wrapper for Sys.info()[["nodename"]] # JunitReporterMock <- R6::R6Class("JunitReporterMock", inherit = JunitReporter, public = list(), private = list( proctime = function() { c(user = 0, system = 0, elapsed = 0) }, timestamp = function() { "1999:12:31 23:59:59" }, hostname = function() { "nodename" } ) ) testthat/tests/testthat/test-verify-unicode-false.txt0000644000176200001440000000006713564563701022667 0ustar liggesusers> cat(cli::symbol$info, cli::symbol$cross, "\n") i x testthat/tests/testthat/test-expect-inheritance.R0000644000176200001440000000335413547563465022022 0ustar liggesuserstest_that("expect_type checks typeof", { expect_success(expect_type(factor("a"), "integer")) expect_failure(expect_type(factor("a"), "double")) }) test_that("expect_is checks class", { expect_success(expect_is(factor("a"), "factor")) expect_failure(expect_is(factor("a"), "integer")) }) test_that("expect_s3/s4_class fails if appropriate type", { A <- methods::setClass("A", contains = "list") on.exit(methods::removeClass("A")) expect_failure(expect_s3_class(1, "double"), "not an S3 object") expect_failure(expect_s3_class(A(), "double"), "not an S3 object") expect_failure(expect_s4_class(factor(), "double"), "not an S4 object") }) test_that("test_s4_class respects class hierarchy", { A <- methods::setClass("A", contains = "list") B <- methods::setClass("B", contains = "list") C <- methods::setClass("C", contains = c("A", "B")) on.exit({ methods::removeClass("A") methods::removeClass("B") methods::removeClass("C") }) expect_success(expect_s4_class(C(), "A")) expect_success(expect_s4_class(C(), "B")) expect_failure(expect_s4_class(C(), "D"), "inherits from `C/A/B/list/vector`") }) test_that("test_s3_class respects class hierarchy", { x <- structure(list(), class = c("a", "b")) expect_success(expect_s3_class(x, "a")) expect_success(expect_s3_class(x, "b")) expect_failure(expect_s3_class(x, "c"), "inherits from `a/b`") }) test_that("test_s3_class can request exact match", { x <- structure(list(), class = c("a", "b")) expect_failure(expect_s3_class(x, "a", exact = TRUE)) expect_success(expect_s3_class(x, c("a", "b"), exact = TRUE)) }) test_that("expect_s3_class allows unquoting of first argument", { f <- factor("a") expect_success(expect_s3_class(!! rlang::quo(f), "factor")) }) testthat/tests/testthat/reporters/0000755000176200001440000000000013564563701017153 5ustar liggesuserstestthat/tests/testthat/reporters/junit.txt0000644000176200001440000000773413564563701021060 0ustar liggesusers Failure has been forced Failure has been forced FALSE isn't true. `i` not equal to 2. 1/1 mismatches [1] 1 - 2 == -1 stop ! Backtrace: 1. f() 2. g() 3. h() This is deep Backtrace: 1. f(25) 2. f(x - 1) 3. f(x - 1) 4. f(x - 1) 5. f(x - 1) 6. f(x - 1) 7. f(x - 1) 8. 
f(x - 1) 9. f(x - 1) 10. f(x - 1) ... 17. f(x - 1) 18. f(x - 1) 19. f(x - 1) 20. f(x - 1) 21. f(x - 1) 22. f(x - 1) 23. f(x - 1) 24. f(x - 1) 25. f(x - 1) 26. f(x - 1) Throwable testthat/tests/testthat/reporters/tap.txt0000644000176200001440000000205513564563701020502 0ustar liggesusers1..19 ok 1 Success # Context Expectations ok 2 Success not ok 3 Failure:1 Failure has been forced not ok 4 Failure:2a Failure has been forced not ok 5 Failure:2b FALSE isn't true. not ok 6 Failure:loop `i` not equal to 2. 1/1 mismatches [1] 1 - 2 == -1 ok 7 Failure:loop # Context Errors not ok 8 Error:1 stop not ok 9 Error:3 ! Backtrace: 1. f() 2. g() 3. h() # Context Recursion not ok 10 Recursion:1 This is deep Backtrace: 1. f(25) 2. f(x - 1) 3. f(x - 1) 4. f(x - 1) 5. f(x - 1) 6. f(x - 1) 7. f(x - 1) 8. f(x - 1) 9. f(x - 1) 10. f(x - 1) ... 17. f(x - 1) 18. f(x - 1) 19. f(x - 1) 20. f(x - 1) 21. f(x - 1) 22. f(x - 1) 23. f(x - 1) 24. f(x - 1) 25. f(x - 1) 26. f(x - 1) # Context Skips ok 11 # SKIP Reason: skip ok 12 # SKIP Reason: skip ok 13 # SKIP Reason: empty test # Context Warnings ok 14 # WARNING abc ok 15 # WARNING def ok 16 # WARNING ghi # Context Output ok 17 Output:1 ok 18 Output:1 # Context Throwable errors not ok 19 Error:4 Throwable testthat/tests/testthat/reporters/check.txt0000644000176200001440000000447213564563701021000 0ustar liggesusers── 1. Failure: Failure:1 (@tests.R#12) ──────────────────────────────────────── Failure has been forced ── 2. Failure: Failure:2a (@tests.R#16) ─────────────────────────────────────── Failure has been forced ── 3. Failure: Failure:2b (@tests.R#19) ─────────────────────────────────────── FALSE isn't true. ── 4. Failure: Failure:loop (@tests.R#24) ───────────────────────────────────── `i` not equal to 2. 1/1 mismatches [1] 1 - 2 == -1 ── 5. Error: Error:1 (@tests.R#33) ──────────────────────────────────────────── stop ── 6. Error: Error:3 (@tests.R#47) ──────────────────────────────────────────── ! Backtrace: 1. f() 2. g() 3. h() ── 7. Error: Recursion:1 (@tests.R#56) ──────────────────────────────────────── This is deep Backtrace: 1. f(25) 2. f(x - 1) 3. f(x - 1) 4. f(x - 1) 5. f(x - 1) 6. f(x - 1) 7. f(x - 1) 8. f(x - 1) 9. f(x - 1) 10. f(x - 1) ... 17. f(x - 1) 18. f(x - 1) 19. f(x - 1) 20. f(x - 1) 21. f(x - 1) 22. f(x - 1) 23. f(x - 1) 24. f(x - 1) 25. f(x - 1) 26. f(x - 1) ── 8. Error: Error:4 (@tests.R#111) ─────────────────────────────────────────── Throwable ══ testthat results ═══════════════════════════════════════════════════════════ [ OK: 5 | SKIPPED: 3 | WARNINGS: 3 | FAILED: 8 ] 1. Failure: Failure:1 (@tests.R#12) 2. Failure: Failure:2a (@tests.R#16) 3. Failure: Failure:2b (@tests.R#19) 4. Failure: Failure:loop (@tests.R#24) 5. Error: Error:1 (@tests.R#33) 6. Error: Error:3 (@tests.R#47) 7. Error: Recursion:1 (@tests.R#56) 8. Error: Error:4 (@tests.R#111) testthat/tests/testthat/reporters/rstudio.txt0000644000176200001440000000137513564563701021413 0ustar liggesusersreporters/tests.R#12:1 [failure] Failure:1. Failure has been forced reporters/tests.R#16:1 [failure] Failure:2a. Failure has been forced reporters/tests.R#19:1 [failure] Failure:2b. FALSE isn't true. reporters/tests.R#24:1 [failure] Failure:loop. `i` not equal to 2. reporters/tests.R#33:1 [error] Error:1. stop reporters/tests.R#47:1 [error] Error:3. ! reporters/tests.R#56:1 [error] Recursion:1. This is deep reporters/tests.R#62:1 [skip] Skip:1. Reason: skip reporters/tests.R#69:1 [skip] Skip:2. Reason: skip reporters/tests.R#72:1 [skip] Skip:3. 
Reason: empty test reporters/tests.R#78:1 [warning] Warning:1. abc reporters/tests.R#84:1 [warning] Warning:2. def reporters/tests.R#85:1 [warning] Warning:2. ghi reporters/tests.R#111:1 [error] Error:4. Throwable testthat/tests/testthat/reporters/backtraces.R0000644000176200001440000000226413564563701021404 0ustar liggesuserscontext("Backtraces") test_that("errors thrown at block level are entraced", { f <- function() g() g <- function() stop("foo") f() }) test_that("errors thrown from a quasi-labelled argument are entraced", { foo <- function() stop("foo") expect_is(foo(), "foo") }) test_that("errors thrown from a quasi-labelled argument are entraced (deep case)", { foo <- function() stop("foo") f <- function() g() g <- function() expect_is(foo(), "foo") expect_is(f(), "foo") }) test_that("errors thrown from a quasi-labelled argument are entraced (deep deep case)", { foo <- function() bar() bar <- function() stop("foobar") f <- function() g() g <- function() expect_is(foo(), "foo") f() }) test_that("failed expect_error() prints a backtrace", { f <- function() signaller() signaller <- function() signalCondition(structure(list(), class = "bar")) expect_condition(f(), class = "foo") signaller <- function() stop("bar") expect_error(f(), "foo") }) test_that("Errors are inspected with `conditionMessage()`", { rlang::scoped_bindings( .env = globalenv(), conditionMessage.foobar = function(...) "dispatched" ) rlang::abort("Wrong message", "foobar") }) testthat/tests/testthat/reporters/progress-backtraces.txt0000644000176200001440000000412013564563701023655 0ustar liggesusers✔ | OK F W S | Context ⠏ | 0 | Backtraces ⠋ | 0 1 | Backtraces ⠙ | 0 2 | Backtraces ⠹ | 0 3 | Backtraces ⠸ | 0 4 | Backtraces ⠼ | 0 5 | Backtraces ⠴ | 0 6 | Backtraces ⠦ | 0 7 | Backtraces ✖ | 0 7 | Backtraces ──────────────────────────────────────────────────────────────────────────────── backtraces.R:6: error: errors thrown at block level are entraced foo Backtrace: 1. f() 2. g() backtraces.R:11: error: errors thrown from a quasi-labelled argument are entraced foo Backtrace: 1. testthat::expect_is(foo(), "foo") 4. foo() backtraces.R:18: error: errors thrown from a quasi-labelled argument are entraced (deep case) foo Backtrace: 1. testthat::expect_is(f(), "foo") 4. f() 5. g() 9. foo() backtraces.R:28: error: errors thrown from a quasi-labelled argument are entraced (deep deep case) foobar Backtrace: 1. f() 2. g() 6. foo() 7. bar() backtraces.R:35: failure: failed expect_error() prints a backtrace `f()` threw an condition with unexpected class. Backtrace: 1. testthat::expect_condition(f(), class = "foo") 6. f() 7. signaller() backtraces.R:38: failure: failed expect_error() prints a backtrace `f()` threw an error with unexpected message. Expected match: "foo" Actual message: "bar" Backtrace: 1. testthat::expect_error(f(), "foo") 6. f() 7. signaller() backtraces.R:46: error: Errors are inspected with `conditionMessage()` dispatched ──────────────────────────────────────────────────────────────────────────────── ══ Results ═════════════════════════════════════════════════════════════════════ OK: 0 Failed: 7 Warnings: 0 Skipped: 0 testthat/tests/testthat/reporters/summary-no-dots.txt0000644000176200001440000000577113564563701023004 0ustar liggesusersExpectations: 1234 Expectations2: Errors: 56 Recursion: 7 Skips: SSS Warnings: WWW Output: Throwable errors: 8 End: ══ Skipped ═════════════════════════════════════════════════════════════════════ 1. Skip:1 (@tests.R#62) - Reason: skip 2. Skip:2 (@tests.R#69) - Reason: skip 3. 
Skip:3 (@tests.R#72) - Reason: empty test ══ Warnings ════════════════════════════════════════════════════════════════════ 1. Warning:1 (@tests.R#78) - abc 2. Warning:2 (@tests.R#84) - def 3. Warning:2 (@tests.R#85) - ghi ══ Failed ══════════════════════════════════════════════════════════════════════ ── 1. Failure: Failure:1 (@tests.R#12) ──────────────────────────────────────── Failure has been forced ── 2. Failure: Failure:2a (@tests.R#16) ─────────────────────────────────────── Failure has been forced ── 3. Failure: Failure:2b (@tests.R#19) ─────────────────────────────────────── FALSE isn't true. ── 4. Failure: Failure:loop (@tests.R#24) ───────────────────────────────────── `i` not equal to 2. 1/1 mismatches [1] 1 - 2 == -1 ── 5. Error: Error:1 (@tests.R#33) ──────────────────────────────────────────── stop ── 6. Error: Error:3 (@tests.R#47) ──────────────────────────────────────────── ! Backtrace: 1. f() 2. g() 3. h() ── 7. Error: Recursion:1 (@tests.R#56) ──────────────────────────────────────── This is deep Backtrace: 1. f(25) 2. f(x - 1) 3. f(x - 1) 4. f(x - 1) 5. f(x - 1) 6. f(x - 1) 7. f(x - 1) 8. f(x - 1) 9. f(x - 1) 10. f(x - 1) ... 17. f(x - 1) 18. f(x - 1) 19. f(x - 1) 20. f(x - 1) 21. f(x - 1) 22. f(x - 1) 23. f(x - 1) 24. f(x - 1) 25. f(x - 1) 26. f(x - 1) ── 8. Error: Error:4 (@tests.R#111) ─────────────────────────────────────────── Throwable ══ DONE ════════════════════════════════════════════════════════════════════════ testthat/tests/testthat/reporters/tests.R0000644000176200001440000000333513564563701020444 0ustar liggesuserstest_that("Success", { succeed() }) context("Expectations") test_that("Success", { succeed() }) test_that("Failure:1", { fail() }) test_that("Failure:2a", { f <- function() fail() f() }) test_that("Failure:2b", { expect_true(FALSE) }) test_that("Failure:loop", { for (i in 1:2) { expect_equal(i, 2) } }) context("Expectations2") context("Errors") test_that("Error:1", { stop("stop") }) test_that("Error:3", { f <- function() { g() } g <- function() { h() } h <- function() { stop("!") } f() }) context("Recursion") test_that("Recursion:1", { f <- function(x) { if (x > 0) f(x - 1) else stop("This is deep") } f(25) }) context("Skips") test_that("Skip:1", { skip("skip") }) test_that("Skip:2", { f <- function() { skip("skip") } f() }) test_that("Skip:3", { }) context("Warnings") test_that("Warning:1", { warning("abc") }) test_that("Warning:2", { f <- function() { warning("ghi") } warning("def") f() }) context("Output") test_that("Output:1", { expect_output(expect_false(FALSE), NA) }) context("Throwable errors") test_that("Error:4", { local_methods <- function(..., .frame = caller_env()) { rlang::scoped_bindings(.env = globalenv(), .frame = .frame, ...) } local_Throwable_methods <- function(frame = caller_env()) { local_methods( .frame = frame, conditionMessage.Throwable = function(c, ...) unclass(c)$message, conditionCall.Throwable = function(c, ...) unclass(c)$call, `$.Throwable` = function(...) stop("forbidden"), `$<-.Throwable` = function(...) 
stop("forbidden") ) } throw <- function(msg) stop(rlang::error_cnd("Throwable", message = msg)) throw("Throwable") }) context("End") testthat/tests/testthat/reporters/teamcity.txt0000644000176200001440000001163113564563701021535 0ustar liggesusers##teamcity[testSuiteStarted name='Success'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFinished name='expectation 1'] ##teamcity[testSuiteFinished name='Success'] ##teamcity[testSuiteStarted name='Expectations'] ##teamcity[testSuiteStarted name='Success'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFinished name='expectation 1'] ##teamcity[testSuiteFinished name='Success'] ##teamcity[testSuiteStarted name='Failure:1'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFailed name='expectation 1' message='Failure has been forced' details=''] ##teamcity[testFinished name='expectation 1'] ##teamcity[testSuiteFinished name='Failure:1'] ##teamcity[testSuiteStarted name='Failure:2a'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFailed name='expectation 1' message='Failure has been forced' details=''] ##teamcity[testFinished name='expectation 1'] ##teamcity[testSuiteFinished name='Failure:2a'] ##teamcity[testSuiteStarted name='Failure:2b'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFailed name='expectation 1' message='FALSE isn|'t true.' details=''] ##teamcity[testFinished name='expectation 1'] ##teamcity[testSuiteFinished name='Failure:2b'] ##teamcity[testSuiteStarted name='Failure:loop'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFailed name='expectation 1' message='`i` not equal to 2.' details='1/1 mismatches|n|[1|] 1 - 2 == -1'] ##teamcity[testFinished name='expectation 1'] ##teamcity[testStarted name='expectation 2'] ##teamcity[testFinished name='expectation 2'] ##teamcity[testSuiteFinished name='Failure:loop'] ##teamcity[testSuiteFinished name='Expectations'] ##teamcity[testSuiteStarted name='Expectations2'] ##teamcity[testSuiteFinished name='Expectations2'] ##teamcity[testSuiteStarted name='Errors'] ##teamcity[testSuiteStarted name='Error:1'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFailed name='expectation 1' message='stop' details=''] ##teamcity[testFinished name='expectation 1'] ##teamcity[testSuiteFinished name='Error:1'] ##teamcity[testSuiteStarted name='Error:3'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFailed name='expectation 1' message='!' details='Backtrace:|n 1. f()|n 2. g()|n 3. h()'] ##teamcity[testFinished name='expectation 1'] ##teamcity[testSuiteFinished name='Error:3'] ##teamcity[testSuiteFinished name='Errors'] ##teamcity[testSuiteStarted name='Recursion'] ##teamcity[testSuiteStarted name='Recursion:1'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFailed name='expectation 1' message='This is deep' details='Backtrace:|n 1. f(25)|n 2. f(x - 1)|n 3. f(x - 1)|n 4. f(x - 1)|n 5. f(x - 1)|n 6. f(x - 1)|n 7. f(x - 1)|n 8. f(x - 1)|n 9. f(x - 1)|n 10. f(x - 1)|n ...|n 17. f(x - 1)|n 18. f(x - 1)|n 19. f(x - 1)|n 20. f(x - 1)|n 21. f(x - 1)|n 22. f(x - 1)|n 23. f(x - 1)|n 24. f(x - 1)|n 25. f(x - 1)|n 26. 
f(x - 1)'] ##teamcity[testFinished name='expectation 1'] ##teamcity[testSuiteFinished name='Recursion:1'] ##teamcity[testSuiteFinished name='Recursion'] ##teamcity[testSuiteStarted name='Skips'] ##teamcity[testSuiteStarted name='Skip:1'] ##teamcity[testIgnored name='expectation 1' message='Reason: skip'] ##teamcity[testSuiteFinished name='Skip:1'] ##teamcity[testSuiteStarted name='Skip:2'] ##teamcity[testIgnored name='expectation 1' message='Reason: skip'] ##teamcity[testSuiteFinished name='Skip:2'] ##teamcity[testSuiteStarted name='Skip:3'] ##teamcity[testIgnored name='expectation 1' message='Reason: empty test'] ##teamcity[testSuiteFinished name='Skip:3'] ##teamcity[testSuiteFinished name='Skips'] ##teamcity[testSuiteStarted name='Warnings'] ##teamcity[testSuiteStarted name='Warning:1'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFinished name='expectation 1'] ##teamcity[testSuiteFinished name='Warning:1'] ##teamcity[testSuiteStarted name='Warning:2'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFinished name='expectation 1'] ##teamcity[testStarted name='expectation 2'] ##teamcity[testFinished name='expectation 2'] ##teamcity[testSuiteFinished name='Warning:2'] ##teamcity[testSuiteFinished name='Warnings'] ##teamcity[testSuiteStarted name='Output'] ##teamcity[testSuiteStarted name='Output:1'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFinished name='expectation 1'] ##teamcity[testStarted name='expectation 2'] ##teamcity[testFinished name='expectation 2'] ##teamcity[testSuiteFinished name='Output:1'] ##teamcity[testSuiteFinished name='Output'] ##teamcity[testSuiteStarted name='Throwable errors'] ##teamcity[testSuiteStarted name='Error:4'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFailed name='expectation 1' message='Throwable' details=''] ##teamcity[testFinished name='expectation 1'] ##teamcity[testSuiteFinished name='Error:4'] ##teamcity[testSuiteFinished name='Throwable errors'] ##teamcity[testSuiteStarted name='End'] ##teamcity[testSuiteFinished name='End'] testthat/tests/testthat/reporters/silent.txt0000644000176200001440000000000013564530574021202 0ustar liggesuserstestthat/tests/testthat/reporters/summary.txt0000644000176200001440000000577613564563701021430 0ustar liggesusers.Expectations: .1234. Expectations2: Errors: 56 Recursion: 7 Skips: SSS Warnings: WWW Output: .. Throwable errors: 8 End: ══ Skipped ═════════════════════════════════════════════════════════════════════ 1. Skip:1 (@tests.R#62) - Reason: skip 2. Skip:2 (@tests.R#69) - Reason: skip 3. Skip:3 (@tests.R#72) - Reason: empty test ══ Warnings ════════════════════════════════════════════════════════════════════ 1. Warning:1 (@tests.R#78) - abc 2. Warning:2 (@tests.R#84) - def 3. Warning:2 (@tests.R#85) - ghi ══ Failed ══════════════════════════════════════════════════════════════════════ ── 1. Failure: Failure:1 (@tests.R#12) ──────────────────────────────────────── Failure has been forced ── 2. Failure: Failure:2a (@tests.R#16) ─────────────────────────────────────── Failure has been forced ── 3. Failure: Failure:2b (@tests.R#19) ─────────────────────────────────────── FALSE isn't true. ── 4. Failure: Failure:loop (@tests.R#24) ───────────────────────────────────── `i` not equal to 2. 1/1 mismatches [1] 1 - 2 == -1 ── 5. Error: Error:1 (@tests.R#33) ──────────────────────────────────────────── stop ── 6. Error: Error:3 (@tests.R#47) ──────────────────────────────────────────── ! Backtrace: 1. f() 2. g() 3. h() ── 7. 
Error: Recursion:1 (@tests.R#56) ──────────────────────────────────────── This is deep Backtrace: 1. f(25) 2. f(x - 1) 3. f(x - 1) 4. f(x - 1) 5. f(x - 1) 6. f(x - 1) 7. f(x - 1) 8. f(x - 1) 9. f(x - 1) 10. f(x - 1) ... 17. f(x - 1) 18. f(x - 1) 19. f(x - 1) 20. f(x - 1) 21. f(x - 1) 22. f(x - 1) 23. f(x - 1) 24. f(x - 1) 25. f(x - 1) 26. f(x - 1) ── 8. Error: Error:4 (@tests.R#111) ─────────────────────────────────────────── Throwable ══ DONE ════════════════════════════════════════════════════════════════════════ testthat/tests/testthat/reporters/progress.txt0000644000176200001440000001215213564563701021561 0ustar liggesusers✔ | OK F W S | Context ⠏ | 0 | tests ⠋ | 1 | tests ✔ | 1 | tests ⠏ | 0 | Expectations ⠋ | 1 | Expectations ⠙ | 1 1 | Expectations ⠹ | 1 2 | Expectations ⠸ | 1 3 | Expectations ⠼ | 1 4 | Expectations ⠴ | 2 4 | Expectations ✖ | 2 4 | Expectations ──────────────────────────────────────────────────────────────────────────────── tests.R:12: failure: Failure:1 Failure has been forced tests.R:16: failure: Failure:2a Failure has been forced tests.R:19: failure: Failure:2b FALSE isn't true. tests.R:24: failure: Failure:loop `i` not equal to 2. 1/1 mismatches [1] 1 - 2 == -1 ──────────────────────────────────────────────────────────────────────────────── ⠏ | 0 | Expectations2 ✔ | 0 | Expectations2 ⠏ | 0 | Errors ⠋ | 0 1 | Errors ⠙ | 0 2 | Errors ✖ | 0 2 | Errors ──────────────────────────────────────────────────────────────────────────────── tests.R:33: error: Error:1 stop tests.R:47: error: Error:3 ! Backtrace: 1. f() 2. g() 3. h() ──────────────────────────────────────────────────────────────────────────────── ⠏ | 0 | Recursion ⠋ | 0 1 | Recursion ✖ | 0 1 | Recursion ──────────────────────────────────────────────────────────────────────────────── tests.R:56: error: Recursion:1 This is deep Backtrace: 1. f(25) 2. f(x - 1) 3. f(x - 1) 4. f(x - 1) 5. f(x - 1) 6. f(x - 1) 7. f(x - 1) 8. f(x - 1) 9. f(x - 1) 10. f(x - 1) ... 17. f(x - 1) 18. f(x - 1) 19. f(x - 1) 20. f(x - 1) 21. f(x - 1) 22. f(x - 1) 23. f(x - 1) 24. f(x - 1) 25. f(x - 1) 26. 
f(x - 1) ──────────────────────────────────────────────────────────────────────────────── ⠏ | 0 | Skips ⠋ | 0 1 | Skips ⠙ | 0 2 | Skips ⠹ | 0 3 | Skips ✔ | 0 3 | Skips ──────────────────────────────────────────────────────────────────────────────── tests.R:62: skip: Skip:1 Reason: skip tests.R:69: skip: Skip:2 Reason: skip tests.R:72: skip: Skip:3 Reason: empty test ──────────────────────────────────────────────────────────────────────────────── ⠏ | 0 | Warnings ⠋ | 0 1 | Warnings ⠙ | 0 2 | Warnings ⠹ | 0 3 | Warnings ✔ | 0 3 | Warnings ──────────────────────────────────────────────────────────────────────────────── tests.R:78: warning: Warning:1 abc tests.R:84: warning: Warning:2 def tests.R:85: warning: Warning:2 ghi ──────────────────────────────────────────────────────────────────────────────── ⠏ | 0 | Output ⠋ | 1 | Output ⠙ | 2 | Output ✔ | 2 | Output ⠏ | 0 | Throwable errors ⠋ | 0 1 | Throwable errors ✖ | 0 1 | Throwable errors ──────────────────────────────────────────────────────────────────────────────── tests.R:111: error: Error:4 Throwable ──────────────────────────────────────────────────────────────────────────────── ⠏ | 0 | End ✔ | 0 | End ══ Results ═════════════════════════════════════════════════════════════════════ OK: 5 Failed: 8 Warnings: 3 Skipped: 3 testthat/tests/testthat/reporters/location.txt0000644000176200001440000000213713564563701021527 0ustar liggesusersStart test: Success tests.R#2:1 [success] End test: Success Start test: Success tests.R#8:1 [success] End test: Success Start test: Failure:1 tests.R#12:1 [failure] End test: Failure:1 Start test: Failure:2a tests.R#16:1 [failure] End test: Failure:2a Start test: Failure:2b tests.R#19:1 [failure] End test: Failure:2b Start test: Failure:loop tests.R#24:1 [failure] tests.R#24:1 [success] End test: Failure:loop Start test: Error:1 tests.R#33:1 [error] End test: Error:1 Start test: Error:3 tests.R#47:1 [error] End test: Error:3 Start test: Recursion:1 tests.R#56:1 [error] End test: Recursion:1 Start test: Skip:1 tests.R#62:1 [skip] End test: Skip:1 Start test: Skip:2 tests.R#69:1 [skip] End test: Skip:2 Start test: Skip:3 tests.R#72:1 [skip] End test: Skip:3 Start test: Warning:1 tests.R#78:1 [warning] End test: Warning:1 Start test: Warning:2 tests.R#84:1 [warning] tests.R#85:1 [warning] End test: Warning:2 Start test: Output:1 tests.R#91:1 [success] tests.R#91:1 [success] End test: Output:1 Start test: Error:4 tests.R#111:1 [error] End test: Error:4 testthat/tests/testthat/reporters/debug.txt0000644000176200001440000000152413564563701021004 0ustar liggesusers1: fail() 2: expect(FALSE, message, info = info) 1: f() 2: fail() 3: expect(FALSE, message, info = info) 1: expect_true(FALSE) 2: expect(identical(as.vector(act$val), TRUE), sprintf("%s isn't true.", act$l 1: expect_equal(i, 2) 2: expect(comp$equal, sprintf("%s not equal to %s.\n%s", act$lab, exp$lab, com 1: stop("stop") 1: f() 2: g() 3: h() 4: stop("!") 1: f(25) 2: f(x - 1) 3: f(x - 1) 4: f(x - 1) 5: f(x - 1) 6: f(x - 1) 7: f(x - 1) 8: f(x - 1) 9: f(x - 1) 10: f(x - 1) 11: f(x - 1) 12: f(x - 1) 13: f(x - 1) 14: f(x - 1) 15: f(x - 1) 16: f(x - 1) 17: f(x - 1) 18: f(x - 1) 19: f(x - 1) 20: f(x - 1) 21: f(x - 1) 22: f(x - 1) 23: f(x - 1) 24: f(x - 1) 25: f(x - 1) 26: f(x - 1) 27: stop("This is deep") 1: skip("skip") 1: f() 2: skip("skip") 1: warning("abc") 1: warning("def") 1: f() 2: warning("ghi") 1: throw("Throwable") testthat/tests/testthat/reporters/stop.txt0000644000176200001440000000013113564563701020674 0ustar liggesusersTest failed: 'two failures' * 
fail.R:4: FALSE isn't true. * fail.R:5: TRUE isn't false. testthat/tests/testthat/reporters/summary-2.txt0000644000176200001440000000327013564563701021552 0ustar liggesusers.Expectations: .1234. Expectations2: Errors: 56 Recursion: 7 Skips: SSS Warnings: WWW Output: .. Throwable errors: 8 End: ══ Skipped ═════════════════════════════════════════════════════════════════════ 1. Skip:1 (@tests.R#62) - Reason: skip 2. Skip:2 (@tests.R#69) - Reason: skip 3. Skip:3 (@tests.R#72) - Reason: empty test ══ Warnings ════════════════════════════════════════════════════════════════════ 1. Warning:1 (@tests.R#78) - abc 2. Warning:2 (@tests.R#84) - def 3. Warning:2 (@tests.R#85) - ghi ══ Failed ══════════════════════════════════════════════════════════════════════ ── 1. Failure: Failure:1 (@tests.R#12) ──────────────────────────────────────── Failure has been forced ── 2. Failure: Failure:2a (@tests.R#16) ─────────────────────────────────────── Failure has been forced ... and 6 more Maximum number of 2 failures reached, some test results may be missing. ══ DONE ════════════════════════════════════════════════════════════════════════ testthat/tests/testthat/reporters/fail.R0000644000176200001440000000022213564563701020205 0ustar liggesuserscontext("fail") test_that("two failures", { expect_true(FALSE) expect_false(TRUE) }) test_that("another failure", { expect_true(FALSE) }) testthat/tests/testthat/reporters/minimal.txt0000644000176200001440000000002413564563701021336 0ustar liggesusers..FFFF.EEESSSWWW..E testthat/tests/testthat/test-verify-unicode-true.txt0000644000176200001440000000007313564563701022551 0ustar liggesusers> cat(cli::symbol$info, cli::symbol$cross, "\n") ℹ ✖ testthat/tests/testthat/test_dir/0000755000176200001440000000000013176617410016737 5ustar liggesuserstestthat/tests/testthat/test_dir/test-helper.R0000644000176200001440000000022713176113117021312 0ustar liggesusers# test that the companion helper script is sourced by test_dir context("helper") test_that("helper test", { expect_equal(hello(), "Hello World") }) testthat/tests/testthat/test_dir/test-failures.R0000644000176200001440000000034613176113117021647 0ustar liggesuserscontext("failures") test_that("just one failure", { expect_true(FALSE) }) test_that("one failure on two", { expect_false(FALSE) expect_true(FALSE) }) test_that("no failure", { expect_false(FALSE) expect_true(TRUE) }) testthat/tests/testthat/test_dir/test-bare-expectations.R0000644000176200001440000000007612666267031023462 0ustar liggesuserscontext("Bare") expect_that(1, equals(1)) expect_equal(2, 2) testthat/tests/testthat/test_dir/test-skip.R0000644000176200001440000000015612666267031021012 0ustar liggesuserscontext("skip") test_that("Skips skip", { skip("Skipping to avoid certain failure") expect_true(FALSE) })testthat/tests/testthat/test_dir/test-errors.R0000644000176200001440000000053213176113117021346 0ustar liggesuserscontext("error") test_that("simple", { stop("argh") }) test_that("after one success", { expect_true(TRUE) stop("argh") expect_true(TRUE) }) test_that("after one failure", { expect_true(FALSE) stop("argh") }) test_that("in the test", { expect_true(stop("Argh")) }) test_that("in expect_error", { expect_error(stop("Argh")) }) testthat/tests/testthat/test_dir/helper_hello.R0000644000176200001440000000004213176113117021513 0ustar liggesusershello <- function() "Hello World" testthat/tests/testthat/test_dir/test-empty.R0000644000176200001440000000014213176113117021165 0ustar liggesuserscontext("empty") test_that("empty test", NULL) test_that("empty 
test with error", stop("Argh")) testthat/tests/testthat/test_dir/test-basic.R0000644000176200001440000000102312666267031021117 0ustar liggesuserscontext("Basic") test_that("logical tests act as expected", { expect_that(TRUE, is_true()) expect_that(FALSE, is_false()) }) test_that("logical tests ignore attributes", { expect_that(c(a = TRUE), is_true()) expect_that(c(a = FALSE), is_false()) }) test_that("equality holds", { expect_that(5, equals(5)) expect_that(10, is_identical_to(10)) }) test_that("can't access variables from other tests 2", { a <- 10 }) test_that("can't access variables from other tests 1", { expect_that(exists("a"), is_false()) }) testthat/tests/testthat/test-expect-condition.txt0000644000176200001440000000320113564563701022114 0ustar liggesusers── 1. Failure: (@test-expect-condition.R#62) ───────────────────────────────── `null()` did not throw an error. ── 2. Failure: (@test-expect-condition.R#63) ───────────────────────────────── `fail("!")` threw an error. Message: ! Class: simpleError/error/condition Backtrace: 1. testthat::expect_error(fail("!"), NA) 6. testthat::fail("!") ── 3. Failure: (@test-expect-condition.R#65) ───────────────────────────────── `fail("xxx")` threw an error with unexpected message. Expected match: "zzz" Actual message: "xxx" Backtrace: 1. testthat::expect_error(fail("xxx"), regexp = "zzz") 6. testthat::fail("xxx") ── 4. Failure: (@test-expect-condition.R#66) ───────────────────────────────── `fail("xxx")` threw an error with unexpected class. Expected class: zzz Actual class: simpleError/error/condition Message: xxx Backtrace: 1. testthat::expect_error(fail("xxx"), class = "zzz") 6. testthat::fail("xxx") ── 5. Failure: (@test-expect-condition.R#67) ───────────────────────────────── `fail("xxx")` threw an error with unexpected class and message. Expected class: zzz Actual class: simpleError/error/condition Message: xxx Expected match: "zzz" Actual message: "xxx" Backtrace: 1. testthat::expect_error(fail("xxx"), regexp = "zzz", class = "zzz") 6. testthat::fail("xxx") testthat/tests/testthat/test-expect-condition.R0000644000176200001440000001121313564563701021500 0ustar liggesuserstest_that("regexp = NULL checks for presence of error", { expect_success(expect_error(stop())) expect_failure(expect_error(null()), "did not throw an error") }) test_that("regexp = NA checks for absence of error", { expect_success(expect_error(null(), NA)) expect_failure(expect_error(stop("Yes"), NA)) }) test_that("regexp = string matches for error message", { expect_success(expect_error(stop("Yes"), "Yes")) expect_failure(expect_error(stop("Yes"), "No")) expect_failure(expect_error("OK", "No"), "did not throw an error") }) test_that("classed error generates useful warning", { custom_err <- function() abort("This is an error", .subclass = "custom_err") verify_output(test_path("test-expect-condition-custom.txt"), { expect_error(custom_err(), "an error") }) }) test_that("class = string matches class of error", { blah <- function() { cond <- structure( list(message = "hi"), class = c("blah", "error", "condition") ) stop(cond) } expect_success(expect_error(blah(), class = "blah")) expect_failure( expect_error(blah(), class = "blech"), "threw an error with unexpected class" ) expect_failure( expect_condition(blah(), class = "blech"), "threw an condition with unexpected class" ) }) test_that("... 
passed on to grepl", { expect_success(expect_error(stop("X"), "x", ignore.case = TRUE)) }) test_that("generates informative failures", { skip_if_not(l10n_info()$`UTF-8`) # rlang backtraces are sensitive to upstream changes skip_on_cran() # Disable srcrefs because they differ across systems withr::local_options(list(rlang_trace_format_srcrefs = FALSE)) expect_known_failure("test-expect-condition.txt", { # Call `stop()` indirectly to create more realistic backtraces in # captured output fail <- function(msg) stop(msg) expect_error(null()) expect_error(fail("!"), NA) expect_error(fail("xxx"), regexp = "zzz") expect_error(fail("xxx"), class = "zzz") expect_error(fail("xxx"), regexp = "zzz", class = "zzz") }) }) test_that("warnings are converted to errors when options('warn') >= 2", { withr::with_options(c(warn = 2), { expect_warning(warning("foo")) expect_error(warning("foo")) }) }) test_that("can silence warnings", { expect_warning(suppressWarnings(warning("foo")), NA) # Can't test with `expect_warning()` because the warning is still # signalled, it's just not printed # https://github.com/wch/r-source/blob/886ab4a0/src/main/errors.c#L388-L484 withr::with_options(c(warn = -1), warning("foo")) }) local({ # Define method in the global environment so it's consistently reached scoped_bindings( conditionMessage.foobar = function(err) "dispatched!", .env = global_env() ) foobar <- error_cnd("foobar") test_that("message method is called when expecting error", { expect_error(stop(foobar), "dispatched!", class = "foobar") }) test_that("message method is called with unexpected message", { expect_error( expect_error(stop(foobar), "unexpected", class = "foobar"), "Actual message: \"dispatched!\"", fixed = TRUE, class = "expectation_failure" ) }) test_that("message method is called with unexpected error", { expect_error( expect_error(stop(foobar), NA, class = "foobar"), "dispatched!", class = "expectation_failure" ) }) test_that("message method is called with expected warnings", { foobar <- warning_cnd("foobar") expect_warning(warning(foobar), "dispatched!") }) test_that("message method is called with expected messages", { foobar <- message_cnd("foobar") expect_message(message(foobar), "dispatched!") }) }) test_that("rlang backtrace reminders are not included in error message", { f <- function() g() g <- function() h() h <- function() abort("foo") expect_error(f(), "foo$") }) test_that("is_informative_error returns TRUE for basic errors", { is_informative <- function(x) is_informative_error(catch_cnd(x)) expect_false(is_informative(stop("!"))) expect_false(is_informative(abort("!"))) expect_false(is_informative(abort("!", .subclass = "Rcpp::eval_error"))) expect_false(is_informative(abort("!", .subclass = "Rcpp::exception"))) expect_true(is_informative(abort("!", .subclass = "error_custom"))) with_bindings( .env = global_env(), is_informative_error.error_custom = function(...) 
FALSE, expect_false(is_informative(abort("!", .subclass = "error_custom"))) ) }) test_that("can capture Throwable conditions from rJava", { local_Throwable_methods() throw <- function(msg) stop(error_cnd("Throwable", message = msg)) expect_error(throw("foo"), "foo", class = "Throwable") }) testthat/tests/testthat/test-reporter-multi.R0000644000176200001440000000054513547563465021234 0ustar liggesuserstest_that("MultiReporter", { reports <- lapply(seq_len(3), function(x) ListReporter$new()) reporter <- MultiReporter$new(reporters = reports) test_file(test_path("context.R"), reporter) dfs <- lapply(reports, function(x) as.data.frame(x$get_results())) expect_equal(dfs[[2]][1:7], dfs[[1]][1:7]) expect_equal(dfs[[3]][1:7], dfs[[1]][1:7]) }) testthat/tests/testthat/test-context.R0000644000176200001440000000173713547563465017732 0ustar liggesusersCountReporter <- R6::R6Class("CountReporter", inherit = Reporter, public = list( context_i = 0, context_count = 0, test_i = 0, test_count = 0, start_context = function(context) { self$context_count <- self$context_count + 1 self$context_i <- self$context_i + 1 }, end_context = function(context) { self$context_i <- self$context_i - 1 stopifnot(self$context_i >= 0) }, start_test = function(context, test) { self$test_count <- self$test_count + 1 self$test_i <- self$test_i + 1 }, end_test = function(context, test) { self$test_i <- self$test_i - 1 stopifnot(self$test_i >= 0) } ) ) test_that("contexts are opened, then closed", { report <- CountReporter$new() test_file("context.R", report, wrap = FALSE) expect_that(report$context_count, equals(2)) expect_that(report$context_i, equals(0)) expect_that(report$test_count, equals(4)) expect_that(report$test_i, equals(0)) }) testthat/tests/testthat/helper-assign.R0000644000176200001440000000002012666267031020001 0ustar liggesusersabcdefghi <- 10 testthat/tests/testthat/test-reporter.R0000644000176200001440000000571113564563701020074 0ustar liggesusers # Disable srcrefs because they differ across systems withr::local_options(list(rlang_trace_format_srcrefs = FALSE)) skip_if(getRversion() < "3.4", "Fails because of new `eval()` call structure") # rlang backtraces are sensitive to upstream changes skip_on_cran() test_that("reporters produce consistent output", { expect_report_unchanged("location") expect_report_unchanged("minimal") expect_report_unchanged("tap") expect_report_unchanged("teamcity") expect_report_unchanged("silent") expect_report_unchanged("rstudio") expect_report_unchanged("check", CheckReporter$new(stop_on_failure = FALSE)) expect_report_unchanged("junit", reporter = JunitReporterMock) expect_report_unchanged("progress", ProgressReporter$new(show_praise = FALSE, min_time = Inf, update_interval = 0)) expect_report_unchanged("summary", SummaryReporter$new(show_praise = FALSE, omit_dots = FALSE)) expect_report_unchanged("summary-2", SummaryReporter$new(show_praise = FALSE, max_reports = 2)) expect_report_unchanged("summary-no-dots", SummaryReporter$new(show_praise = FALSE, omit_dots = TRUE)) # Test that MultiReporter can write to two different places tap_file <- tempfile() expect_report_unchanged("summary", reporter = MultiReporter$new(list( SummaryReporter$new(show_praise = FALSE, omit_dots = FALSE), TapReporter$new(file = tap_file) ))) expect_identical( read_lines(tap_file), read_lines(test_path("reporters", "tap.txt")) ) }) test_that('debug reporter produces consistent output', { withr::local_options(c(testthat_format_srcrefs = FALSE)) with_mock( show_menu = function(choices, title = NULL) { 
cat(paste0(format(seq_along(choices)), ": ", choices, sep = "\n"), "\n", sep = "") 0L }, sink_number = function() 0L, expect_report_unchanged("debug") ) }) test_that("reporters accept a 'file' argument and write to that location", { expect_report_to_file(CheckReporter, stop_on_failure = FALSE) expect_report_to_file(JunitReporterMock) expect_report_to_file(LocationReporter) expect_report_to_file(MinimalReporter) expect_report_to_file(ProgressReporter, show_praise = FALSE, min_time = Inf, update_interval = 0) expect_report_to_file(SummaryReporter, show_praise = FALSE, omit_dots = FALSE) expect_report_to_file(TapReporter) expect_report_to_file(TeamcityReporter) expect_report_to_file(RstudioReporter) }) test_that("reporters write to 'testthat.output_file', if specified", { path <- tempfile() withr::local_options(c(testthat.output_file = path)) test_file(test_path("reporters/tests.R"), MinimalReporter$new(), wrap = FALSE) expect_true(file.exists(path)) }) test_that("backtraces are reported", { expect_report_unchanged("progress-backtraces", file = "reporters/backtraces.R", ProgressReporter$new(show_praise = FALSE, min_time = Inf, update_interval = 0)) }) test_that("stop reporter stops at first failure", { expect_report_unchanged("stop", find_reporter("stop"), file = "reporters/fail.R") }) testthat/tests/testthat/test-expect-vector.R0000644000176200001440000000033213547563465021024 0ustar liggesuserscontext("test-expect-vector") test_that("basic properties upheld", { skip_if_not_installed("vctrs", "0.1.0.9002") expect_success(expect_vector(1:10, size = 10)) expect_failure(expect_vector(1:10, size = 5)) }) testthat/tests/testthat/test-expect-comparison.R0000644000176200001440000000310613547563465021676 0ustar liggesuserstest_that("basic comparisons work", { expect_success(expect_lt(10, 11)) expect_failure(expect_lt(10, 10)) expect_success(expect_lte(10, 10)) expect_success(expect_gt(11, 10)) expect_failure(expect_gt(10, 10)) expect_success(expect_gte(10, 10)) }) test_that("comparison result object invisibly", { out <- expect_invisible(expect_lt(1, 10)) expect_equal(out, 1) }) test_that("comparisons with Inf work", { expect_success(expect_lt(10, Inf)) expect_failure(expect_lt(Inf, Inf)) expect_success(expect_lte(Inf, Inf)) expect_success(expect_gt(Inf, 10)) expect_failure(expect_gt(Inf, Inf)) expect_success(expect_gte(Inf, Inf)) }) test_that("comparisons with NA work", { expect_failure(expect_lt(10, NA_real_)) expect_failure(expect_lt(NA_real_, 10)) expect_failure(expect_lt(NA_real_, NA_real_)) expect_failure(expect_lte(NA_real_, NA_real_)) expect_failure(expect_gt(10, NA_real_)) expect_failure(expect_gt(NA_real_, 10)) expect_failure(expect_gt(NA_real_, NA_real_)) expect_failure(expect_gte(NA_real_, NA_real_)) }) test_that("comparisons with more complicated objects work", { time <- Sys.time() time2 <- time + 1 expect_success(expect_lt(time, time2)) expect_success(expect_lte(time, time2)) expect_success(expect_gt(time2, time)) expect_success(expect_gte(time2, time)) }) test_that("comparison must yield a single logical", { expect_error(expect_lt(1:10, 5), "single logical") }) test_that("wordly versions are deprecated", { expect_warning(expect_less_than(1, 2), "Deprecated") expect_warning(expect_more_than(2, 1), "Deprecated") }) testthat/tests/testthat/helper-testthat.R0000644000176200001440000000072113564563701020366 0ustar liggesusers local_methods <- function(..., .frame = caller_env()) { scoped_bindings(.env = global_env(), .frame = .frame, ...) 
} local_Throwable_methods <- function(frame = caller_env()) { local_methods( .frame = frame, conditionMessage.Throwable = function(c, ...) unclass(c)$message, conditionCall.Throwable = function(c, ...) unclass(c)$call, `$.Throwable` = function(...) stop("forbidden"), `$<-.Throwable` = function(...) stop("forbidden") ) } testthat/tests/testthat/test-expect-reference.R0000644000176200001440000000032513547563465021462 0ustar liggesuserstest_that("can check for references", { skip_if_not_installed("rlang", "0.1.4.9000") x1 <- 1:10 x2 <- x1 y <- 1:10 expect_success(expect_reference(x1, x2)) expect_failure(expect_reference(x1, y)) }) testthat/tests/testthat/test-test-example.R0000644000176200001440000000060613547563465020650 0ustar liggesuserstest_that("can test documentation from path or Rd object", { rd_path <- test_path("../../man/expect_length.Rd") skip_if_not(file.exists(rd_path)) test_example(rd_path) test_rd(tools::parse_Rd(rd_path)) }) test_that("returns false if no examples", { rd_path <- test_path("../../man/test_examples.Rd") skip_if_not(file.exists(rd_path)) expect_false(test_example(rd_path)) }) testthat/tests/testthat/test-mock.R0000644000176200001440000000662213547563465017175 0ustar liggesuserstest_that("can make 3 = 5", { with_mock( compare = function(x, y, ...) list(equal = TRUE, message = "TRUE"), expect_equal(3, 5) ) }) test_that("mocked function is restored on error", { expect_error( with_mock( compare = function(x, y, ...) list(equal = TRUE, message = "TRUE"), stop("Simulated error") ), "Simulated error" ) }) test_that("non-empty mock with return value", { expect_true(with_mock( compare = function(x, y, ...) list(equal = TRUE, message = "TRUE"), TRUE )) }) test_that("nested mock", { with_mock( all.equal = function(x, y, ...) TRUE, { with_mock( expect_warning = expect_error, { expect_warning(stopifnot(!compare(3, "a")$equal)) } ) }, .env = asNamespace("base") ) expect_false(isTRUE(all.equal(3, 5))) expect_warning(warning("test")) }) test_that("can't mock non-existing", { expect_error(with_mock(..bogus.. = identity, TRUE), "Function [.][.]bogus[.][.] not found in environment testthat") }) test_that("can't mock non-function", { expect_error(with_mock(pkg_and_name_rx = FALSE, TRUE), "Function pkg_and_name_rx not found in environment testthat") }) test_that("empty or no-op mock", { expect_warning( expect_null(with_mock()), "Not mocking anything. Please use named parameters to specify the functions you want to mock.", fixed = TRUE ) expect_warning( expect_true(with_mock(TRUE)), "Not mocking anything. 
Please use named parameters to specify the functions you want to mock.", fixed = TRUE ) }) test_that("visibility", { expect_warning(expect_false(withVisible(with_mock())$visible)) expect_true(withVisible(with_mock(compare = function() {}, TRUE))$visible) expect_false(withVisible(with_mock(compare = function() {}, invisible(5)))$visible) }) test_that("multiple return values", { expect_true(with_mock(FALSE, TRUE, compare = function() {})) expect_equal(with_mock(3, compare = function() {}, 5), 5) }) test_that("can access variables defined in function", { x <- 5 expect_equal(with_mock(x, compare = function() {}), 5) }) test_that("can mock if package is not loaded", { if ("package:devtools" %in% search()) { skip("devtools is loaded") } skip_if_not_installed("devtools") with_mock(`devtools::test` = identity, expect_identical(devtools::test, identity)) }) test_that("changes to variables are preserved between calls and visible outside", { x <- 1 with_mock( show_menu = function() {}, x <- 3, expect_equal(x, 3) ) expect_equal(x, 3) }) test_that("mock extraction", { expect_identical( extract_mocks(list(compare = compare), .env = asNamespace("testthat"))$compare$name, as.name("compare") ) expect_error( extract_mocks(list(..bogus.. = identity), "testthat"), "Function [.][.]bogus[.][.] not found in environment testthat" ) expect_equal( length(extract_mocks(list(not = identity, show_menu = identity), "testthat")), 2 ) }) test_that("mocks can access local variables", { value <- compare(0, 0) with_mock( expect_equal(2 * 3, 4), compare = function(x, y, ...) { value } ) }) # local_mock -------------------------------------------------------------- test_that("local_mock operates locally", { f <- function() { local_mock(compare = function(x, y) FALSE) compare(1, 1) } expect_false(f()) expect_equal(compare(1, 1), no_difference()) }) testthat/tests/testthat.R0000644000176200001440000000054113564563701015251 0ustar liggesuserslibrary(testthat) options(testthat.use_colours = FALSE) out <- test_that("running succeeding test outside of test reporter works", { expect_true(TRUE) }) expect_true(out) err <- rlang::catch_cnd( test_that("running failing test outside of test reporter is an error", { expect_true(FALSE) }) ) expect_is(err, "error") test_check("testthat") testthat/src/0000755000176200001440000000000013564761040012707 5ustar liggesuserstestthat/src/reassign.c0000644000176200001440000000122612666267031014672 0ustar liggesusers#define USE_RINTERNALS #include #include #include SEXP reassign_function(SEXP name, SEXP env, SEXP old_fun, SEXP new_fun) { if (TYPEOF(name) != SYMSXP) error("name must be a symbol"); if (TYPEOF(env) != ENVSXP) error("env must be an environment"); if (TYPEOF(old_fun) != CLOSXP) error("old_fun must be a function"); if (TYPEOF(new_fun) != CLOSXP) error("new_fun must be a function"); SET_FORMALS(old_fun, FORMALS(new_fun)); SET_BODY(old_fun, BODY(new_fun)); SET_CLOENV(old_fun, CLOENV(new_fun)); DUPLICATE_ATTRIB(old_fun, new_fun); return R_NilValue; } SEXP duplicate_(SEXP x) { return duplicate(x); } testthat/src/test-example.cpp0000644000176200001440000000216012666267031016025 0ustar liggesusers/* * This file uses the Catch unit testing library, alongside * testthat's simple bindings, to test a C++ function. * * For your own packages, ensure that your test files are * placed within the `src/` folder, and that you include * `LinkingTo: testthat` within your DESCRIPTION file. */ // All test files should include the // header file. 
#include // Normally this would be a function from your package's // compiled library -- you might instead just include a header // file providing the definition, and let R CMD INSTALL // handle building and linking. int twoPlusTwo() { return 2 + 2; } // Initialize a unit test context. This is similar to how you // might begin an R test file with 'context()', expect the // associated context should be wrapped in braced. context("Sample unit tests") { // The format for specifying tests is similar to that of // testthat's R functions. Use 'test_that()' to define a // unit test, and use 'expect_true()' and 'expect_false()' // to test the desired conditions. test_that("two plus two equals four") { expect_true(twoPlusTwo() == 4); } } testthat/src/init.c0000644000176200001440000000116713173076020014014 0ustar liggesusers#include #include #include // for NULL #include /* .Call calls */ extern SEXP duplicate_(SEXP); extern SEXP reassign_function(SEXP, SEXP, SEXP, SEXP); extern SEXP run_testthat_tests(); static const R_CallMethodDef CallEntries[] = { {"duplicate_", (DL_FUNC) &duplicate_, 1}, {"reassign_function", (DL_FUNC) &reassign_function, 4}, {"run_testthat_tests", (DL_FUNC) &run_testthat_tests, 0}, {NULL, NULL, 0} }; void R_init_testthat(DllInfo *dll) { R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } testthat/src/Makevars0000644000176200001440000000006412666267031014406 0ustar liggesusersPKG_CPPFLAGS=-I../inst/include -DCOMPILING_TESTTHAT testthat/src/Makevars.win0000644000176200001440000000006412666267031015202 0ustar liggesusersPKG_CPPFLAGS=-I../inst/include -DCOMPILING_TESTTHAT testthat/src/test-runner.cpp0000644000176200001440000000036712666267031015712 0ustar liggesusers/* * Please do not edit this file -- it ensures that your package will export a * 'run_testthat_tests()' C routine that can be used to run the Catch unit tests * available in your package. */ #define TESTTHAT_TEST_RUNNER #include testthat/src/test-catch.cpp0000644000176200001440000000170213137625470015454 0ustar liggesusers#include #include #include #include namespace { void ouch() { std::string message = "logic"; throw std::logic_error(message); } } // anonymous namespace context("Example Unit Test") { test_that("4 + 4 == 8") { expect_true((4 + 4) == 8); } } context("A second context") { test_that("2 - 2 == 0") { expect_true((2 - 2) == 0); } test_that("-1 is negative") { expect_true((-1 < 0)); } } context("Respect 'src/Makevars'") { bool compiling_testthat; #ifdef COMPILING_TESTTHAT compiling_testthat = true; #else compiling_testthat = false; #endif test_that("COMPILING_TESTTHAT is inherited from 'src/Makevars'") { expect_true(compiling_testthat); } } context("Exception handling") { test_that("we can use Catch to test for exceptions") { expect_error(ouch()); expect_error_as(ouch(), std::exception); expect_error_as(ouch(), std::logic_error); } } testthat/vignettes/0000755000176200001440000000000013564761040014130 5ustar liggesuserstestthat/vignettes/custom-expectation.Rmd0000644000176200001440000000644313564563701020442 0ustar liggesusers--- title: "Custom expectations" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Custom expectations} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include = FALSE} library(testthat) knitr::opts_chunk$set(collapse = TRUE, comment = "#>") ``` This vignette shows you how to create custom expectations that work identically to the built-in `expect_` functions. 
## Creating an expectation

There are three main parts to writing an expectation, as illustrated by `expect_length()`:

```{r}
expect_length <- function(object, n) {
  # 1. Capture object and label
  act <- quasi_label(rlang::enquo(object), arg = "object")

  # 2. Call expect()
  act$n <- length(act$val)
  expect(
    act$n == n,
    sprintf("%s has length %i, not length %i.", act$lab, act$n, n)
  )

  # 3. Invisibly return the value
  invisible(act$val)
}
```

## Quasi-labelling

The first step in any expectation is to capture the actual object, and generate a label for it to use if a failure occurs. All testthat expectations support quasiquotation so that you can unquote variables. This makes it easier to generate good labels when the expectation is called from a function or within a for loop. By convention, the first argument to every `expect_` function is called `object`, and you capture its value (`val`) and label (`lab`) with `act <- quasi_label(enquo(object))`, where `act` is short for actual.

### Verify the expectation

Next, you should verify the expectation. This often involves a little computation (here just figuring out the `length`), and you should typically store the results back into the `act` object.

Next you call `expect()`. This has two arguments:

1. `ok`: was the expectation successful? This is usually easy to write.

2. `failure_message`: What informative error message should be reported to the user so that they can diagnose the problem. This is often hard to write! For historical reasons, most built-in expectations generate these with `sprintf()`, but today I'd recommend using the [glue](http://glue.tidyverse.org) package.

### Invisibly return the input

Expectation functions are called primarily for their side-effects (triggering a failure), so should invisibly return their input, `act$val`. This allows expectations to be chained:

```{r}
mtcars %>%
  expect_type("list") %>%
  expect_s3_class("data.frame") %>%
  expect_length(11)
```

## `succeed()` and `fail()`

For expectations with more complex logic governing when success or failure occurs, you can use `succeed()` and `fail()`. These are simple wrappers around `expect()` that allow you to write code that looks like this:

```{r}
expect_length <- function(object, n) {
  act <- quasi_label(rlang::enquo(object), arg = "object")

  act$n <- length(act$val)
  if (act$n == n) {
    succeed()
    return(invisible(act$val))
  }

  message <- sprintf("%s has length %i, not length %i.", act$lab, act$n, n)
  fail(message)
}
```

## Testing your expectations

Use the expectations `expect_success()` and `expect_failure()` to test your expectation.

```{r}
test_that("length computed correctly", {
  expect_success(expect_length(1, 1))
  expect_failure(expect_length(1, 2), "has length 1, not length 2.")
  expect_success(expect_length(1:10, 10))
  expect_success(expect_length(letters[1:5], 5))
})
```
testthat/R/0000755000176200001440000000000013564761040012321 5ustar liggesuserstestthat/R/verify-output.R0000644000176200001440000001256013564563701015316 0ustar liggesusers#' Verify output
#'
#' @description
#' This is a regression test that records interwoven code and output into a
#' file, in a similar way to knitting an `.Rmd` (but see caveats below).
#'
#' `verify_output()` is designed particularly for testing print methods and error
#' messages, where the primary goal is to ensure that the output is helpful to
#' a human. Obviously, you can't test that with code, so the best you can do is
#' make the results explicit by saving them to a text file. This makes the output
#' easy to see in code reviews, and ensures that you don't change the output
#' accidentally.
#'
#' `verify_output()` is designed to be used with git: to see what has changed
#' from the previous run, you'll need to use `git diff` or similar.
#'
#' @section Syntax:
#' `verify_output()` can only capture the abstract syntax tree, losing all
#' whitespace and comments. To mildly offset this limitation:
#'
#' - Strings are converted to R comments in the output.
#' - Strings starting with `# ` are converted to headers in the output.
#'
#' @section CRAN:
#' On CRAN, `verify_output()` will never fail, even if the output changes.
#' This avoids false positives because tests of print methods and error
#' messages are often fragile due to implicit dependencies on other packages,
#' and failure does not imply incorrect computation, just a change in
#' presentation.
#'
#' @param path Path to record results.
#'
#' This should usually be a call to [test_path()] to ensure that the same path
#' is used when run interactively (when the working directory is typically
#' the project root), and when run as an automated test (when the working
#' directory will be `tests/testthat`).
#' @param code Code to execute. This will usually be a multiline expression
#' contained within `{}` (similarly to `test_that()` calls).
#' @param width Width of console output.
#' @param crayon Enable crayon package colouring?
#' @param unicode Enable cli package UTF-8 symbols? If you set this to
#' `TRUE`, call `skip_if(!cli::is_utf8_output())` to disable the
#' test on your CI platforms that don't support UTF-8 (e.g. Windows).
#' @param env The environment to evaluate `code` in.
#' @export
#' @examples
#' # The first argument would usually be `test_path("informative-name.txt")`
#' # but that is not permitted in examples
#' path <- tempfile()
#' verify_output(path, {
#'   head(mtcars)
#'   log(-10)
#'   "a" * 3
#' })
#' writeLines(readLines(path))
#'
#' # Use strings to create comments in the output
#' verify_output(tempfile(), {
#'   "Print method"
#'   head(mtcars)
#'
#'   "Warning"
#'   log(-10)
#'
#'   "Error"
#'   "a" * 3
#' })
#'
#' # Use strings starting with # to create headings
#' verify_output(tempfile(), {
#'   "# Base functions"
#'   head(mtcars)
#'   log(-10)
#'   "a" * 3
#' })
verify_output <- function(path, code, width = 80, crayon = FALSE,
                          unicode = FALSE, env = caller_env()) {
  expr <- substitute(code)
  if (is_call(expr, "{")) {
    exprs <- as.list(expr[-1])
  } else {
    exprs <- list(expr)
  }

  withr::local_options(list(
    width = width,
    crayon.enabled = crayon,
    cli.unicode = unicode
  ))
  withr::local_envvar(list(RSTUDIO_CONSOLE_WIDTH = width))

  exprs <- lapply(exprs, function(x) if (is.character(x)) paste0("# ", x) else expr_deparse(x))
  source <- unlist(exprs, recursive = FALSE)

  # Open temporary new device
  grDevices::png(filename = tempfile())
  grDevices::dev.control(displaylist = "enable")
  dev <- grDevices::dev.cur()
  on.exit(grDevices::dev.off(dev), add = TRUE)

  results <- evaluate::evaluate(source, envir = env, new_device = FALSE)
  output <- unlist(lapply(results, output_replay))

  if (is_testing() && on_cran()) {
    skip("On CRAN")
  }

  compare_file(path, output, update = TRUE)
  invisible()
}

output_replay <- function(x) {
  UseMethod("output_replay", x)
}

#' @export
output_replay.character <- function(x) {
  c(split_lines(x), "")
}

#' @export
output_replay.source <- function(x) {
  lines <- split_lines(x$src)

  # Remove header of lines so they don't get prefixed
  first <- lines[[1]]
  if (grepl("^# # ", first)) {
    header <- gsub("^# # ", "", first)
    lines <-
lines[-1] } else { header <- NULL } n <- length(lines) if (n > 0) { lines[1] <- paste0("> ", lines[1]) if (n > 1) { lines[2:n] <- paste0("+ ", lines[2:n]) } } if (!is.null(header)) { underline <- strrep("=", nchar(header)) lines <- c("", header, underline, "", lines) } lines } #' @export output_replay.error <- function(x) { msg <- cnd_message(x) if (is.null(x$call)) { msg <- paste0("Error: ", msg) } else { call <- deparse(x$call) msg <- paste0("Error in ", call, ": ", msg) } c(split_lines(msg), "") } #' @export output_replay.warning <- function(x) { msg <- cnd_message(x) if (is.null(x$call)) { msg <- paste0("Warning: ", msg) } else { call <- deparse(x$call) msg <- paste0("Warning in ", call, ": ", msg) } c(split_lines(msg), "") } #' @export output_replay.message <- function(x) { # Messages are the only conditions where a new line is appended automatically msg <- paste0("Message: ", sub("\n$", "", cnd_message(x))) c(split_lines(msg), "") } #' @export output_replay.recordedplot <- function(x) { abort("Plots are not supported") } # Helpers ------------------------------------------------------------ split_lines <- function(x) { strsplit(x, "\n")[[1]] } testthat/R/reporter-debug.R0000644000176200001440000000127713564563701015405 0ustar liggesusers#' @include reporter.R NULL #' Test reporter: start recovery. #' #' This reporter will call a modified version of [recover()] on all #' broken expectations. #' #' @export #' @family reporters DebugReporter <- R6::R6Class("DebugReporter", inherit = Reporter, public = list( add_result = function(context, test, result) { if (!expectation_success(result) && !is.null(result$start_frame)) { if (sink_number() > 0) { sink(self$out) on.exit(sink(), add = TRUE) } recover2( start_frame = result$start_frame, end_frame = result$end_frame ) } } ) ) sink_number <- function() { sink.number(type = "output") } testthat/R/expectation.R0000644000176200001440000001621013564563701014773 0ustar liggesusers#' The building block of all `expect_` functions #' #' Call `expect()` when writing your own expectations. See #' `vignette("custom-expectation")` for details. #' #' @param ok `TRUE` or `FALSE` indicating if the expectation was successful. #' @param failure_message Message to show if the expectation failed. #' @param info Character vector continuing additional information. Included #' for backward compatibility only and new expectations should not use it. #' @param srcref Location of the failure. Should only needed to be explicitly #' supplied when you need to forward a srcref captured elsewhere. #' @param trace An optional backtrace created by [rlang::trace_back()]. #' When supplied, the expectation is displayed with the backtrace. #' @return An expectation object. Signals the expectation condition #' with a `continue_test` restart. #' #' @details #' #' While `expect()` creates and signals an expectation in one go, #' `exp_signal()` separately signals an expectation that you #' have manually created with [new_expectation()]. Expectations are #' signalled with the following protocol: #' #' * If the expectation is a failure or an error, it is signalled with #' [base::stop()]. Otherwise, it is signalled with #' [base::signalCondition()]. #' #' * The `continue_test` restart is registered. When invoked, failing #' expectations are ignored and normal control flow is resumed to #' run the other tests. 
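#'
#' @examples
#' # Illustrative sketch (not part of the original documentation): the protocol
#' # described above means a calling handler can log a failing expectation and
#' # then invoke the `continue_test` restart to resume instead of aborting.
#' withCallingHandlers(
#'   {
#'     expect_true(FALSE)
#'     "reached despite the failure"
#'   },
#'   expectation_failure = function(cnd) {
#'     message("caught: ", conditionMessage(cnd))
#'     invokeRestart("continue_test")
#'   }
#' )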
#' #' @seealso [exp_signal()] #' @export expect <- function(ok, failure_message, info = NULL, srcref = NULL, trace = NULL) { type <- if (ok) "success" else "failure" # Preserve existing API which appear to be used in package test code # Can remove in next major release if (missing(failure_message)) { warn("`failure_message` is missing, with no default.") message <- "unknown failure" } else { # A few packages include code in info that errors on evaluation if (ok) { message <- paste(failure_message, collapse = "\n") } else { message <- paste(c(failure_message, info), collapse = "\n") } } exp <- expectation(type, message, srcref = srcref, trace = trace) exp_signal(exp) } #' Construct an expectation object #' #' For advanced use only. If you are creating your own expectation, you should #' call [expect()] instead. See `vignette("custom-expectation")` for more #' details. #' #' Create an expectation with `expectation()` or `new_expectation()` #' and signal it with `exp_signal()`. #' #' @param type Expectation type. Must be one of "success", "failure", "error", #' "skip", "warning". #' @param message Message describing test failure #' @param srcref Optional `srcref` giving location of test. #' @inheritParams expect #' @keywords internal #' @export expectation <- function(type, message, srcref = NULL, trace = NULL) { new_expectation(type, message, srcref = srcref, trace = trace) } #' @rdname expectation #' @param ... Additional attributes for the expectation object. #' @param .subclass An optional subclass for the expectation object. #' @export new_expectation <- function(type, message, ..., srcref = NULL, trace = NULL, .subclass = NULL) { type <- match.arg(type, c("success", "failure", "error", "skip", "warning")) structure( list( message = message, srcref = srcref, trace = trace ), class = c( .subclass, paste0("expectation_", type), "expectation", # Make broken expectations catchable by try() if (type %in% c("failure", "error")) "error", "condition" ), ... ) } #' @rdname expectation #' @param exp An expectation object, as created by #' [new_expectation()]. #' @export exp_signal <- function(exp) { withRestarts( if (expectation_broken(exp)) { stop(exp) } else { signalCondition(exp) }, continue_test = function(e) NULL ) invisible(exp) } #' @export #' @rdname expectation #' @param x object to test for class membership is.expectation <- function(x) inherits(x, "expectation") #' @export print.expectation <- function(x, ...) { cat(format(x), "\n") } #' @export format.expectation_success <- function(x, ...) { "As expected" } # Access error fields with `[[` rather than `$` because the # `$.Throwable` from the rJava package throws with unknown fields #' @export format.expectation <- function(x, ...) { if (is.null(x[["trace"]]) || trace_length(x[["trace"]]) == 0L) { x$message } else { format_with_trace(x) } } format_with_trace <- function(exp) { trace_lines <- format( exp$trace, simplify = "branch", max_frames = 20, dir = Sys.getenv("TESTTHAT_DIR") %||% getwd() ) paste_line( exp$message, crayon::bold("Backtrace:"), !!!trace_lines ) } # as.expectation ---------------------------------------------------------- as.expectation <- function(x, ...) UseMethod("as.expectation", x) #' @export as.expectation.default <- function(x, ..., srcref = NULL) { stop( "Don't know how to convert '", paste(class(x), collapse = "', '"), "' to expectation.", call. 
= FALSE ) } #' @export as.expectation.expectation <- function(x, ..., srcref = NULL) { x$srcref <- x$srcref %||% srcref x } #' @export as.expectation.error <- function(x, ..., srcref = NULL) { error <- cnd_message(x) msg <- gsub("Error.*?: ", "", as.character(error)) # Remove trailing newline to be consistent with other conditons msg <- gsub("\n$", "", msg) expectation("error", msg, srcref, trace = x[["trace"]]) } #' @export as.expectation.warning <- function(x, ..., srcref = NULL) { expectation("warning", cnd_message(x), srcref) } #' @export as.expectation.skip <- function(x, ..., srcref = NULL) { expectation("skip", cnd_message(x), srcref) } # expectation_type -------------------------------------------------------- expectation_type <- function(exp) { stopifnot(is.expectation(exp)) gsub("^expectation_", "", class(exp)[[1]]) } expectation_success <- function(exp) expectation_type(exp) == "success" expectation_failure <- function(exp) expectation_type(exp) == "failure" expectation_error <- function(exp) expectation_type(exp) == "error" expectation_skip <- function(exp) expectation_type(exp) == "skip" expectation_warning <- function(exp) expectation_type(exp) == "warning" expectation_broken <- function(exp) expectation_failure(exp) || expectation_error(exp) expectation_ok <- function(exp) expectation_type(exp) %in% c("success", "warning") single_letter_summary <- function(x) { switch(expectation_type(x), skip = colourise("S", "skip"), success = colourise(".", "success"), error = colourise("E", "error"), failure = colourise("F", "failure"), warning = colourise("W", "warning"), "?" ) } exp_location <- function(exp) { srcref <- exp$srcref if (is.null(srcref)) { return("") } filename <- attr(srcref, "srcfile")$filename # There is no filename when evaluating `test_that()` blocks # interactively. The line number is not significant in that case so # we return a blank. if (!nzchar(filename)) { return("") } paste0(basename(filename), ":", srcref[1], ": ") } testthat/R/reporter.R0000644000176200001440000000642013564563701014314 0ustar liggesusers#' Manage test reporting #' #' The job of a reporter is to aggregate the results from files, tests, and #' expectations and display them in an informative way. Every testtthat function #' that runs multiple tests provides a `reporter` argument which you can #' use to override the default (which is selected by [default_reporter()]). #' #' You only need to use this `Reporter` object directly if you are creating #' a new reporter. Currently, creating new Reporters is undocumented, #' so if you want to create your own, you'll need to make sure that you're #' familiar with [R6](https://adv-r.hadley.nz/R6.html) and then need read the #' source code for a few. #' #' @keywords internal #' @export #' @export Reporter #' @aliases Reporter #' @importFrom R6 R6Class #' @family reporters #' @examples #' path <- testthat_example("success") #' #' # The default reporter - doesn't display well in examples because #' # it's designed to work in an interactive console. 
#' test_file(path) #' #' # Override the default by supplying the name of a reporter #' test_file(path, reporter = "minimal") Reporter <- R6::R6Class("Reporter", public = list( start_reporter = function() {}, start_context = function(context) {}, start_test = function(context, test) {}, start_file = function(filename) {}, add_result = function(context, test, result) {}, end_test = function(context, test) {}, end_context = function(context) {}, end_reporter = function() {}, end_file = function() {}, is_full = function() FALSE, out = NULL, initialize = function(file = getOption("testthat.output_file", stdout())) { self$out <- file if (is.character(self$out) && file.exists(self$out)) { # If writing to a file, overwrite it if it exists file.remove(self$out) } }, cat_tight = function(...) { cat(..., sep = "", file = self$out, append = TRUE) }, cat_line = function(...) { cli::cat_line(..., file = self$out) }, rule = function(...) { cli::cat_rule(..., file = self$out) }, # The hierarchy of contexts are implied - a context starts with a # call to context(), and ends either with the end of the file, or # with the next call to context() in the same file. These private # methods paper over the details so that context appear to work # in the same way as tests and expectations. .context = NULL, .start_context = function(context) { if (!is.null(self$.context)) { self$end_context(self$.context) } self$.context <- context self$start_context(context) invisible() }, .end_context = function(context) { if (!is.null(self$.context)) { self$end_context(self$.context) self$.context <- NULL } invisible() } ) ) #' Retrieve the default reporter #' #' The defaults are: #' * [ProgressReporter] for interactive; override with `testthat.default_reporter` #' * [CheckReporter] for R CMD check; override with `testthat.default_check_reporter` #' #' @export #' @keywords internal default_reporter <- function() { getOption("testthat.default_reporter", "progress") } #' @export #' @rdname default_reporter check_reporter <- function() { getOption("testthat.default_check_reporter", "check") } testthat/R/expect-known.R0000644000176200001440000001157413564563701015102 0ustar liggesusers#' Expectations: is the output or the value equal to a known good value? #' #' For complex printed output and objects, it is often challenging to describe #' exactly what you expect to see. `expect_known_value()` and #' `expect_known_output()` provide a slightly weaker guarantee, simply #' asserting that the values have not changed since the last time that you ran #' them. #' #' These expectations should be used in conjunction with git, as otherwise #' there is no way to revert to previous values. Git is particularly useful #' in conjunction with `expect_known_output()` as the diffs will show you #' exactly what has changed. #' #' Note that known values updates will only be updated when running tests #' interactively. `R CMD check` clones the package source so any changes to #' the reference files will occur in a temporary directory, and will not be #' synchronised back to the source package. #' #' @export #' @param file File path where known value/output will be stored. #' @param update Should the file be updated? Defaults to `TRUE`, with #' the expectation that you'll notice changes because of the first failure, #' and then see the modified files in git. #' @param version The serialization format version to use. The default, 2, was #' the default format from R 1.4.0 to 3.5.3. 
Version 3 became the default from #' R 3.6.0 and can only be read by R versions 3.5.0 and higher. #' @keywords internal #' @inheritParams expect_equal #' @inheritParams capture_output_lines #' @examples #' tmp <- tempfile() #' #' # The first run always succeeds #' expect_known_output(mtcars[1:10, ], tmp, print = TRUE) #' #' # Subsequent runs will succeed only if the file is unchanged #' # This will succeed: #' expect_known_output(mtcars[1:10, ], tmp, print = TRUE) #' #' \dontrun{ #' # This will fail #' expect_known_output(mtcars[1:9, ], tmp, print = TRUE) #' } expect_known_output <- function(object, file, update = TRUE, ..., info = NULL, label = NULL, print = FALSE, width = 80) { act <- list() act$quo <- enquo(object) act$lab <- label %||% quo_label(act$quo) act <- append(act, eval_with_output(object, print = print, width = width)) compare_file(file, act$out, update = update, info = info, ...) invisible(act$val) } compare_file <- function(path, lines, ..., update = TRUE, info = NULL) { if (!file.exists(path)) { warning("Creating reference output", call. = FALSE) write_lines(lines, path) succeed() return() } old_lines <- read_lines(path) if (update) { write_lines(lines, path) if (!all_utf8(lines)) { warning("New reference output is not UTF-8 encoded", call. = FALSE) } } if (!all_utf8(old_lines)) { warning("Reference output is not UTF-8 encoded", call. = FALSE) } comp <- compare(lines, enc2native(old_lines), ...) expect( comp$equal, sprintf( "Results have changed from known value recorded in %s.\n%s", encodeString(path, quote = "'"), comp$message ), info = info ) } #' @export #' @rdname expect_known_output #' @usage NULL expect_output_file <- expect_known_output #' @export #' @rdname expect_known_output expect_known_value <- function(object, file, update = TRUE, ..., info = NULL, label = NULL, version = 2) { act <- quasi_label(enquo(object), label, arg = "object") if (!file.exists(file)) { warning("Creating reference value", call. = FALSE) saveRDS(object, file, version = version) succeed() } else { ref_val <- readRDS(file) comp <- compare(act$val, ref_val, ...) if (update && !comp$equal) { saveRDS(act$val, file, version = version) } expect( comp$equal, sprintf( "%s has changed from known value recorded in %s.\n%s", act$lab, encodeString(file, quote = "'"), comp$message ), info = info ) } invisible(act$value) } #' @export #' @rdname expect_known_output #' @usage NULL expect_equal_to_reference <- function(..., update = FALSE) { expect_known_value(..., update = update) } #' @export #' @rdname expect_known_output #' @param hash Known hash value. Leave empty and you'll be informed what #' to use in the test output. expect_known_hash <- function(object, hash = NULL) { act <- quasi_label(enquo(object), arg = "object") act_hash <- digest::digest(act$val) if (!is.null(hash)) { act_hash <- substr(act_hash, 1, nchar(hash)) } if (is.null(hash)) { warning(paste0("No recorded hash: use ", substr(act_hash, 1, 10))) succeed() } else { expect( hash == act_hash, sprintf("Value hashes to %s, not %s", act_hash, hash) ) } invisible(act$value) } testthat/R/expect-inheritance.R0000644000176200001440000000733113521025554016223 0ustar liggesusers#' Expectation: does the object inherit from a S3 or S4 class, or is it a base type? #' #' @description #' See for an overview of R's OO systems, and #' the vocabulary used here. #' #' * `expect_type(x, type)` checks that `typeof(x)` is `type`. 
#' * `expect_s3_class(x, class)` checks that `x` is an S3 object that #' [inherits()] from `class` #' * `expect_s4_class(x, class)` checks that `x` is an S4 object that #' [is()] `class`. #' #' @param type String giving base type (as returned by [typeof()]). #' @param class character vector of class names #' @inheritParams expect_that #' @family expectations #' @examples #' x <- data.frame(x = 1:10, y = "x") #' # A data frame is an S3 object with class data.frame #' expect_s3_class(x, "data.frame") #' show_failure(expect_s4_class(x, "data.frame")) #' # A data frame is built from a list: #' expect_type(x, "list") #' #' # An integer vector is an atomic vector of type "integer" #' expect_type(x$x, "integer") #' # It is not an S3 object #' show_failure(expect_s3_class(x$x, "integer")) #' #' # By default data.frame() converts characters to factors: #' show_failure(expect_type(x$y, "character")) #' expect_s3_class(x$y, "factor") #' expect_type(x$y, "integer") #' @name inheritance-expectations NULL #' @export #' @rdname inheritance-expectations expect_type <- function(object, type) { stopifnot(is.character(type), length(type) == 1) act <- quasi_label(enquo(object), arg = "object") act_type <- typeof(act$val) expect( identical(act_type, type), sprintf("%s has type `%s`, not `%s`.", act$lab, act_type, type) ) invisible(act$val) } #' @export #' @rdname inheritance-expectations #' @param exact If `FALSE`, the default, checks that `object` inherits #' from `class`. If `TRUE`, checks that object has a class that's identical #' to `class`. expect_s3_class <- function(object, class, exact = FALSE) { stopifnot(is.character(class)) act <- quasi_label(enquo(object), arg = "object") act$class <- klass(act$val) exp_lab <- paste(class, collapse = "/") if (!isS3(act$val)) { fail(sprintf("%s is not an S3 object", act$lab)) } if (exact) { expect( identical(class(act$val), class), sprintf("%s has class `%s`, not `%s`.", act$lab, act$class, exp_lab) ) } else { expect( inherits(act$val, class), sprintf("%s inherits from `%s` not `%s`.", act$lab, act$class, exp_lab) ) } invisible(act$val) } #' @export #' @rdname inheritance-expectations expect_s4_class <- function(object, class) { stopifnot(is.character(class)) act <- quasi_label(enquo(object), arg = "object") act_val_lab <- paste(methods::is(object), collapse = "/") exp_lab <- paste(class, collapse = "/") if (!isS4(act$val)) { fail(sprintf("%s is not an S4 object", act$lab)) } expect( methods::is(act$val, class), sprintf("%s inherits from `%s` not `%s`.", act$lab, act_val_lab, exp_lab) ) invisible(act$val) } isS3 <- function(x) is.object(x) && !isS4(x) #' Expectation: does the object inherit from a given class? #' #' `expect_is()` is an older form that uses [inherits()] without checking #' whether `x` is S3, S4, or neither. Intead, I'd recommend using #' [expect_type()], [expect_s3_class()] or [expect_s4_class()] to more clearly #' convey your intent. 
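#'
#' @examples
#' # A small illustrative sketch (added here, not in the original docs) of the
#' # distinction described above:
#' x <- data.frame(a = 1)
#' expect_is(x, "data.frame")       # older: only checks inherits()
#' expect_s3_class(x, "data.frame") # preferred: also asserts x is an S3 object
#' expect_type(x, "list")           # checks the base type reported by typeof()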
#' #' @keywords internal #' @inheritParams expect_type #' @export expect_is <- function(object, class, info = NULL, label = NULL) { stopifnot(is.character(class)) act <- quasi_label(enquo(object), label, arg = "object") act$class <- klass(act$val) exp_lab <- paste(class, collapse = "/") expect( inherits(act$val, class), sprintf("%s inherits from `%s` not `%s`.", act$lab, act$class, exp_lab), info = info ) invisible(act$val) } testthat/R/utils.R0000644000176200001440000000557313564563701013622 0ustar liggesusers#' @importFrom magrittr %>% #' @export magrittr::`%>%` `%||%` <- function(a, b) if (is.null(a)) b else a starts_with <- function(string, prefix) { substr(string, 1, nchar(prefix)) == prefix } is_directory <- function(x) file.info(x)$isdir is_readable <- function(x) file.access(x, 4) == 0 null <- function(...) invisible() klass <- function(x) paste(class(x), collapse = "/") first_last <- function(x, max = 10, filler = "...") { if (length(x) <= 2 * max + 1) { x } else { c( x[seq_len(max)], filler, x[seq.int(to = length(x), length.out = max)] ) } } # Tools for finding srcrefs ----------------------------------------------- show_stack <- function(star = integer(), n = sys.nframe() - 1L) { pos <- seq_len(n) fun <- vapply(sys.calls()[pos], f_name, character(1)) has_src <- vapply(sys.calls()[pos], function(x) !is.null(attr(x, "srcref")), logical(1)) env <- vapply(lapply(sys.frames()[pos], parent.env), env_name, character(1)) parent <- sys.parents()[pos] data.frame( `*` = ifelse(pos %in% star, "*", ""), fun = fun, src = ifelse(has_src, "x", ""), env = env, par = parent, stringsAsFactors = FALSE, check.names = FALSE ) } env_name <- function(x) { str <- capture_output(x, print = TRUE) gsub("", "", str) } find_first_srcref <- function(start) { calls <- sys.calls() calls <- calls[seq2(start, length(calls))] for (call in calls) { srcref <- attr(call, "srcref") if (!is.null(srcref)) { return(srcref) } } NULL } f_name <- function(x) { if (is.call(x)) { f_name(x[[1]]) } else if (is.name(x)) { as.character(x) } else { "" } } escape_regex <- function(x) { chars <- c("*", ".", "?", "^", "+", "$", "|", "(", ")", "[", "]", "{", "}", "\\") gsub(paste0("([\\", paste0(collapse = "\\", chars), "])"), "\\\\\\1", x, perl = TRUE) } # For R 3.1 dir.exists <- function(paths) { file.exists(paths) & file.info(paths)$isdir } # Simplify a test filename to a context name context_name <- function(filename) { # Remove test- prefix filename <- gsub("test-", "", filename) # Remove extension filename <- gsub("[.][Rr]", "", filename) filename } paste_line <- function(...) { paste(chr(...), collapse = "\n") } maybe_root_dir <- function(path) { tryCatch(pkgload::pkg_path(path), error = function(...) path) } maybe_restart <- function(restart) { if (!is.null(findRestart(restart))) { invokeRestart(restart) } } # Backport for R 3.2 strrep <- function(x, times) { x = as.character(x) if (length(x) == 0L) return(x) unlist(.mapply(function(x, times) { if (is.na(x) || is.na(times)) return(NA_character_) if (times <= 0L) return("") paste0(replicate(times, x), collapse = "") }, list(x = x, times = times), MoreArgs = list()), use.names = FALSE) } can_entrace <- function(cnd) { !inherits(cnd, "Throwable") } testthat/R/test-compiled-code.R0000644000176200001440000002146613564563701016142 0ustar liggesusers#' Expectation: do C++ tests past? #' #' Test compiled code in the package `package`. 
A call to this function will
#' automatically be generated for you in `tests/testthat/test-cpp.R` after
#' calling [use_catch()]; you should not need to manually call this expectation
#' yourself.
#'
#' @param package The name of the package to test.
#' @keywords internal
#' @export
expect_cpp_tests_pass <- function(package) {
  run_testthat_tests <- get_routine(package, "run_testthat_tests")

  output <- ""
  tests_passed <- TRUE

  tryCatch(
    output <- capture_output_lines(tests_passed <- .Call(run_testthat_tests)),
    error = function(e) {
      warning(sprintf("failed to call test entrypoint '%s'", run_testthat_tests))
    }
  )

  # Drop first line of output (it's just a '####' delimiter)
  info <- paste(output[-1], collapse = "\n")
  expect(tests_passed, paste("C++ unit tests:", info, sep = "\n"))
}

#' Use Catch for C++ Unit Testing
#'
#' Add the necessary infrastructure to enable C++ unit testing
#' in \R packages with
#' \href{https://github.com/philsquared/Catch}{Catch} and `testthat`.
#'
#' Calling `use_catch()` will:
#'
#' 1. Create a file `src/test-runner.cpp`, which ensures that the
#'    `testthat` package will understand how to run your package's
#'    unit tests,
#'
#' 2. Create an example test file `src/test-example.cpp`, which
#'    showcases how you might use Catch to write a unit test,
#'
#' 3. Add a test file `tests/testthat/test-cpp.R`, which ensures that
#'    `testthat` will run your compiled tests during invocations of
#'    `devtools::test()` or `R CMD check`, and
#'
#' 4. Create a file `R/catch-routine-registration.R`, which ensures that
#'    \R will automatically register this routine when
#'    `tools::package_native_routine_registration_skeleton()` is invoked.
#'
#' C++ unit tests can be added to C++ source files within the
#' `src` directory of your package, with a format similar
#' to \R code tested with `testthat`. Here's a simple example
#' of a unit test written with `testthat` + Catch:
#'
#' \preformatted{
#' context("C++ Unit Test") {
#'   test_that("two plus two is four") {
#'     int result = 2 + 2;
#'     expect_true(result == 4);
#'   }
#' }
#' }
#'
#' When your package is compiled, unit tests alongside a harness
#' for running these tests will be compiled into your \R package,
#' with the C entry point `run_testthat_tests()`. `testthat`
#' will use that entry point to run your unit tests when detected.
#'
#' @section Functions:
#'
#' All of the functions provided by Catch are
#' available with the `CATCH_` prefix -- see
#' \href{https://github.com/philsquared/Catch/blob/master/docs/assertions.md}{here}
#' for a full list. `testthat` provides the
#' following wrappers, to conform with `testthat`'s
#' \R interface:
#'
#' \tabular{lll}{
#' \strong{Function} \tab \strong{Catch} \tab \strong{Description} \cr
#' `context` \tab `CATCH_TEST_CASE` \tab The context of a set of tests. \cr
#' `test_that` \tab `CATCH_SECTION` \tab A test section. \cr
#' `expect_true` \tab `CATCH_CHECK` \tab Test that an expression evaluates to `true`. \cr
#' `expect_false` \tab `CATCH_CHECK_FALSE` \tab Test that an expression evaluates to `false`. \cr
#' `expect_error` \tab `CATCH_CHECK_THROWS` \tab Test that evaluation of an expression throws an exception. \cr
#' `expect_error_as` \tab `CATCH_CHECK_THROWS_AS` \tab Test that evaluation of an expression throws an exception of a specific class.
\cr #' } #' #' In general, you should prefer using the `testthat` #' wrappers, as `testthat` also does some work to #' ensure that any unit tests within will not be compiled or #' run when using the Solaris Studio compilers (as these are #' currently unsupported by Catch). This should make it #' easier to submit packages to CRAN that use Catch. #' #' @section Symbol Registration: #' #' If you've opted to disable dynamic symbol lookup in your #' package, then you'll need to explicitly export a symbol #' in your package that `testthat` can use to run your unit #' tests. `testthat` will look for a routine with one of the names: #' #' \preformatted{ #' C_run_testthat_tests #' c_run_testthat_tests #' run_testthat_tests #' } #' #' See [Controlling Visibility](https://cran.r-project.org/doc/manuals/r-release/R-exts.html#Controlling-visibility) #' and [Registering Symbols](https://cran.r-project.org/doc/manuals/r-release/R-exts.html#Registering-symbols) #' in the **Writing R Extensions** manual for more information. #' #' @section Advanced Usage: #' #' If you'd like to write your own Catch test runner, you can #' instead use the `testthat::catchSession()` object in a file #' with the form: #' #' \preformatted{ #' #define TESTTHAT_TEST_RUNNER #' #include #' #' void run() #' { #' Catch::Session& session = testthat::catchSession(); #' // interact with the session object as desired #' } #' } #' #' This can be useful if you'd like to run your unit tests #' with custom arguments passed to the Catch session. #' #' @param dir The directory containing an \R package. #' #' @section Standalone Usage: #' #' If you'd like to use the C++ unit testing facilities provided #' by Catch, but would prefer not to use the regular `testthat` #' \R testing infrastructure, you can manually run the unit tests #' by inserting a call to: #' #' \preformatted{ #' .Call("run_testthat_tests", PACKAGE = ) #' } #' #' as necessary within your unit test suite. #' #' @export #' @seealso \href{https://github.com/philsquared/Catch}{Catch}, the #' library used to enable C++ unit testing. use_catch <- function(dir = getwd()) { desc_path <- file.path(dir, "DESCRIPTION") if (!file.exists(desc_path)) { stop("no DESCRIPTION file at path '", desc_path, "'", call. = FALSE) } desc <- read.dcf(desc_path, all = TRUE) pkg <- desc$Package if (!nzchar(pkg)) { stop("no 'Package' field in DESCRIPTION file '", desc_path, "'", call. = FALSE) } src_dir <- file.path(dir, "src") if (!file.exists(src_dir) && !dir.create(src_dir)) { stop("failed to create 'src/' directory '", src_dir, "'", call. = FALSE) } test_runner_path <- file.path(src_dir, "test-runner.cpp") # Copy the test runner. success <- file.copy( system.file(package = "testthat", "resources", "test-runner.cpp"), test_runner_path, overwrite = TRUE ) if (!success) { stop("failed to copy 'test-runner.cpp' to '", src_dir, "'", call. = FALSE) } # Copy the test example. success <- file.copy( system.file(package = "testthat", "resources", "test-example.cpp"), file.path(src_dir, "test-example.cpp"), overwrite = TRUE ) if (!success) { stop("failed to copy 'test-example.cpp' to '", src_dir, "'", call. = FALSE) } # Copy the 'test-cpp.R' file. test_dir <- file.path(dir, "tests", "testthat") if (!file.exists(test_dir) && !dir.create(test_dir, recursive = TRUE)) { stop("failed to create 'tests/testthat/' directory '", test_dir, "'", call. 
= FALSE) } template_file <- system.file(package = "testthat", "resources", "test-cpp.R") contents <- readChar(template_file, file.info(template_file)$size, TRUE) transformed <- sprintf(contents, pkg) output_path <- file.path(test_dir, "test-cpp.R") cat(transformed, file = output_path) # Copy the 'test-runner.R file. template_file <- system.file(package = "testthat", "resources", "catch-routine-registration.R") contents <- readChar(template_file, file.info(template_file)$size, TRUE) transformed <- sprintf(contents, pkg) output_path <- file.path(dir, "R", "catch-routine-registration.R") cat(transformed, file = output_path) message("> Added C++ unit testing infrastructure.") message("> Please ensure you have 'LinkingTo: testthat' in your DESCRIPTION.") message("> Please ensure you have 'useDynLib(", pkg, ", .registration = TRUE)' in your NAMESPACE.") } get_routine <- function(package, routine) { # check to see if the package has explicitly exported # the associated routine (check common prefixes as we # don't necessarily have access to the NAMESPACE and # know what the prefix is) namespace <- asNamespace(package) prefixes <- c("C_", "c_", "C", "c", "_", "") for (prefix in prefixes) { name <- paste(prefix, routine, sep = "") if (exists(name, envir = namespace)) { symbol <- get(name, envir = namespace) if (inherits(symbol, "NativeSymbolInfo")) { return(symbol) } } } # otherwise, try to resolve the symbol dynamically for (prefix in prefixes) { name <- paste(prefix, routine, sep = "") resolved <- tryCatch( getNativeSymbolInfo(routine, PACKAGE = package), error = function(e) NULL ) if (inherits(resolved, "NativeSymbolInfo")) { return(resolved) } } # if we got here, we failed to find the symbol -- throw an error fmt <- "failed to locate routine '%s' in package '%s'" stop(sprintf(fmt, routine, package), call. = FALSE) } (function() { .Call(run_testthat_tests) }) testthat/R/quasi-label.R0000644000176200001440000000507613564563701014657 0ustar liggesusers#' Quasi-labelling #' #' The first argument to every `expect_` function can use unquoting to #' construct better labels. This makes it easy to create informative labels when #' expectations are used inside a function or a for loop. `quasi_label()` wraps #' up the details, returning the expression and label. #' #' @section Limitations: #' Because all `expect_` function use unquoting to generate more informative #' labels, you can not use unquoting for other purposes. Instead, you'll need #' to perform all other unquoting outside of the expectation and only test #' the results. #' #' @param quo A quosure created by `rlang::enquo()`. #' @param label An optional label to override the default. This is #' only provided for internal usage. Modern expectations should not #' include a `label` parameter. #' @param arg Argument name shown in error message if `quo` is missing. #' @keywords internal #' @return A list containing two elements: #' \item{val}{The evaluate value of `quo`} #' \item{lab}{The quasiquoted label generated from `quo`} #' @export #' @examples #' f <- function(i) if (i > 3) i * 9 else i * 10 #' i <- 10 #' #' # This sort of expression commonly occurs inside a for loop or function #' # And the failure isn't helpful because you can't see the value of i #' # that caused the problem: #' show_failure(expect_equal(f(i), i * 10)) #' #' # To overcome this issue, testthat allows you to unquote expressions using #' # !!. 
This causes the failure message to show the value rather than the #' # variable name #' show_failure(expect_equal(f(!!i), !!(i * 10))) quasi_label <- function(quo, label = NULL, arg = "quo") { force(quo) if (quo_is_missing(quo)) { stop("argument `", arg, "` is missing, with no default.", call. = FALSE) } expr <- quo_get_expr(quo) list( val = eval_bare(expr, quo_get_env(quo)), lab = label %||% expr_label(expr) ) } quasi_capture <- function(.quo, .label, .capture, ...) { act <- list() act$lab <- .label %||% quo_label(.quo) act$cap <- .capture(act$val <- eval_bare(quo_get_expr(.quo), quo_get_env(.quo)), ...) act } expr_label <- function(x) { if (is.atomic(x)) { x <- deparse(x) if (length(x) > 1) { x <- paste0(x[[1]], "...)") } x } else if (is.name(x)) { paste0("`", as.character(x), "`") } else { chr <- deparse(x) if (length(chr) > 1) { if (identical(x[[1]], quote(`function`))) { x[[3]] <- quote(...) chr <- paste(deparse(x), collapse = "\n") } else { chr <- paste(deparse(as.call(list(x[[1]], quote(...)))), collapse = "\n") } } chr } } testthat/R/reporter-zzz.R0000644000176200001440000000555713564563701015161 0ustar liggesusers#' @include reporter-stop.R NULL #' Get and set active reporter. #' #' `get_reporter()` and `set_reporter()` access and modify the current "active" #' reporter. Generally, these functions should not be called directly; instead #' use `with_reporter()` to temporarily change, then reset, the active reporter. #' #' #' @inheritParams test_file #' @param reporter Reporter to use to summarise output. Can be supplied #' as a string (e.g. "summary") or as an R6 object #' (e.g. `SummaryReporter$new()`). #' #' See [Reporter] for more details and a list of built-in reporters. #' @param code Code to execute. #' @return `with_reporter()` invisible returns the reporter active when `code` #' was evaluated. #' @param start_end_reporter Should the reporters `start_reporter()` and #' `end_reporter()` methods be called? For expert use only. #' @keywords internal #' @name reporter-accessors NULL testthat_env <- new.env(parent = emptyenv()) # Default has to be the stop reporter, since it is this that will be run by # default from the command line and in R CMD test. testthat_env$reporter <- StopReporter$new() #' @rdname reporter-accessors #' @export set_reporter <- function(reporter) { old <- testthat_env$reporter testthat_env$reporter <- reporter invisible(old) } #' @rdname reporter-accessors #' @export get_reporter <- function() { testthat_env$reporter } #' @rdname reporter-accessors #' @export with_reporter <- function(reporter, code, start_end_reporter = TRUE) { reporter <- find_reporter(reporter) old <- set_reporter(reporter) on.exit(set_reporter(old), add = TRUE) if (start_end_reporter) { reporter$start_reporter() } withRestarts( testthat_abort_reporter = function() NULL, force(code) ) if (start_end_reporter) { reporter$end_reporter() } invisible(reporter) } #' Find reporter object given name or object. #' #' If not found, will return informative error message. #' Pass a character vector to create a [MultiReporter] composed #' of individual reporters. #' Will return null if given NULL. 
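#'
#' @examples
#' # Illustrative sketch (not part of the original docs); `find_reporter()` is
#' # internal, hence the `:::`. A name, a vector of names, or a Reporter object
#' # all resolve to a Reporter instance, as described above.
#' testthat:::find_reporter("summary")
#' testthat:::find_reporter(c("summary", "location"))
#' testthat:::find_reporter(SummaryReporter$new())
#' testthat:::find_reporter(NULL)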
#' #' @param reporter name of reporter(s), or reporter object(s) #' @keywords internal find_reporter <- function(reporter) { if (is.null(reporter)) return(NULL) if (inherits(reporter, "R6ClassGenerator")) { reporter$new() } else if (inherits(reporter, "Reporter")) { reporter } else if (is.character(reporter)) { if (length(reporter) <= 1L) { find_reporter_one(reporter) } else { MultiReporter$new(reporters = lapply(reporter, find_reporter_one)) } } else { stop("Invalid input", call. = FALSE) } } find_reporter_one <- function(reporter, ...) { stopifnot(is.character(reporter)) name <- reporter substr(name, 1, 1) <- toupper(substr(name, 1, 1)) name <- paste0(name, "Reporter") if (!exists(name)) { stop("Can not find test reporter ", reporter, call. = FALSE) } get(name)$new(...) } testthat/R/expect-self-test.R0000644000176200001440000000340313456034771015644 0ustar liggesusers#' @include reporter.R NULL #' Tools for testing expectations #' #' Use these expectations to test other expectations. #' Use `show_failure()` in examples to print the failure message without #' throwing an error. #' #' @param expr Expression that evaluates a single expectation. #' @param message Check that the failure message matches this regexp. #' @param ... Other arguments passed on to [expect_match()]. #' @keywords internal #' @export expect_success <- function(expr) { exp <- capture_expectation(expr) if (is.null(exp)) { fail("no expectation used.") } else if (!expectation_success(exp)) { fail(paste0( "Expectation did not succeed:\n", exp$message )) } else { succeed() } invisible(NULL) } #' @export #' @rdname expect_success expect_failure <- function(expr, message = NULL, ...) { exp <- capture_expectation(expr) if (is.null(exp)) { fail("No expectation used") return() } if (!expectation_failure(exp)) { fail("Expectation did not fail") return() } if (!is.null(message)) { expect_match(exp$message, message, ...) } else { succeed() } invisible(NULL) } #' @export #' @rdname expect_success show_failure <- function(expr) { exp <- capture_expectation(expr) if (!is.null(exp) && expectation_failure(exp)) { cat(crayon::bold("Failed expectation:\n")) cat(exp$message, "\n", sep = "") } invisible() } #' @export #' @rdname expect_success #' @param path Path to save failure output expect_known_failure <- function(path, expr) { FailureReporter <- R6::R6Class("FailureReporter", inherit = CheckReporter, public = list(end_reporter = function(...) {}) ) expect_known_output( with_reporter(test_that("", expr), reporter = FailureReporter$new()), path ) } testthat/R/evaluate-promise.R0000644000176200001440000000253313564563701015735 0ustar liggesusers#' Evaluate a promise, capturing all types of output. #' #' @param code Code to evaluate. 
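#'
#' As a brief sketch of how the self-testing helpers defined above are used
#' (they test other expectations, so both calls below are expected to pass):
#' \dontrun{
#' expect_success(expect_true(TRUE))
#' expect_failure(expect_true(FALSE), "isn't true")
#' }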
#' @keywords internal #' @export #' @return A list containing #' \item{result}{The result of the function} #' \item{output}{A string containing all the output from the function} #' \item{warnings}{A character vector containing the text from each warning} #' \item{messages}{A character vector containing the text from each message} #' @examples #' evaluate_promise({ #' print("1") #' message("2") #' warning("3") #' 4 #' }) evaluate_promise <- function(code, print = FALSE) { warnings <- Stack$new() handle_warning <- function(condition) { warnings$push(condition) maybe_restart("muffleWarning") } messages <- Stack$new() handle_message <- function(condition) { messages$push(condition) maybe_restart("muffleMessage") } temp <- file() on.exit(close(temp)) result <- withr::with_output_sink( temp, withCallingHandlers( withVisible(code), warning = handle_warning, message = handle_message ) ) if (result$visible && print) { withr::with_output_sink(temp, print(result$value)) } output <- paste0(read_lines(temp), collapse = "\n") list( result = result$value, output = output, warnings = get_messages(warnings$as_list()), messages = get_messages(messages$as_list()) ) } testthat/R/recover.R0000644000176200001440000000322513564563701014117 0ustar liggesusers# Modeled after utils::recover(), which is # part of the R package, https://www.R-project.org # # Copyright (C) 1995-2016 The R Core Team # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # https://www.R-project.org/Licenses/ recover2 <- function(start_frame = 1L, end_frame = sys.nframe()) { calls <- sys.calls() if (.isMethodsDispatchOn()) { tState <- tracingState(FALSE) on.exit(tracingState(tState)) } from <- min(end_frame, length(calls)) calls <- calls[start_frame:from] if (rlang::is_false(peek_option("testthat_format_srcrefs"))) { calls <- lapply(calls, zap_srcref) } calls <- utils::limitedLabels(calls) repeat { which <- show_menu(calls, "\nEnter a frame number, or 0 to exit ") if (which) { frame <- sys.frame(start_frame - 2 + which) browse_frame(frame, skip = 7 - which) } else { break } } } zap_srcref <- function(x) { attr(x, "srcref") <- NULL x } show_menu <- function(choices, title = NULL) { utils::menu(choices, title = title) } browse_frame <- function(frame, skip) { eval( substitute(browser(skipCalls = skip), list(skip = skip)), envir = frame ) } testthat/R/expect-vector.R0000644000176200001440000000217313456034771015243 0ustar liggesusers#' Expectation: does the object have vctr properties? #' #' `expect_vector()` is a thin wrapper around [vctrs::vec_assert()], converting #' the results of that function in to the expectations used by testthat. This #' means that it used the vctrs of `ptype` (prototype) and `size`. See #' details in #' #' @inheritParams expect_that #' @param ptype (Optional) Vector prototype to test against. Should be a #' size-0 (empty) generalised vector. #' @param size (Optional) Size to check for. 
#' @export #' @examples #' if (requireNamespace("vctrs") && packageVersion("vctrs") > "0.1.0.9002") { #' expect_vector(1:10, ptype = integer(), size = 10) #' show_failure(expect_vector(1:10, ptype = integer(), size = 5)) #' show_failure(expect_vector(1:10, ptype = character(), size = 5)) #' } expect_vector <- function(object, ptype = NULL, size = NULL) { act <- quasi_label(enquo(object), arg = "object") tryCatch( vctrs::vec_assert(act$val, ptype = ptype, size = size, arg = act$lab), vctrs_error_assert = function(e) { expect(FALSE, e$message) } ) expect(TRUE, "success") } testthat/R/compare-numeric.R0000644000176200001440000000540613564563701015543 0ustar liggesusers#' @include compare.R #' @export #' @rdname compare #' @param tolerance Numerical tolerance: any differences smaller than this #' value will be ignored. #' #' The default tolerance is `sqrt(.Machine$double.eps)`, unless long doubles #' are not available, in which case the test is skipped. #' @examples #' # Numeric ------------------------------------------------------------------- #' #' x <- y <- runif(100) #' y[sample(100, 10)] <- 5 #' compare(x, y) #' #' x <- y <- 1:10 #' x[5] <- NA #' x[6] <- 6.5 #' compare(x, y) #' #' # Compare ignores minor numeric differences in the same way #' # as all.equal. #' compare(x, x + 1e-9) compare.numeric <- function(x, y, tolerance = testthat_tolerance(), check.attributes = TRUE, ..., max_diffs = 9) { all_equal <- all.equal( x, y, tolerance = tolerance, check.attributes = check.attributes, ... ) if (isTRUE(all_equal)) { return(no_difference()) } if (!typeof(y) %in% c("integer", "double")) { return(diff_type(x, y)) } if (!same_class(x, y)) { return(diff_class(x, y)) } if (!same_length(x, y)) { return(diff_length(x, y)) } if (check.attributes && !same_attr(x, y)) { return(diff_attr(x, y)) } diff <- !vector_equal_tol(x, y, tolerance = tolerance) if (!any(diff)) { no_difference() } else { mismatches <- mismatch_numeric(x, y, diff) difference(format(mismatches, max_diffs = max_diffs)) } } #' @export #' @rdname compare testthat_tolerance <- function(x) { if (identical(capabilities("long.double"), FALSE)) { skip("Long doubles not available and `tolerance` not supplied") } .Machine$double.eps ^ 0.5 } mismatch_numeric <- function(x, y, diff = !vector_equal(x, y)) { structure( list( i = which(diff), x = x[diff], y = y[diff], n = length(diff), n_diff = sum(diff), mu_diff = mean(abs(x[diff] - y[diff]), na.rm = TRUE) ), class = "mismatch_numeric" ) } #' @export format.mismatch_numeric <- function(x, ..., max_diffs = 9, digits = 3) { summary <- paste0(x$n_diff, "/", x$n, " mismatches") if (x$n_diff > 1) { mu <- format(x$mu_diff, digits = digits, trim = TRUE) summary <- paste0(summary, " (average diff: ", mu, ")") } n_show <- seq_len(min(x$n_diff, max_diffs)) diffs <- paste0( format(paste0("[", x$i[n_show], "]")), " ", format(x$x[n_show], digits = digits), " - ", format(x$y[n_show], digits = digits), " == ", format(x$x[n_show] - x$y[n_show], digits = digits) ) if (x$n_diff > length(n_show)) { diffs <- c(diffs, "...") } paste0(summary, "\n", paste(diffs, collapse = "\n")) } #' @export print.mismatch_numeric <- function(x, ...) { cat(format(x, ...), "\n", sep = "") } testthat/R/source.R0000644000176200001440000000460713456034771013757 0ustar liggesusers#' Source a file, directory of files, or various important subsets #' #' These are used by [test_dir()] and friends #' #' @inheritSection test_dir Test files #' @param path Path to files. #' @param pattern Regular expression used to filter files. 
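#'
#' A minimal sketch of typical use (the paths here are illustrative, not part
#' of the package):
#' \dontrun{
#' env <- test_env()
#'
#' # Source every helper file in a test directory into `env`
#' source_test_helpers("tests/testthat", env = env)
#'
#' # Source a single file, wrapping stray expectations in a test block
#' source_file("tests/testthat/helper-data.R", env = env, wrap = TRUE)
#' }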
#' @param env Environment in which to evaluate code. #' @param chdir Change working directory to `dirname(path)`? #' @param encoding Deprecated. #' @param wrap Automatically wrap all code within [test_that()]? This ensures #' that all expectations are reported, even if outside a test block. #' @export #' @keywords internal source_file <- function(path, env = test_env(), chdir = TRUE, encoding = "unknown", wrap = TRUE) { stopifnot(file.exists(path)) stopifnot(is.environment(env)) if (!missing(encoding) && !identical(encoding, "UTF-8")) { warning("`encoding` is deprecated; all files now assumed to be UTF-8", call. = FALSE) } lines <- read_lines(path) srcfile <- srcfilecopy(path, lines, file.info(path)[1, "mtime"], isFile = TRUE) ## We need to parse from a connection, because parse() has a bug, ## and converts the input to the native encoding, if the text arg is used con <- textConnection(lines, encoding = "UTF-8") on.exit(try(close(con), silent = TRUE), add = TRUE) exprs <- parse(con, n = -1, srcfile = srcfile, encoding = "UTF-8") n <- length(exprs) if (n == 0L) return(invisible()) if (chdir) { old_dir <- setwd(dirname(path)) on.exit(setwd(old_dir), add = TRUE) } if (wrap) { invisible(test_code(NULL, exprs, env)) } else { invisible(eval(exprs, env)) } } #' @rdname source_file #' @export source_dir <- function(path, pattern = "\\.[rR]$", env = test_env(), chdir = TRUE, wrap = TRUE) { files <- normalizePath(sort(dir(path, pattern, full.names = TRUE))) lapply(files, source_file, env = env, chdir = chdir, wrap = wrap) } #' @rdname source_file #' @export source_test_helpers <- function(path = "tests/testthat", env = test_env()) { source_dir(path, "^helper.*\\.[rR]$", env = env, wrap = FALSE) } #' @rdname source_file #' @export source_test_setup <- function(path = "tests/testthat", env = test_env()) { source_dir(path, "^setup.*\\.[rR]$", env = env, wrap = FALSE) } #' @rdname source_file #' @export source_test_teardown <- function(path = "tests/testthat", env = test_env()) { source_dir(path, "^teardown.*\\.[rR]$", env = env, wrap = FALSE) } testthat/R/describe.R0000644000176200001440000000550013176113117014217 0ustar liggesusers#' describe: a BDD testing language #' #' A simple BDD DSL for writing tests. The language is similiar to RSpec for #' Ruby or Mocha for JavaScript. BDD tests read like sentences and it should #' thus be easier to understand what the specification of a function/component #' is. #' #' Tests using the `describe` syntax not only verify the tested code, but #' also document its intended behaviour. Each `describe` block specifies a #' larger component or function and contains a set of specifications. A #' specification is definied by an `it` block. Each `it` block #' functions as a test and is evaluated in its own environment. You #' can also have nested `describe` blocks. #' #' #' This test syntax helps to test the intented behaviour of your code. For #' example: you want to write a new function for your package. Try to describe #' the specification first using `describe`, before your write any code. #' After that, you start to implement the tests for each specification (i.e. #' the `it` block). #' #' Use `describe` to verify that you implement the right things and use #' [test_that()] to ensure you do the things right. 
#' #' @param description description of the feature #' @param code test code containing the specs #' @export #' @examples #' describe("matrix()", { #' it("can be multiplied by a scalar", { #' m1 <- matrix(1:4, 2, 2) #' m2 <- m1 * 2 #' expect_equivalent(matrix(1:4 * 2, 2, 2), m2) #' }) #' it("can have not yet tested specs") #' }) #' #' # Nested specs: #' ## code #' addition <- function(a, b) a + b #' division <- function(a, b) a / b #' #' ## specs #' describe("math library", { #' describe("addition()", { #' it("can add two numbers", { #' expect_equivalent(1 + 1, addition(1, 1)) #' }) #' }) #' describe("division()", { #' it("can divide two numbers", { #' expect_equivalent(10 / 2, division(10, 2)) #' }) #' it("can handle division by 0") #not yet implemented #' }) #' }) describe <- function(description, code) { is_invalid_description <- function(description) { !is.character(description) || length(description) != 1 || nchar(description) == 0 } if (is_invalid_description(description)) { stop("description must be a string of at least length 1") } # prepares a new environment for each it-block describe_environment <- new.env(parent = parent.frame()) describe_environment$it <- function(it_description, it_code = NULL) { if (is_invalid_description(it_description)) { stop("it-description must be a string of at least length 1") } if (missing(it_code)) return() test_description <- paste0(description, ": ", it_description) test_code( test_description, substitute(it_code), env = describe_environment, skip_on_empty = FALSE ) } eval(substitute(code), describe_environment) invisible() } testthat/R/compare-time.R0000644000176200001440000000135413137625470015033 0ustar liggesusers#' @rdname compare #' @export compare.POSIXt <- function(x, y, tolerance = 0.001, ..., max_diffs = 9) { if (!inherits(y, "POSIXt")) { return(diff_class(x, y)) } if (!same_length(x, y)) { return(diff_length(x, y)) } x <- standardise_tzone(as.POSIXct(x)) y <- standardise_tzone(as.POSIXct(y)) if (!same_attr(x, y)) { return(diff_attr(x, y)) } diff <- !vector_equal_tol(x, y, tolerance = tolerance) if (!any(diff)) { no_difference() } else { mismatches <- mismatch_numeric(x, y, diff) difference(format(mismatches, max_diffs = max_diffs)) } } standardise_tzone <- function(x) { if (is.null(attr(x, "tzone")) || identical(attr(x, "tzone"), Sys.timezone())) { attr(x, "tzone") <- "" } x } testthat/R/expectations-matches.R0000644000176200001440000000330513521025554016571 0ustar liggesusers#' Expectation: does string match a regular expression? #' #' @details #' `expect_match()` is a wrapper around [grepl()]. See its documentation for #' more detail about the individual arguments. #' #' @inheritParams expect_that #' @inheritParams base::grepl #' @param regexp Regular expression to test against. 
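#'
#' When the actual value has more than one element, `all` controls whether
#' every element must match. For instance (the second call is expected to fail):
#' \dontrun{
#' expect_match(c("testing", "nesting"), "test", all = FALSE)
#' expect_match(c("testing", "nesting"), "test", all = TRUE)
#' }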
#' @param all Should all elements of actual value match `regexp` (TRUE), #' or does only one need to match (FALSE) #' @inheritDotParams base::grepl -pattern -x -perl -fixed #' @family expectations #' @export #' @examples #' expect_match("Testing is fun", "fun") #' expect_match("Testing is fun", "f.n") #' #' \dontrun{ #' expect_match("Testing is fun", "horrible") #' #' # Zero-length inputs always fail #' expect_match(character(), ".") #' } expect_match <- function(object, regexp, perl = FALSE, fixed = FALSE, ..., all = TRUE, info = NULL, label = NULL) { act <- quasi_label(enquo(object), label, arg = "object") stopifnot(is.character(regexp), length(regexp) == 1) stopifnot(is.character(act$val)) if (length(object) == 0) { fail(sprintf("%s is empty.", act$lab), info = info) } matches <- grepl(regexp, act$val, perl = perl, fixed = fixed, ...) escape <- if (fixed) identity else escape_regex if (length(act$val) == 1) { values <- paste0("Actual value: \"", escape(encodeString(act$val)), "\"") } else { values <- paste0( "Actual values:\n", paste0("* ", escape(encodeString(act$val)), collapse = "\n") ) } expect( if (all) all(matches) else any(matches), sprintf( "%s does not match %s.\n%s", escape(act$lab), encodeString(regexp, quote = '"'), values ), info = info ) invisible(act$val) } testthat/R/compare-character.R0000644000176200001440000000640413216707011016020 0ustar liggesusers#' @include compare.R #' @param max_diffs Maximum number of differences to show #' @param max_lines Maximum number of lines to show from each difference #' @param check.attributes If `TRUE`, also checks values of attributes. #' @param width Width of output device #' @rdname compare #' @export #' @examples #' # Character ----------------------------------------------------------------- #' x <- c("abc", "def", "jih") #' compare(x, x) #' #' y <- paste0(x, "y") #' compare(x, y) #' #' compare(letters, paste0(letters, "-")) #' #' x <- "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis cursus #' tincidunt auctor. Vestibulum ac metus bibendum, facilisis nisi non, pulvinar #' dolor. Donec pretium iaculis nulla, ut interdum sapien ultricies a. " #' y <- "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis cursus #' tincidunt auctor. Vestibulum ac metus1 bibendum, facilisis nisi non, pulvinar #' dolor. Donec pretium iaculis nulla, ut interdum sapien ultricies a. 
" #' compare(x, y) #' compare(c(x, x), c(y, y)) #' compare.character <- function(x, y, check.attributes = TRUE, ..., max_diffs = 5, max_lines = 5, width = cli::console_width()) { if (identical(x, y)) { return(no_difference()) } if (!same_type(x, y)) { return(diff_type(x, y)) } if (!same_class(x, y)) { return(diff_class(x, y)) } if (!same_length(x, y)) { return(diff_length(x, y)) } if (check.attributes && !same_attr(x, y)) { return(diff_attr(x, y)) } diff <- !vector_equal(x, y) if (!any(diff)) { no_difference() } else { mismatches <- mismatch_character(x, y, diff) difference(format( mismatches, max_diffs = max_diffs, max_lines = max_lines, width = width )) } } mismatch_character <- function(x, y, diff = !vector_equal(x, y)) { structure( list( i = which(diff), x = x[diff], y = y[diff], n = length(diff), n_diff = sum(diff) ), class = "mismatch_character" ) } #' @export format.mismatch_character <- function(x, ..., max_diffs = 5, max_lines = 5, width = cli::console_width()) { width <- width - 6 # allocate space for labels n_show <- seq_len(min(x$n_diff, max_diffs)) encode <- function(x) encodeString(x, quote = '"') show_x <- str_trunc(encode(x$x[n_show]), width * max_lines) show_y <- str_trunc(encode(x$y[n_show]), width * max_lines) show_i <- x$i[n_show] sidebyside <- Map(function(x, y, pos) { x <- paste0("x[", pos, "]: ", str_chunk(x, width)) y <- paste0("y[", pos, "]: ", str_chunk(y, width)) paste(c(x, y), collapse = "\n") }, show_x, show_y, show_i) summary <- paste0(x$n_diff, "/", x$n, " mismatches") paste0(summary, "\n", paste0(sidebyside, collapse = "\n\n")) } #' @export print.mismatch_character <- function(x, ...) { cat(format(x, ...), "\n", sep = "") } str_trunc <- function(x, length) { too_long <- nchar(x) > length x[too_long] <- paste0(substr(x[too_long], 1, length - 3), "...") x } str_chunk <- function(x, length) { lines <- ceiling(nchar(x) / length) start <- (seq_len(lines) - 1) * length + 1 substring(x, start, start + length - 1) } testthat/R/try-again.R0000644000176200001440000000161413564563701014345 0ustar liggesusers#' Try evaluating an expressing multiple times until it succeeds. #' #' @param times Maximum number of attempts. #' @param code Code to evaluate #' @keywords internal #' @export #' @examples #' third_try <- local({ #' i <- 3 #' function() { #' i <<- i - 1 #' if (i > 0) fail(paste0("i is ", i)) #' } #' }) #' try_again(3, third_try()) try_again <- function(times, code) { while (times > 0) { e <- tryCatch( withCallingHandlers( { code NULL }, warning = function(e) { if (identical(e$message, "restarting interrupted promise evaluation")) { maybe_restart("muffleWarning") } } ), expectation_failure = function(e) { e }, error = function(e) { e } ) if (is.null(e)) { return(invisible(TRUE)) } times <- times - 1L } stop(e) } testthat/R/reporter-stop.R0000644000176200001440000000306013564563701015274 0ustar liggesusers#' @include reporter.R NULL #' Test reporter: stop on error. #' #' The default reporter, executed when `expect_that` is run interactively. #' It responds by [stop()]ping on failures and doing nothing otherwise. This #' will ensure that a failing test will raise an error. #' #' This should be used when doing a quick and dirty test, or during the final #' automated testing of R CMD check. Otherwise, use a reporter that runs all #' tests and gives you more context about the problem. 
#' #' @export #' @family reporters StopReporter <- R6::R6Class("StopReporter", inherit = Reporter, public = list( failures = NULL, initialize = function() { super$initialize() self$failures <- Stack$new() }, end_test = function(context, test) { failures <- self$failures$as_list() if (length(failures) == 0) { return() } # reset failures for next test self$failures$initialize() messages <- vapply(failures, format, character(1)) locations <- vapply(failures, exp_location, character(1)) messages <- paste0("* ", locations, messages, collapse = "\n") message <- paste_line( paste0("Test failed: '", test, "'"), !!!messages ) if (is.null(findRestart("testthat_abort_reporter"))) { stop(message, call. = FALSE) } else { cat(message, "\n") invokeRestart("testthat_abort_reporter") } }, add_result = function(context, test, result) { if (expectation_broken(result)) { self$failures$push(result) } } ) ) testthat/R/reporter-location.R0000644000176200001440000000167713176113117016122 0ustar liggesusers#' @include reporter.R NULL #' Test reporter: location #' #' This reporter simply prints the location of every expectation and error. #' This is useful if you're trying to figure out the source of a segfault, #' or you want to figure out which code triggers a C/C++ breakpoint #' #' @export #' @family reporters LocationReporter <- R6::R6Class("LocationReporter", inherit = Reporter, public = list( start_test = function(context, test) { self$cat_line("Start test: ", test) }, add_result = function(context, test, result) { ref <- result$srcref if (is.null(ref)) { location <- "?#?:?" } else { location <- paste0(basename(attr(ref, "srcfile")$filename), "#", ref[1], ":1") } status <- expectation_type(result) self$cat_line(" ", location, " [", status, "]") }, end_test = function(context, test) { self$cat_line("End test: ", test) self$cat_line() } ) ) testthat/R/capture-output.R0000644000176200001440000000334513456034771015456 0ustar liggesusers#' Capture output to console #' #' Evaluates `code` in a special context in which all output is captured, #' similar to [capture.output()]. #' #' @param code Code to evaluate. #' @param print If `TRUE` and the result of evaluating `code` is #' visible this will print the result, ensuring that the output of printing #' the object is included in the overall output #' @param width Number of characters per line of output. This does not #' inherit from `getOption("width")` so that tests always use the same #' output width, minimising spurious differences. #' @return `capture_output()` returns a single string. 
`capture_output_lines()` #' returns a character vector with one entry for each line #' @keywords internal #' @export #' @examples #' capture_output({ #' cat("Hi!\n") #' cat("Bye\n") #' }) #' #' capture_output_lines({ #' cat("Hi!\n") #' cat("Bye\n") #' }) #' #' capture_output("Hi") #' capture_output("Hi", print = TRUE) capture_output <- function(code, print = FALSE, width = 80) { output <- capture_output_lines(code, print, width = width) paste0(output, collapse = "\n") } #' @export #' @rdname capture_output capture_output_lines <- function(code, print = FALSE, width = 80) { eval_with_output(code, print = print, width = width)$out } eval_with_output <- function(code, print = FALSE, width = 80) { temp <- file() on.exit(close(temp), add = TRUE) withr::local_options(list(width = width)) withr::local_envvar(list(RSTUDIO_CONSOLE_WIDTH = width)) result <- withr::with_output_sink(temp, withVisible(code)) if (result$visible && print) { withr::with_output_sink(temp, print(result$value)) } list( val = result$value, vis = result$visible, out = read_lines(temp, encoding = "unknown") ) } testthat/R/expect-silent.R0000644000176200001440000000143613564563701015240 0ustar liggesusers#' Expectation: is the code silent? #' #' Checks that the code produces no output, messages, or warnings. #' #' @inheritParams expect_error #' @return The first argument, invisibly. #' @family expectations #' @export #' @examples #' expect_silent("123") #' #' f <- function() { #' message("Hi!") #' warning("Hey!!") #' print("OY!!!") #' } #' \dontrun{ #' expect_silent(f()) #' } expect_silent <- function(object) { act <- quasi_capture(enquo(object), NULL, evaluate_promise) outputs <- c( if (!identical(act$cap$output, "")) "output", if (length(act$cap$warnings) > 0) "warnings", if (length(act$cap$messages) > 0) "messages" ) expect( length(outputs) == 0, sprintf("%s produced %s.", act$lab, paste(outputs, collapse = ", ")) ) invisible(act$cap$result) } testthat/R/reporter-fail.R0000644000176200001440000000113313176113117015210 0ustar liggesusers#' @include reporter.R NULL #' Test reporter: fail at end. #' #' This reporter will simply throw an error if any of the tests failed. It is #' best combined with another reporter, such as the #' [SummaryReporter]. #' #' @export #' @family reporters FailReporter <- R6::R6Class("FailReporter", inherit = Reporter, public = list( failed = FALSE, add_result = function(context, test, result) { self$failed <- self$failed || expectation_broken(result) }, end_reporter = function() { if (self$failed) { stop("Failures detected.", call. = FALSE) } } ) ) testthat/R/expect-named.R0000644000176200001440000000370313456034771015025 0ustar liggesusers#' Expectation: does object have names? #' #' You can either check for the presence of names (leaving `expected` #' blank), specific names (by suppling a vector of names), or absence of #' names (with `NULL`). #' #' @inheritParams expect_that #' @param expected Character vector of expected names. Leave missing to #' match any names. Use `NULL` to check for absence of names. #' @param ignore.order If `TRUE`, sorts names before comparing to #' ignore the effect of order. #' @param ignore.case If `TRUE`, lowercases all names to ignore the #' effect of case. #' @param ... Other arguments passed on to [has_names()]. 
#' @family expectations #' @export #' @examples #' x <- c(a = 1, b = 2, c = 3) #' expect_named(x) #' expect_named(x, c("a", "b", "c")) #' #' # Use options to control sensitivity #' expect_named(x, c("B", "C", "A"), ignore.order = TRUE, ignore.case = TRUE) #' #' # Can also check for the absence of names with NULL #' z <- 1:4 #' expect_named(z, NULL) expect_named <- function(object, expected, ignore.order = FALSE, ignore.case = FALSE, info = NULL, label = NULL) { act <- quasi_label(enquo(object), label, arg = "object") act$names <- names(act$val) if (missing(expected)) { expect( !identical(act$names, NULL), sprintf("%s does not have names.", act$lab) ) } else { exp_names <- normalise_names(expected, ignore.order, ignore.case) act$names <- normalise_names(act$names, ignore.order, ignore.case) expect( identical(act$names, exp_names), sprintf( "Names of %s (%s) don't match %s", act$lab, paste0("'", act$names, "'", collapse = ", "), paste0("'", exp_names, "'", collapse = ", ") ), info = info ) } invisible(act$val) } normalise_names <- function(x, ignore.order = FALSE, ignore.case = FALSE) { if (is.null(x)) return() if (ignore.order) x <- sort(x) if (ignore.case) x <- tolower(x) x } testthat/R/expect-logical.R0000644000176200001440000000245213456034771015353 0ustar liggesusers#' Expectation: is the object true/false? #' #' These are fall-back expectations that you can use when none of the other #' more specific expectations apply. The disadvantage is that you may get #' a less informative error message. #' #' Attributes are ignored. #' #' @seealso [is_false()] for complement #' @inheritParams expect_that #' @family expectations #' @examples #' expect_true(2 == 2) #' # Failed expectations will throw an error #' \dontrun{ #' expect_true(2 != 2) #' } #' expect_true(!(2 != 2)) #' # or better: #' expect_false(2 != 2) #' #' a <- 1:3 #' expect_true(length(a) == 3) #' # but better to use more specific expectation, if available #' expect_equal(length(a), 3) #' @name logical-expectations NULL #' @export #' @rdname logical-expectations expect_true <- function(object, info = NULL, label = NULL) { act <- quasi_label(enquo(object), label, arg = "object") expect( identical(as.vector(act$val), TRUE), sprintf("%s isn't true.", act$lab), info = info ) invisible(act$val) } #' @export #' @rdname logical-expectations expect_false <- function(object, info = NULL, label = NULL) { act <- quasi_label(enquo(object), label, arg = "object") expect( identical(as.vector(act$val), FALSE), sprintf("%s isn't false.", act$lab), info = info ) invisible(act$val) } testthat/R/mock.R0000644000176200001440000001114113456034771013377 0ustar liggesusers#' Mock functions in a package. #' #' Executes code after temporarily substituting implementations of package #' functions. This is useful for testing code that relies on functions that are #' slow, have unintended side effects or access resources that may not be #' available when testing. #' #' This works by using some C code to temporarily modify the mocked function \emph{in place}. #' On exit (regular or error), all functions are restored to their previous state. #' This is somewhat abusive of R's internals, and is still experimental, so use with care. #' #' Functions in base packages cannot be mocked, but this can be #' worked around easily by defining a wrapper function. #' #' @param ... 
named parameters redefine mocked functions, unnamed parameters #' will be evaluated after mocking the functions #' @param .env the environment in which to patch the functions, #' defaults to the top-level environment. A character is interpreted as #' package name. #' @param .local_env Environment in which to add exit hander. #' For expert use only. #' @keywords internal #' @return The result of the last unnamed parameter #' @references Suraj Gupta (2012): \href{http://obeautifulcode.com/R/How-R-Searches-And-Finds-Stuff}{How R Searches And Finds Stuff} #' @export #' @examples #' add_one <- function(x) x + 1 #' expect_equal(add_one(2), 3) #' with_mock( #' add_one = function(x) x - 1, #' expect_equal(add_one(2), 1) #' ) #' square_add_one <- function(x) add_one(x)^2 #' expect_equal(square_add_one(2), 9) #' expect_equal( #' with_mock( #' add_one = function(x) x - 1, #' square_add_one(2) #' ), #' 1 #' ) #' #' # local_mock() ------------------------------- #' plus <- function(x, y) x + y #' test_that("plus(1, 1) == 2", { #' expect_equal(plus(1, 1), 2) #' }) #' #' test_that("plus(1, 1) == 3", { #' local_mock(plus = function(x, y) 3) #' expect_equal(plus(1, 1), 3) #' }) with_mock <- function(..., .env = topenv()) { dots <- eval(substitute(alist(...))) mock_qual_names <- names(dots) if (all(mock_qual_names == "")) { warning( "Not mocking anything. Please use named parameters to specify the functions you want to mock.", call. = FALSE ) code_pos <- rep(TRUE, length(dots)) } else { code_pos <- (mock_qual_names == "") } code <- dots[code_pos] mock_funs <- lapply(dots[!code_pos], eval, parent.frame()) mocks <- extract_mocks(mock_funs, .env = .env) on.exit(lapply(mocks, reset_mock), add = TRUE) lapply(mocks, set_mock) # Evaluate the code if (length(code) > 0) { for (expression in code[-length(code)]) { eval(expression, parent.frame()) } # Isolate last item for visibility eval(code[[length(code)]], parent.frame()) } } #' @export #' @rdname with_mock local_mock <- function(..., .env = topenv(), .local_envir = parent.frame()) { mocks <- extract_mocks(list(...), .env = .env) on_exit <- bquote( on.exit(lapply(.(mocks), .(reset_mock)), add = TRUE), ) lapply(mocks, set_mock) eval_bare(on_exit, .local_envir) invisible() } pkg_rx <- ".*[^:]" colons_rx <- "::(?:[:]?)" name_rx <- ".*" pkg_and_name_rx <- sprintf("^(?:(%s)%s)?(%s)$", pkg_rx, colons_rx, name_rx) extract_mocks <- function(funs, .env) { if (is.environment(.env)) { .env <- environmentName(.env) } mock_qual_names <- names(funs) lapply( stats::setNames(nm = mock_qual_names), function(qual_name) { pkg_name <- gsub(pkg_and_name_rx, "\\1", qual_name) if (is_base_pkg(pkg_name)) { stop( "Can't mock functions in base packages (", pkg_name, ")", call. = FALSE ) } name <- gsub(pkg_and_name_rx, "\\2", qual_name) if (pkg_name == "") { pkg_name <- .env } env <- asNamespace(pkg_name) if (!exists(name, envir = env, mode = "function")) { stop("Function ", name, " not found in environment ", environmentName(env), ".", call. 
= FALSE ) } mock(name = name, env = env, new = funs[[qual_name]]) } ) } mock <- function(name, env, new) { target_value <- get(name, envir = env, mode = "function") structure( list( env = env, name = as.name(name), orig_value = .Call(duplicate_, target_value), target_value = target_value, new_value = new ), class = "mock" ) } set_mock <- function(mock) { .Call(reassign_function, mock$name, mock$env, mock$target_value, mock$new_value) } reset_mock <- function(mock) { .Call(reassign_function, mock$name, mock$env, mock$target_value, mock$orig_value) } is_base_pkg <- function(x) { x %in% rownames(utils::installed.packages(priority = "base")) } testthat/R/expect-that.R0000644000176200001440000000450013521025554014665 0ustar liggesusers#' Expect that a condition holds. #' #' An old style of testing that's no longer encouraged. #' #' @param object Object to test. #' #' Supports limited unquoting to make it easier to generate readable failures #' within a function or for loop. See [quasi_label] for more details. #' @param condition, a function that returns whether or not the condition #' is met, and if not, an error message to display. #' @param label Used to customise failure messages. For expert use only. #' @param info Extra information to be included in the message. This argument #' is soft-deprecated and should not be used in new code. Instead see #' alternatives in [quasi_label]. #' @return the (internal) expectation result as an invisible list #' @keywords internal #' @export #' @seealso [fail()] for an expectation that always fails. #' @examples #' expect_that(5 * 2, equals(10)) #' expect_that(sqrt(2) ^ 2, equals(2)) #' \dontrun{ #' expect_that(sqrt(2) ^ 2, is_identical_to(2)) #' } expect_that <- function(object, condition, info = NULL, label = NULL) { condition(object) } #' Default expectations that always succeed or fail. #' #' These allow you to manually trigger success or failure. Failure is #' particularly useful to a pre-condition or mark a test as not yet #' implemented. #' #' @param message a string to display. #' @inheritParams expect #' @export #' @examples #' \dontrun{ #' test_that("this test fails", fail()) #' test_that("this test succeeds", succeed()) #' } fail <- function(message = "Failure has been forced", info = NULL) { expect(FALSE, message, info = info) } #' @rdname fail #' @export succeed <- function(message = "Success has been forced", info = NULL) { expect(TRUE, message, info = info) } #' Negate an expectation #' #' This negates an expectation, making it possible to express that you #' want the opposite of a standard expectation. This function is deprecated #' and will be removed in a future version. #' #' @param f an existing expectation function #' @keywords internal #' @export not <- function(f) { warning("`not()` is deprecated.", call. = FALSE) stopifnot(is.function(f)) negate <- function(expt) { expect( !expectation_success(expt), failure_message = paste0("NOT(", expt$message, ")"), srcref = expt$srcref ) } function(...) { negate(capture_expectation(f(...))) } } testthat/R/teardown.R0000644000176200001440000000234013216707011014256 0ustar liggesusersteardown_env <- new.env(parent = emptyenv()) teardown_env$queue <- list() #' Run code on setup/teardown #' #' Code in a `setup()` block is run immediately in a clean environment. #' Code in a `teardown()` block is run upon completion of a test file, #' even if it exits with an error. Multiple calls to `teardown()` will be #' executed in the order they were created. 
#' #' @param code Code to evaluate #' @param env Environment in which code will be evaluted. For expert #' use only. #' @export #' @examples #' \dontrun{ #' #' tmp <- tempfile() #' setup(writeLines(tmp, "some test data")) #' teardown(unlink(tmp)) #' #' } teardown <- function(code, env = parent.frame()) { fun <- new_function(list(), enexpr(code), env = env) teardown_env$queue <- append(teardown_env$queue, fun) invisible() } #' @export #' @rdname teardown setup <- function(code, env = parent.frame()) { out <- eval_tidy(enquo(code), env = env) invisible(out) } teardown_reset <- function() { teardown_env$queue <- list() } teardown_run <- function(path = ".") { if (length(teardown_env$queue) == 0) return() old_dir <- setwd(path) on.exit(setwd(old_dir), add = TRUE) lapply(teardown_env$queue, function(f) try(f())) teardown_reset() gc() } testthat/R/colour-text.R0000644000176200001440000000074713216707011014731 0ustar liggesuserscolourise <- function(text, as = c("success", "skip", "warning", "failure", "error")) { colour_config <- getOption("testthat.use_colours", TRUE) if (!isTRUE(colour_config)) { return(text) } crayon::style(text, testthat_style(as)) } testthat_style <- function(type = c("success", "skip", "warning", "failure", "error")) { type <- match.arg(type) c( success = "green", skip = "blue", warning = "magenta", failure = "red", error = "red" )[[type]] } testthat/R/auto-test.R0000644000176200001440000001151313521025554014366 0ustar liggesusers#' Watches code and tests for changes, rerunning tests as appropriate. #' #' The idea behind `auto_test()` is that you just leave it running while #' you develop your code. Everytime you save a file it will be automatically #' tested and you can easily see if your changes have caused any test #' failures. #' #' The current strategy for rerunning tests is as follows: #' #' - if any code has changed, then those files are reloaded and all tests #' rerun #' - otherwise, each new or modified test is run #' #' In the future, `auto_test()` might implement one of the following more #' intelligent alternatives: #' #' - Use codetools to build up dependency tree and then rerun tests only #' when a dependency changes. #' - Mimic ruby's autotest and rerun only failing tests until they pass, #' and then rerun all tests. # #' @seealso [auto_test_package()] #' @export #' @param code_path path to directory containing code #' @param test_path path to directory containing tests #' @param reporter test reporter to use #' @param env environment in which to execute test suite. #' @param hash Passed on to [watch()]. When FALSE, uses less accurate #' modification time stamps, but those are faster for large files. 
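#'
#' A typical interactive call looks something like this (paths are
#' illustrative; the watcher runs until interrupted):
#' \dontrun{
#' auto_test("R", "tests/testthat")
#' auto_test("R", "tests/testthat", reporter = "summary", hash = FALSE)
#' }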
#' @keywords debugging auto_test <- function(code_path, test_path, reporter = default_reporter(), env = test_env(), hash = TRUE) { reporter <- find_reporter(reporter) code_path <- normalizePath(code_path) test_path <- normalizePath(test_path) # Start by loading all code and running all tests source_dir(code_path, env = env) test_dir(test_path, env = env, reporter = reporter$clone(deep = TRUE)) # Next set up watcher to monitor changes watcher <- function(added, deleted, modified) { changed <- normalizePath(c(added, modified)) tests <- changed[starts_with(changed, test_path)] code <- changed[starts_with(changed, code_path)] if (length(code) > 0) { # Reload code and rerun all tests cat("Changed code: ", paste0(basename(code), collapse = ", "), "\n") cat("Rerunning all tests\n") source_dir(code_path, env = env) test_dir(test_path, env = env, reporter = reporter$clone(deep = TRUE)) } else if (length(tests) > 0) { # If test changes, rerun just that test cat("Rerunning tests: ", paste0(basename(tests), collapse = ", "), "\n") test_files(tests, env = env, reporter = reporter$clone(deep = TRUE)) } TRUE } watch(c(code_path, test_path), watcher, hash = hash) } #' Watches a package for changes, rerunning tests as appropriate. #' #' @param pkg path to package #' @export #' @param reporter test reporter to use #' @param hash Passed on to [watch()]. When FALSE, uses less accurate #' modification time stamps, but those are faster for large files. #' @keywords debugging #' @seealso [auto_test()] for details on how method works auto_test_package <- function(pkg = ".", reporter = default_reporter(), hash = TRUE) { if (!requireNamespace("devtools", quietly = TRUE)) { stop( "devtools required to run auto_test_package(). Please install.", call. = FALSE ) } reporter <- find_reporter(reporter) path <- pkg pkg <- devtools::as.package(pkg) code_path <- file.path(pkg$path, c("R", "src")) code_path <- code_path[file.exists(code_path)] code_path <- normalizePath(code_path) test_path <- normalizePath(file.path(pkg$path, "tests", "testthat")) # Start by loading all code and running all tests env <- devtools::load_all(path)$env withr::with_envvar( devtools::r_env_vars(), test_dir(test_path, env = env, reporter = reporter$clone(deep = TRUE)) ) # Next set up watcher to monitor changes watcher <- function(added, deleted, modified) { changed <- normalizePath(c(added, modified)) tests <- changed[starts_with(changed, test_path)] code <- changed[starts_with(changed, code_path)] # Remove helper from test and add it to code (if a helper changed, # like for code, reload all and rerun all tests) helper <- tests[starts_with(basename(tests), "helper-")] tests <- setdiff(tests, helper) code <- c(code, helper) if (length(code) > 0) { # Reload code and rerun all tests cat("Changed code: ", paste0(basename(code), collapse = ", "), "\n") cat("Rerunning all tests\n") env <<- devtools::load_all(pkg, quiet = TRUE)$env withr::with_envvar( devtools::r_env_vars(), test_dir(test_path, env = env, reporter = reporter$clone(deep = TRUE)) ) } else if (length(tests) > 0) { # If test changes, rerun just that test cat("Rerunning tests: ", paste0(basename(tests), collapse = ", "), "\n") withr::with_envvar( devtools::r_env_vars(), test_files(tests, env = env, reporter = reporter$clone(deep = TRUE)) ) } TRUE } watch(c(code_path, test_path), watcher, hash = hash) } testthat/R/old-school.R0000644000176200001440000000705213266145057014516 0ustar liggesusers#' Old-style expectations. 
#' #' Initial testthat used a style of testing that looked like #' `expect_that(a, equals(b)))` this allowed expectations to read like #' English sentences, but was verbose and a bit too cutesy. This style #' will continue to work but has been soft-deprecated - it is no longer #' documented, and new expectations will only use the new style #' `expect_equal(a, b)`. #' #' @name oldskool #' @keywords internal NULL #' @export #' @rdname oldskool is_null <- function() { warning( "`is_null()` is deprecated. Please use `expect_null()` instead.", call. = FALSE ) function(x) expect_null(x) } #' @export #' @rdname oldskool is_a <- function(class) { function(x) expect_is(x, class) } #' @export #' @rdname oldskool is_true <- function() { function(x) { warning( "`is_true()` is deprecated. Please use `expect_true()` instead.", call. = FALSE ) expect_true(x) } } #' @export #' @rdname oldskool is_false <- function() { function(x) { warning( "`is_false()` is deprecated. Please use `expect_false()` instead.", call. = FALSE ) expect_false(x) } } #' @export #' @rdname oldskool has_names <- function(expected, ignore.order = FALSE, ignore.case = FALSE) { function(x) { expect_named(x, expected = expected, ignore.order = ignore.order, ignore.case = ignore.case) } } #' @export #' @rdname oldskool is_less_than <- function(expected, label = NULL, ...) { function(x) expect_lt(x, expected) } #' @export #' @rdname oldskool is_more_than <- function(expected, label = NULL, ...) { function(x) expect_gt(x, expected) } #' @export #' @rdname oldskool equals <- function(expected, label = NULL, ...) { function(x) expect_equal(x, expected, ..., expected.label = label) } #' @export #' @rdname oldskool is_equivalent_to <- function(expected, label = NULL) { function(x) expect_equivalent(x, expected, expected.label = label) } #' @export #' @rdname oldskool is_identical_to <- function(expected, label = NULL) { function(x) expect_identical(x, expected, expected.label = label) } #' @export #' @rdname oldskool equals_reference <- function(file, label = NULL, ...) { function(x) expect_known_value(x, file, expected.label = label, ...) } #' @export #' @rdname oldskool shows_message <- function(regexp = NULL, all = FALSE, ...) { function(x) expect_message(x, regexp = regexp, all = all, ...) } #' @export #' @rdname oldskool gives_warning <- function(regexp = NULL, all = FALSE, ...) { function(x) expect_warning(x, regexp = regexp, all = all, ...) } #' @export #' @rdname oldskool prints_text <- function(regexp = NULL, ...) { function(x) expect_output(x, regexp, ...) } #' @export #' @rdname oldskool throws_error <- function(regexp = NULL, ...) { function(x) expect_error(x, regexp, ...) } #' @export #' @rdname oldskool matches <- function(regexp, all = TRUE, ...) { warning( "`matches()` is deprecated. Please use `expect_match()` instead.", call. = FALSE ) function(x) expect_match(x, regexp, all = all, ...) } #' Expectation: does expression take less than a fixed amount of time to run? #' #' This is useful for performance regression testing. #' #' @keywords internal #' @export #' @param amount maximum duration in seconds takes_less_than <- function(amount) { warning( "takes_less_than() is deprecated because it is stochastic and unreliable", call. 
= FALSE ) function(expr) { duration <- system.time(force(expr))["elapsed"] expect( duration < amount, paste0("took ", duration, " seconds, which is more than ", amount) ) } } testthat/R/capture-condition.R0000644000176200001440000000506013564563701016100 0ustar liggesusers new_capture <- function(class) { exiting_handlers <- rep_named(class, list(identity)) calling_handlers <- rep_named(class, alist(function(cnd) { if (can_entrace(cnd)) { cnd <- cnd_entrace(cnd) } return_from(env, cnd) })) formals <- pairlist2(code = , entrace = FALSE) # R CMD check global variable NOTE code <- entrace <- NULL body <- expr({ if (!entrace) { return(tryCatch({ code; NULL }, !!!exiting_handlers)) } env <- environment() withCallingHandlers({ code; NULL }, !!!calling_handlers) }) new_function(formals, body, ns_env("testthat")) } #' Capture conditions, including messeages, warnings, expectations, and errors. #' #' These functions allow you to capture the side-effects of a function call #' including printed output, messages and warnings. They are used to evaluate #' code for [expect_output()], [expect_message()], #' [expect_warning()], and [expect_silent()]. #' #' @param code Code to evaluate #' @param entrace Whether to add a [backtrace][rlang::trace_back] to #' the captured condition. #' @return Singular functions (`capture_condition`, `capture_expectation` etc) #' return a condition object. `capture_messages()` and `capture_warnings` #' return a character vector of message text. #' @keywords internal #' @export #' @examples #' f <- function() { #' message("First") #' warning("Second") #' message("Third") #' } #' #' capture_message(f()) #' capture_messages(f()) #' #' capture_warning(f()) #' capture_warnings(f()) #' #' # Condition will capture anything #' capture_condition(f()) capture_condition <- new_capture("condition") #' @export #' @rdname capture_condition capture_error <- new_capture("error") #' @export #' @rdname capture_condition capture_expectation <- new_capture("expectation") #' @export #' @rdname capture_condition capture_message <- new_capture("condition") #' @export #' @rdname capture_condition capture_warning <- new_capture("warning") #' @export #' @rdname capture_condition capture_messages <- function(code) { out <- Stack$new() withCallingHandlers( code, message = function(condition) { out$push(condition) maybe_restart("muffleMessage") } ) get_messages(out$as_list()) } #' @export #' @rdname capture_condition capture_warnings <- function(code) { out <- Stack$new() withCallingHandlers( code, warning = function(condition) { out$push(condition) maybe_restart("muffleWarning") } ) get_messages(out$as_list()) } get_messages <- function(x) { vapply(x, cnd_message, FUN.VALUE = character(1)) } testthat/R/reporter-teamcity.R0000644000176200001440000000407013216707011016114 0ustar liggesusers#' @include reporter.R NULL #' Test reporter: Teamcity format. #' #' This reporter will output results in the Teamcity message format. 
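#'
#' For example, to emit Teamcity service messages for a whole test directory
#' (path illustrative):
#' \dontrun{
#' test_dir("tests/testthat", reporter = "teamcity")
#' }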
#' For more information about Teamcity messages, see #' http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity #' #' @export #' @family reporters TeamcityReporter <- R6::R6Class("TeamcityReporter", inherit = Reporter, public = list( i = NA_integer_, start_context = function(context) { private$report_event("testSuiteStarted", context) }, end_context = function(context) { private$report_event("testSuiteFinished", context) self$cat_line() self$cat_line() }, start_test = function(context, test) { private$report_event("testSuiteStarted", test) self$i <- 1L }, end_test = function(context, test) { private$report_event("testSuiteFinished", test) self$cat_line() }, add_result = function(context, test, result) { testName <- paste0("expectation ", self$i) self$i <- self$i + 1L if (expectation_skip(result)) { private$report_event("testIgnored", testName, message = format(result)) return() } private$report_event("testStarted", testName) if (!expectation_ok(result)) { lines <- strsplit(format(result), "\n")[[1]] private$report_event( "testFailed", testName, message = lines[1], details = paste(lines[-1], collapse = "\n") ) } private$report_event("testFinished", testName) } ), private = list( report_event = function(event, name, ...) { values <- list(name = name, ...) values <- vapply(values, teamcity_escape, character(1)) if (length(values) == 0) { value_string <- "" } else { value_string <- paste0(names(values), "='", values, "'", collapse = " ") } self$cat_line("##teamcity[", event, " ", value_string, "]") } ) ) # teamcity escape character is | teamcity_escape <- function(s) { s <- gsub("(['|]|\\[|\\])", "|\\1", s) gsub("\n", "|n", s) } testthat/R/test-path.R0000644000176200001440000000172213176113117014352 0ustar liggesusers#' Locate file in testing directory. #' #' This function is designed to work both interatively and during tests, #' locating files in the `tests/testthat` directory #' #' @param ... Character vectors giving path component. #' @return A character vector giving the path. #' @export test_path <- function(...) { if (in_testing_dir(".")) { path <- file.path(...) if (length(path) == 0) { path <- "." } } else { base <- "tests/testthat" if (!file.exists(base)) { stop( "Can't find `tests/testthat` in current directory.", call. = FALSE ) } path <- file.path(base, ...) } path } in_testing_dir <- function(path) { path <- normalizePath(path) if (basename(path) != "testthat") { return(FALSE) } parent <- dirname(path) if (grepl("-tests$", parent)) { # Probably called from tools::testInstalledPackage TRUE } else { basename(parent) %in% c("tests", "tests_x64", "tests_i386") } } testthat/R/test-example.R0000644000176200001440000000343013521025554015050 0ustar liggesusers#' Test package examples #' #' These helper functions make it easier to test the examples in a package. #' Each example counts as one test, and it succeeds if the code runs without #' an error. Generally, this is redundant with R CMD check, and is not #' recommended in routine practice. #' #' @keywords internal #' @param path For `test_examples()`, path to directory containing Rd files. #' For `test_example()`, path to a single Rd file. Remember the working #' directory for tests is `tests/testthat`. #' @param title Test title to use #' @param rd A parsed Rd object, obtained from [tools::Rd_db()] or otherwise. #' @export test_examples <- function(path = "../..") { res <- test_examples_source(path) %||% test_examples_installed(env_test$package) if (is.null(res)) { stop("Could not find examples", call. 
= FALSE) } invisible(res) } test_examples_source <- function(path = "../..") { if (!dir.exists(file.path(path, "man"))) { return() } Rd <- tools::Rd_db(dir = path) if (length(Rd) == 0) { return() } lapply(Rd, test_rd) } test_examples_installed <- function(package = env_test$package) { if (is.null(package)) { return() } Rd <- tools::Rd_db(package = package) if (length(Rd) == 0) { return() } lapply(Rd, test_rd) } #' @export #' @rdname test_examples test_rd <- function(rd, title = attr(rd, "Rdfile")) { test_example(rd, title) } #' @export #' @rdname test_examples test_example <- function(path, title = path) { ex_path <- tempfile(fileext = ".R") tools::Rd2ex(path, ex_path) if (!file.exists(ex_path)) { return(invisible(FALSE)) } env <- new.env(parent = globalenv()) ok <- test_code(title, parse(ex_path, encoding = "UTF-8"), env = env, skip_on_empty = FALSE ) if (ok) succeed(path) invisible(ok) } testthat/R/reporter-multi.R0000644000176200001440000000273313266145057015446 0ustar liggesusers#' @include reporter.R NULL #' Multi reporter: combine several reporters in one. #' #' This reporter is useful to use several reporters at the same time, e.g. #' adding a custom reporter without removing the current one. #' #' @export #' @family reporters MultiReporter <- R6::R6Class("MultiReporter", inherit = Reporter, public = list( reporters = list(), initialize = function(reporters = list()) { super$initialize() self$reporters <- reporters }, start_reporter = function() { o_apply(self$reporters, "start_reporter") }, start_file = function(filename) { o_apply(self$reporters, "start_file", filename) }, start_context = function(context) { o_apply(self$reporters, "start_context", context) }, start_test = function(context, test) { o_apply(self$reporters, "start_test", context, test) }, add_result = function(context, test, result) { o_apply(self$reporters, "add_result", context = context, test = test, result = result) }, end_test = function(context, test) { o_apply(self$reporters, "end_test", context, test) }, end_context = function(context) { o_apply(self$reporters, "end_context", context) }, end_reporter = function() { o_apply(self$reporters, "end_reporter") }, end_file = function() { o_apply(self$reporters, "end_file") } ) ) o_apply <- function(objects, method, ...) { lapply(objects, function(x) x[[method]](...)) } testthat/R/example.R0000644000176200001440000000114113456034771014100 0ustar liggesusers#' Retrieve paths to built-in example test files #' #' `testthat_examples()` retrieves path to directory of test files, #' `testthat_example()` retrieves path to a single test file. #' #' @keywords internal #' @param filename Name of test file #' @export #' @examples #' dir(testthat_examples()) #' testthat_example("success") testthat_examples <- function() { system.file("examples", package = "testthat") } #' @export #' @rdname testthat_examples testthat_example <- function(filename) { system.file( "examples", paste0("test-", filename, ".R"), package = "testthat", mustWork = TRUE ) } testthat/R/expect-messages.R0000644000176200001440000001003513564563701015544 0ustar liggesusers#' Expectation: does code produce warnings or messages? #' #' Use `expect_message()` and `expect_warning()` to check if the messages or #' warnings match the given regular expression. #' #' @inheritParams expect_that #' @param regexp Regular expression to test against. 
#' * A character vector giving a regular expression that must match the #' message/warning #' * If `NULL`, the default, asserts that there should be a messsage/warning, #' but doesn't test for a specific value. #' * If `NA`, asserts that there shouldn't be any messages or warnings. #' @inheritDotParams expect_match -object -regexp -info -label #' @param all Do messages/warnings need to match the `regexp` (`TRUE`), or #' does only one need to match (`FALSE`)? #' @family expectations #' @return The first argument, invisibly. #' @examples #' # Messages ------------------------------------------------------------------ #' #' f <- function(x) { #' if (x < 0) { #' message("*x* is already negative") #' return(x) #' } #' #' -x #' } #' expect_message(f(-1)) #' expect_message(f(-1), "already negative") #' expect_message(f(1), NA) #' #' # To test message and output, store results to a variable #' expect_message(out <- f(-1), "already negative") #' expect_equal(out, -1) #' #' # You can use the arguments of grepl to control the matching #' expect_message(f(-1), "*x*", fixed = TRUE) #' expect_message(f(-1), "NEGATIVE", ignore.case = TRUE) #' #' # Warnings ------------------------------------------------------------------ #' f <- function(x) { #' if (x < 0) { #' warning("*x* is already negative") #' return(x) #' } #' -x #' } #' expect_warning(f(-1)) #' expect_warning(f(-1), "already negative") #' expect_warning(f(1), NA) #' #' # To test message and output, store results to a variable #' expect_warning(out <- f(-1), "already negative") #' expect_equal(out, -1) #' #' # You can use the arguments of grepl to control the matching #' expect_warning(f(-1), "*x*", fixed = TRUE) #' expect_warning(f(-1), "NEGATIVE", ignore.case = TRUE) #' #' @export expect_message <- function(object, regexp = NULL, ..., all = FALSE, info = NULL, label = NULL ) { act <- quasi_capture(enquo(object), label, capture_messages) msg <- compare_messages(act$cap, act$lab, regexp = regexp, all = all, ...) expect(is.null(msg), msg, info = info) invisible(act$val) } #' @export #' @rdname expect_message expect_warning <- function(object, regexp = NULL, ..., all = FALSE, info = NULL, label = NULL ) { act <- quasi_capture(enquo(object), label, capture_warnings) msg <- compare_messages( act$cap, act$lab, regexp = regexp, all = all, ..., cond_type = "warnings" ) expect(is.null(msg), msg, info = info) invisible(act$val) } # Helpers ----------------------------------------------------------------- compare_messages <- function(messages, lab, regexp = NA, ..., all = FALSE, cond_type = "messages") { bullets <- paste0("* ", messages, collapse = "\n") # Expecting no messages if (identical(regexp, NA)) { if (length(messages) > 0) { return(sprintf("%s generated %s:\n%s", lab, cond_type, bullets)) } else { return() } } # Otherwise we're definitely expecting messages if (length(messages) == 0) { return(sprintf("%s did not produce any %s.", lab, cond_type)) } if (is.null(regexp)) { return() } matched <- grepl(regexp, messages, ...) # all/any ok if ((all && all(matched)) || (!all && any(matched))) { return() } sprintf( "%s produced unexpected %s.\n%s\n%s", lab, cond_type, paste0("Expected match: ", encodeString(regexp)), paste0("Actual values:\n", bullets) ) } testthat/R/expect-comparison.R0000644000176200001440000000511713456034771016114 0ustar liggesusers#' Expectation: is returned value less or greater than specified value? #' #' @inheritParams expect_that #' @inheritParams expect_equal #' @param expected Single numeric value to compare. 
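#'
#' The non-strict variants work the same way, e.g.:
#' expect_lte(10, 10)
#' expect_gte(10, 10)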
#' @family expectations #' @examples #' a <- 9 #' expect_lt(a, 10) #' #' \dontrun{ #' expect_lt(11, 10) #' } #' #' a <- 11 #' expect_gt(a, 10) #' \dontrun{ #' expect_gt(9, 10) #' } #' @name comparison-expectations NULL expect_compare <- function(operator = c("<", "<=", ">", ">="), act, exp) { operator <- match.arg(operator) op <- match.fun(operator) msg <- c( "<" = "not strictly less than", "<=" = "not less than", ">" = "not strictly more than", ">=" = "not more than" )[[operator]] cmp <- op(act$val, exp$val) if (length(cmp) != 1 || !is.logical(cmp)) { abort("Result of comparison must be a single logical value") } expect( if (!is.na(cmp)) cmp else FALSE, sprintf("%s is %s %s. Difference: %.3g", act$lab, msg, exp$lab, act$val - exp$val) ) invisible(act$val) } #' @export #' @rdname comparison-expectations expect_lt <- function(object, expected, label = NULL, expected.label = NULL) { act <- quasi_label(enquo(object), label, arg = "object") exp <- quasi_label(enquo(expected), expected.label, arg = "expected") expect_compare("<", act, exp) } #' @export #' @rdname comparison-expectations expect_lte <- function(object, expected, label = NULL, expected.label = NULL) { act <- quasi_label(enquo(object), label, arg = "object") exp <- quasi_label(enquo(expected), expected.label, arg = "expected") expect_compare("<=", act, exp) } #' @export #' @rdname comparison-expectations expect_gt <- function(object, expected, label = NULL, expected.label = NULL) { act <- quasi_label(enquo(object), label, arg = "object") exp <- quasi_label(enquo(expected), expected.label, arg = "expected") expect_compare(">", act, exp) } #' @export #' @rdname comparison-expectations expect_gte <- function(object, expected, label = NULL, expected.label = NULL) { act <- quasi_label(enquo(object), label, arg = "object") exp <- quasi_label(enquo(expected), expected.label, arg = "expected") expect_compare(">=", act, exp) } # Wordy names ------------------------------------------------------------- #' @export #' @rdname comparison-expectations #' @usage NULL expect_less_than <- function(...) { warning("Deprecated: please use `expect_lt()` instead", call. = FALSE) expect_lt(...) } #' @export #' @rdname comparison-expectations #' @usage NULL expect_more_than <- function(...) { warning("Deprecated: please use `expect_gt()` instead", call. = FALSE) expect_gt(...) } testthat/R/expect-invisible.R0000644000176200001440000000235613564563701015730 0ustar liggesusers#' Expectation: does expression return visibily or invisibly? #' #' Use this to test whether a function returns a visible or invisible #' output. Typically you'll use this to check that functions called primarily #' for their side-effects return their data argument invisibly. #' #' @param call A function call. #' @inheritParams expect_that #' @return The evaluated `call`, invisibly. #' @export #' @examples #' expect_invisible(x <- 10) #' expect_visible(x) #' #' # Typically you'll assign the result of the expectation so you can #' # also check that the value is as you expect. 
#' greet <- function(name) { #' message("Hi ", name) #' invisible(name) #' } #' out <- expect_invisible(greet("Hadley")) #' expect_equal(out, "Hadley") expect_invisible <- function(call, label = NULL) { lab <- label %||% expr_label(enexpr(call)) vis <- withVisible(call) expect( identical(vis$visible, FALSE), sprintf("%s does not return invisibly", lab) ) invisible(vis$value) } #' @export #' @rdname expect_invisible expect_visible <- function(call, label = NULL) { lab <- label %||% expr_label(enexpr(call)) vis <- withVisible(call) expect( identical(vis$visible, TRUE), sprintf("%s does not return visibly", lab) ) invisible(vis$value) } testthat/R/utils-io.R0000644000176200001440000000062413216707011014203 0ustar liggesusersreadLines <- function(...) stop("Use read_lines!") writeLines <- function(...) stop("Use write_lines!") read_lines <- function(path, n = -1L, encoding = "UTF-8") { base::readLines(path, n = n, encoding = encoding, warn = FALSE) } write_lines <- function(text, path) { base::writeLines(enc2utf8(text), path, useBytes = TRUE) } all_utf8 <- function(x) { ! any(is.na(iconv(x, "UTF-8", "UTF-8"))) } testthat/R/watcher.R0000644000176200001440000000607213176113117014101 0ustar liggesusers#' Watch a directory for changes (additions, deletions & modifications). #' #' This is used to power the [auto_test()] and #' [auto_test_package()] functions which are used to rerun tests #' whenever source code changes. #' #' Use Ctrl + break (windows), Esc (mac gui) or Ctrl + C (command line) to #' stop the watcher. #' #' @param path character vector of paths to watch. Omit trailing backslash. #' @param pattern file pattern passed to [dir()] #' @param callback function called every time a change occurs. It should #' have three parameters: added, deleted, modified, and should return #' TRUE to keep watching, or FALSE to stop. #' @param hash hashes are more accurate at detecting changes, but are slower #' for large files. When FALSE, uses modification time stamps #' @export #' @keywords internal watch <- function(path, callback, pattern = NULL, hash = TRUE) { prev <- dir_state(path, pattern, hash = hash) repeat { Sys.sleep(1) curr <- dir_state(path, pattern, hash = hash) changes <- compare_state(prev, curr) if (changes$n > 0) { # cat("C") keep_going <- TRUE try(keep_going <- callback(changes$added, changes$deleted, changes$modified)) if (!isTRUE(keep_going)) return(invisible()) } else { # cat(".") } prev <- curr } } #' Compute a digest of a file, returning NA if the file doesn't #' exist. #' #' @param path path of the file to compute a digest of #' @return a digest of the file, or NA if it doesn't exist. #' @keywords internal safe_digest <- function(path) { if (!file.exists(path)) return(NA_character_) if (is_directory(path)) return(NA_character_) if (!is_readable(path)) return(NA_character_) digest::digest(path, file = TRUE) } #' Capture the state of a directory. #' #' @param path path to directory #' @param pattern regular expression with which to filter files #' @param hash use hash (slow but accurate) or time stamp (fast but less #' accurate) #' @keywords internal dir_state <- function(path, pattern = NULL, hash = TRUE) { files <- dir(path, pattern, full.names = TRUE) # It's possible for any of the files to be deleted between the dir() # call above and the calls below; `file.info` handles this # gracefully, but digest::digest doesn't -- so we wrap it. Both # cases will return NA for files that have gone missing. 
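  # The result is a named vector: names are the file paths, values are either
  # digests (hash = TRUE) or modification times (hash = FALSE). compare_state()
  # relies on these names to work out what was added, deleted or modified.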
if (hash) { file_states <- vapply(files, safe_digest, character(1)) } else { file_states <- stats::setNames(file.info(files)$mtime, files) } file_states[!is.na(file_states)] } #' Compare two directory states. #' #' @param old previous state #' @param new current state #' @return list containing number of changes and files which have been #' `added`, `deleted` and `modified` #' @keywords internal compare_state <- function(old, new) { added <- setdiff(names(new), names(old)) deleted <- setdiff(names(old), names(new)) same <- intersect(names(old), names(new)) modified <- names(new[same])[new[same] != old[same]] n <- length(added) + length(deleted) + length(modified) list(n = n, added = added, deleted = deleted, modified = modified) } testthat/R/praise.R0000644000176200001440000000112513266145057013731 0ustar liggesusers# nocov start praise <- function() { x <- c( "You rock!", "You are a coding rockstar!", "Keep up the good work.", ":)", "Woot!", "Way to go!", "Nice code.", praise::praise("Your tests are ${adjective}!"), praise::praise("${EXCLAMATION} - ${adjective} code.") ) sample(x, 1) } encourage <- function() { x <- c( "Keep trying!", "Don't worry, you'll get it.", "No one is perfect!", "No one gets it right on their first try", "Frustration is a natural part of programming :)", "I believe in you!" ) sample(x, 1) } # nocov end testthat/R/reporter-check.R0000644000176200001440000000513513552020261015353 0ustar liggesusers#' @include reporter.R NULL #' Check reporter: 13 line summary of problems #' #' `R CMD check` displays only the last 13 lines of the result, so this #' report is design to ensure that you see something useful there. #' #' @export #' @family reporters CheckReporter <- R6::R6Class("CheckReporter", inherit = Reporter, public = list( failures = list(), n_ok = 0L, n_skip = 0L, n_fail = 0L, n_warn = 0L, stop_on_failure = TRUE, initialize = function(stop_on_failure = TRUE, ...) { self$stop_on_failure <- stop_on_failure super$initialize(...) }, add_result = function(context, test, result) { if (expectation_skip(result)) { self$n_skip <- self$n_skip + 1L return() } if (expectation_warning(result)) { self$n_warn <- self$n_warn + 1L return() } if (expectation_ok(result)) { self$n_ok <- self$n_ok + 1L return() } self$n_fail <- self$n_fail + 1L self$failures[[self$n_fail]] <- result self$cat_line(failure_summary(result, self$n_fail)) self$cat_line() }, end_reporter = function() { self$rule("testthat results ", line = 2) self$cat_line( "[ ", "OK: ", self$n_ok, " | ", "SKIPPED: ", self$n_skip, " | ", "WARNINGS: ", self$n_warn, " | ", "FAILED: ", self$n_fail, " ]" ) if (self$n_fail == 0) return() if (self$n_fail > 10) { show <- self$failures[1:9] } else { show <- self$failures } fails <- vapply(show, failure_header, character(1)) if (self$n_fail > 10) { fails <- c(fails, "...") } labels <- format(paste0(1:length(show), ".")) self$cat_line(paste0(labels, " ", fails, collapse = "\n")) self$cat_line() if (self$stop_on_failure) { stop("testthat unit tests failed", call. = FALSE) } } ) ) skip_summary <- function(x, label) { header <- paste0(label, ". ", x$test) paste0( colourise(header, "skip"), src_loc(x$srcref), " - ", x$message ) } failure_summary <- function(x, label, width = cli::console_width()) { header <- paste0(label, ". 
", failure_header(x)) paste0( cli::rule(header, col = testthat_style("error")), "\n", format(x) ) } failure_header <- function(x) { type <- switch(expectation_type(x), error = "Error", failure = "Failure" ) paste0(type, ": ", x$test, src_loc(x$srcref), " ") } src_loc <- function(ref) { if (is.null(ref)) { "" } else { paste0(" (@", basename(attr(ref, "srcfile")$filename), "#", ref[1], ")") } } testthat/R/context.R0000644000176200001440000000136213456034771014136 0ustar liggesusers#' Describe the context of a set of tests. #' #' Use of `context()` is no longer recommend. Instead omit it, and messages #' will use the name of the file instead. This ensures that the context and #' test file name are always in sync. #' #' A context defines a set of tests that test related functionality. Usually #' you will have one context per file, but you may have multiple contexts #' in a single file if you so choose. #' #' @param desc description of context. Should start with a capital letter. #' @keywords internal #' @export #' @examples #' context("String processing") #' context("Remote procedure calls") context <- function(desc) { get_reporter()$.start_context(desc) } end_context <- function() { get_reporter()$.end_context() } testthat/R/reporter-silent.R0000644000176200001440000000133513176113117015577 0ustar liggesusers#' @include reporter.R NULL #' Test reporter: gather all errors silently. #' #' This reporter quietly runs all tests, simply gathering all expectations. #' This is helpful for programmatically inspecting errors after a test run. #' You can retrieve the results with the `expectations()` #' method. #' #' @export #' @family reporters SilentReporter <- R6::R6Class("SilentReporter", inherit = Reporter, public = list( .expectations = NULL, initialize = function(...) { super$initialize(...) self$.expectations <- Stack$new() }, add_result = function(context, test, result) { self$.expectations$push(result) }, expectations = function() { self$.expectations$as_list() } ) ) testthat/R/reporter-rstudio.R0000644000176200001440000000140313176113117015766 0ustar liggesusers#' @include reporter.R NULL #' Test reporter: RStudio #' #' This reporter is designed for output to RStudio. It produces results in #' any easily parsed form. #' #' @export #' @family reporters RstudioReporter <- R6::R6Class("RstudioReporter", inherit = Reporter, public = list( add_result = function(context, test, result) { if (expectation_success(result)) { return() } ref <- result$srcref if (is.null(ref)) { location <- "?#?:?" } else { location <- paste0(attr(ref, "srcfile")$filename, "#", ref[1], ":1") } status <- expectation_type(result) first_line <- strsplit(result$message, "\n")[[1]][1] self$cat_line(location, " [", status, "] ", test, ". ", first_line) } ) ) testthat/R/expect-equality.R0000644000176200001440000000766413456034771015610 0ustar liggesusers#' Expectation: is the object equal to a value? #' #' - `expect_identical()` compares values with [identical()]. #' - `expect_equal()` compares values with [all.equal()] #' - `expect_equivalent()` compares values with [all.equal()] and #' `check.attributes = FALSE` #' - `expect_reference()` compares the underlying memory addresses. # #' @param object,expected Computation and value to compare it to. #' #' Both arguments supports limited unquoting to make it easier to generate #' readable failures within a function or for loop. See [quasi_label] for #' more details. #' @param label,expected.label Used to customise failure messages. For expert #' use only. 
#' @seealso `expect_setequal()` to test for set equality. #' @inheritParams expect_that #' @family expectations #' @examples #' a <- 10 #' expect_equal(a, 10) #' #' # Use expect_equal() when testing for numeric equality #' sqrt(2) ^ 2 - 1 #' expect_equal(sqrt(2) ^ 2, 2) #' # Neither of these forms take floating point representation errors into #' # account #' \dontrun{ #' expect_true(sqrt(2) ^ 2 == 2) #' expect_identical(sqrt(2) ^ 2, 2) #' } #' #' # You can pass on additional arguments to all.equal: #' \dontrun{ #' # Test the ABSOLUTE difference is within .002 #' expect_equal(10.01, 10, tolerance = .002, scale = 1) #' } #' #' # Test the RELATIVE difference is within .002 #' x <- 10 #' expect_equal(10.01, expected = x, tolerance = 0.002, scale = x) #' #' # expect_equivalent ignores attributes #' a <- b <- 1:3 #' names(b) <- letters[1:3] #' expect_equivalent(a, b) #' @name equality-expectations NULL #' @export #' @rdname equality-expectations #' @param ... For `expect_equal()` and `expect_equivalent()`, passed on #' [compare()], for `expect_identical()` passed on to [identical()]. #' Used to control the details of the comparison. expect_equal <- function(object, expected, ..., info = NULL, label = NULL, expected.label = NULL) { act <- quasi_label(enquo(object), label, arg = "object") exp <- quasi_label(enquo(expected), expected.label, arg = "expected") comp <- compare(act$val, exp$val, ...) expect( comp$equal, sprintf("%s not equal to %s.\n%s", act$lab, exp$lab, comp$message), info = info ) invisible(act$val) } #' @export #' @rdname equality-expectations expect_equivalent <- function(object, expected, ..., info = NULL, label = NULL, expected.label = NULL) { act <- quasi_label(enquo(object), label, arg = "object") exp <- quasi_label(enquo(expected), expected.label, arg = "expected") comp <- compare(act$val, exp$val, ..., check.attributes = FALSE) expect( comp$equal, sprintf("%s not equivalent to %s.\n%s", act$lab, exp$lab, comp$message), info = info ) invisible(act$val) } #' @export #' @rdname equality-expectations expect_identical <- function(object, expected, info = NULL, label = NULL, expected.label = NULL, ...) { act <- quasi_label(enquo(object), label, arg = "object") exp <- quasi_label(enquo(expected), expected.label, arg = "expected") ident <- identical(act$val, exp$val, ...) if (ident) { msg <- "" } else { compare <- compare(act$val, exp$val) if (compare$equal) { msg <- "Objects equal but not identical" } else { msg <- compare$message } } expect( ident, sprintf("%s not identical to %s.\n%s", act$lab, exp$lab, msg), info = info ) invisible(act$val) } #' @export #' @rdname equality-expectations expect_reference <- function(object, expected, info = NULL, label = NULL, expected.label = NULL) { act <- quasi_label(enquo(object), label, arg = "object") exp <- quasi_label(enquo(expected), expected.label, arg = "expected") expect( is_reference(act$val, exp$val), sprintf("%s not a reference %s.", act$lab, exp$lab), info = info ) invisible(act$val) } # expect_reference() needs dev version of rlang utils::globalVariables("is_reference") testthat/R/test-that.R0000644000176200001440000001443013564563701014367 0ustar liggesusers#' Create a test. #' #' A test encapsulates a series of expectations about small, self-contained #' set of functionality. Each test is contained in a \link{context} and #' contains multiple expectations. #' #' Tests are evaluated in their own environments, and should not affect #' global state. 
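#'
#' Because each test is run in its own environment, objects created inside a
#' test are discarded when the test finishes. A minimal illustration (not part
#' of the original examples; it assumes no object named `x` exists on the
#' search path):
#'
#' ```
#' test_that("local objects stay local", {
#'   x <- 1
#'   expect_equal(x, 1)
#' })
#' exists("x")  # FALSE
#' ```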
#' #' When run from the command line, tests return `NULL` if all #' expectations are met, otherwise it raises an error. #' #' @param desc test name. Names should be kept as brief as possible, as they #' are often used as line prefixes. #' @param code test code containing expectations #' @export #' @examples #' test_that("trigonometric functions match identities", { #' expect_equal(sin(pi / 4), 1 / sqrt(2)) #' expect_equal(cos(pi / 4), 1 / sqrt(2)) #' expect_equal(tan(pi / 4), 1) #' }) #' # Failing test: #' \dontrun{ #' test_that("trigonometric functions match identities", { #' expect_equal(sin(pi / 4), 1) #' }) #' } test_that <- function(desc, code) { code <- substitute(code) test_code(desc, code, env = parent.frame()) } # Access error fields with `[[` rather than `$` because the # `$.Throwable` from the rJava package throws with unknown fields test_code <- function(test, code, env = test_env(), skip_on_empty = TRUE) { if (!is.null(test)) { get_reporter()$start_test(context = get_reporter()$.context, test = test) on.exit(get_reporter()$end_test(context = get_reporter()$.context, test = test)) } ok <- TRUE # @param debug_end How many frames should be skipped to find the # last relevant frame call. Only useful for the DebugReporter. register_expectation <- function(e, debug_end) { # Find test environment on the stack start <- eval_bare(quote(base::sys.nframe()), test_env) + 1L srcref <- e[["srcref"]] %||% find_first_srcref(start) e <- as.expectation(e, srcref = srcref) # Data for the DebugReporter if (debug_end >= 0) { e$start_frame <- start e$end_frame <- sys.nframe() - debug_end - 1L } e$test <- test %||% "(unknown)" ok <<- ok && expectation_ok(e) get_reporter()$add_result(context = get_reporter()$.context, test = test, result = e) } frame <- sys.nframe() # Any error will be assigned to this variable first # In case of stack overflow, no further processing (not even a call to # signalCondition() ) might be possible test_error <- NULL expressions_opt <- getOption("expressions") expressions_opt_new <- min(expressions_opt + 500L, 500000L) # If no handlers are called we skip: BDD (`describe()`) tests are often # nested and the top level might not contain any expectations, so we need # some way to disable handled <- !skip_on_empty handle_error <- function(e) { handled <<- TRUE # First thing: Collect test error test_error <<- e # Increase option(expressions) to handle errors here if possible, even in # case of a stack overflow. This is important for the DebugReporter. # Call options() manually, avoid withr overhead. options(expressions = expressions_opt_new) on.exit(options(expressions = expressions_opt), add = TRUE) # Add structured backtrace to the expectation if (can_entrace(e)) { e <- cnd_entrace(e) } test_error <<- e # Error will be handled by handle_fatal() if this fails; need to do it here # to be able to debug with the DebugReporter register_expectation(e, 2) e[["handled"]] <- TRUE test_error <<- e } handle_fatal <- function(e) { handled <<- TRUE # Error caught in handle_error() has precedence if (!is.null(test_error)) { e <- test_error if (isTRUE(e[["handled"]])) { return() } } register_expectation(e, 0) } handle_expectation <- function(e) { handled <<- TRUE register_expectation(e, 6) invokeRestart("continue_test") } handle_warning <- function(e) { # When options(warn) < 0, warnings are expected to be ignored. if (getOption("warn") < 0) { handled <<- TRUE return() } # When options(warn) >= 2, warnings are converted to errors. 
# So, do not handle it here so that it will be handled by handle_error. if (getOption("warn") >= 2) { return() } handled <<- TRUE register_expectation(e, 5) maybe_restart("muffleWarning") } handle_message <- function(e) { handled <<- TRUE maybe_restart("muffleMessage") } handle_skip <- function(e) { handled <<- TRUE if (inherits(e, "skip_empty")) { # If we get here, `code` has already finished its evaluation. # Find the srcref in the `test_that()` frame above. e$srcref <- find_first_srcref(frame - 1) debug_end <- -1 } else { debug_end <- 2 } register_expectation(e, debug_end) signalCondition(e) } test_env <- new.env(parent = env) old <- options(rlang_trace_top_env = test_env)[[1]] on.exit(options(rlang_trace_top_env = old), add = TRUE) tryCatch( withCallingHandlers( { eval(code, test_env) if (!handled && !is.null(test)) { skip_empty() } }, expectation = handle_expectation, skip = handle_skip, warning = handle_warning, message = handle_message, error = handle_error ), # some errors may need handling here, e.g., stack overflow error = handle_fatal, # skip silently terminate code skip = function(e) {} ) invisible(ok) } #' R package to make testing fun! #' #' Try the example below. Have a look at the references and learn more #' from function documentation such as [expect_that()]. #' #' @section Options: #' - `testthat.use_colours`: Should the output be coloured? (Default: `TRUE`). #' - `testthat.summary.max_reports`: The maximum number of detailed test #' reports printed for the summary reporter (default: 10). #' - `testthat.summary.omit_dots`: Omit progress dots in the summary reporter #' (default: `FALSE`). #' #' @import rlang #' @keywords internal #' @useDynLib testthat, .registration = TRUE #' @references Wickham, H (2011). testthat: Get Started with Testing. #' \strong{The R Journal} \emph{3/1} 5-10. #' \url{https://journal.r-project.org/archive/2011-1/RJournal_2011-1_Wickham.pdf} #' #' \url{http://adv-r.had.co.nz/Testing.html} #' #' @examples #' library(testthat) #' a <- 9 #' expect_that(a, is_less_than(10)) #' expect_lt(a, 10) "_PACKAGE" testthat/R/test-files.R0000644000176200001440000001147413564563701014536 0ustar liggesusers#' Generate default testing environment. #' #' We use a new environment which inherits from [globalenv()]. #' In an ideal world, we'd avoid putting the global environment on the #' search path for tests, but it's not currently possible without losing #' the ability to load packages in tests. #' #' @keywords internal #' @export test_env <- function() { new.env(parent = globalenv()) } test_files <- function(paths, reporter = default_reporter(), env = test_env(), stop_on_failure = FALSE, stop_on_warning = FALSE, wrap = TRUE) { if (length(paths) == 0) { stop("No matching test file in dir") } current_reporter <- find_reporter(reporter) with_reporter( reporter = current_reporter, results <- lapply( paths, test_file, env = env, reporter = current_reporter, start_end_reporter = FALSE, load_helpers = FALSE, wrap = wrap ) ) results <- unlist(results, recursive = FALSE) results <- testthat_results(results) if (stop_on_failure && !all_passed(results)) { stop("Test failures", call. = FALSE) } if (stop_on_warning && any_warnings(results)) { stop("Tests generated warnings", call. = FALSE) } invisible(results) } # Filter File List for Tests, used by find_test_scripts filter_test_scripts <- function(files, filter = NULL, invert = FALSE, ...) 
{ if (!is.null(filter)) { test_names <- basename(files) test_names <- sub("^test-?", "", test_names) test_names <- sub("\\.[rR]$", "", test_names) which_files <- grepl(filter, test_names, ...) if (isTRUE(invert)) { which_files <- !which_files } files <- files[which_files] } files } #' Find the test files. #' @param path path to tests #' @param filter cf [test_dir()] #' @param invert If \sQuote{TRUE} return files which do \emph{not} match. #' @param ... Additional arguments passed to [grepl()] to control filtering. #' @return the test file paths #' @keywords internal #' @export find_test_scripts <- function(path, filter = NULL, invert = FALSE, ...) { files <- dir(path, "^test.*\\.[rR]$", full.names = TRUE) filter_test_scripts(files, filter, invert, ...) } #' Run all tests in specified file #' #' Execute code in the specified file, displaying results using a `reporter`. #' Use this function when you want to run a single file's worth of tests. #' You are responsible for ensuring that the functions to test are available #' in the global environment. #' #' Any errors that occur in code run outside of `test_that()` will generate #' a test failure and terminate execution of that test file. #' #' @param path Path to file. #' @param env Environment in which to execute the tests. Expert use only. #' @param load_helpers Source helper files before running the tests? #' See [source_test_helpers()] for more details. #' @param encoding Deprecated. All files now assumed to be UTF-8. #' @inheritParams with_reporter #' @inheritParams source_file #' @return Invisibily, a list with one element for each test. #' @export #' @examples #' path <- testthat_example("success") #' test_file(path, reporter = "minimal") #' #' # test_file() invisibly returns a list, with one element for each test. #' # This can be useful if you want to compute on your test results. #' out <- test_file(path, reporter = "minimal") #' str(out[[1]]) test_file <- function(path, reporter = default_reporter(), env = test_env(), start_end_reporter = TRUE, load_helpers = TRUE, encoding = "unknown", wrap = TRUE) { library(testthat) if (!file.exists(path)) { stop("`path` does not exist", call. = FALSE) } if (!missing(encoding) && !identical(encoding, "UTF-8")) { warning("`encoding` is deprecated; all files now assumed to be UTF-8", call. = FALSE) } reporter <- find_reporter(reporter) if (!is.null(reporter) && reporter$is_full()) return() if (load_helpers) { source_test_helpers(dirname(path), env = env) } lister <- ListReporter$new() if (!is.null(reporter)) { reporter <- MultiReporter$new(reporters = list(reporter, lister)) } else { reporter <- lister } on.exit(teardown_run(dirname(path)), add = TRUE) with_reporter( reporter = reporter, start_end_reporter = start_end_reporter, { # We need to notify the lister separately from the reporter, which is why # we call start_file methods twice. reporter$start_file(basename(path)) lister$start_file(basename(path)) source_file( path, new.env(parent = env), chdir = TRUE, wrap = wrap ) reporter$.end_context() # only ends if context was started reporter$end_file() } ) invisible(lister$get_results()) } testthat/R/expect-null.R0000644000176200001440000000111513456034771014706 0ustar liggesusers#' Expectation: is an object `NULL`? #' #' This is a special case because `NULL` is a singleton so it's possible #' check for it either with `expect_equal(x, NULL)` or `expect_type(x, "NULL")`. 
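#'
#' All three checks below pass for a `NULL` object; `expect_null()` is simply
#' the most direct way to state the intent (illustrative sketch only):
#'
#' ```
#' x <- NULL
#' expect_null(x)
#' expect_equal(x, NULL)
#' expect_type(x, "NULL")
#' ```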
#' #' @inheritParams expect_that #' @export #' @family expectations #' @examples #' x <- NULL #' y <- 10 #' #' expect_null(x) #' show_failure(expect_null(y)) expect_null <- function(object, info = NULL, label = NULL) { act <- quasi_label(enquo(object), label, arg = "object") expect( is.null(act$val), sprintf("%s is not null.", act$lab), info = info ) invisible(act$val) } testthat/R/reporter-minimal.R0000644000176200001440000000120713176113117015725 0ustar liggesusers#' @include reporter.R NULL #' Test reporter: minimal. #' #' The minimal test reporter provides the absolutely minimum amount of #' information: whether each expectation has succeeded, failed or experienced #' an error. If you want to find out what the failures and errors actually #' were, you'll need to run a more informative test reporter. #' #' @export #' @family reporters MinimalReporter <- R6::R6Class("MinimalReporter", inherit = Reporter, public = list( add_result = function(context, test, result) { self$cat_tight(single_letter_summary(result)) }, end_reporter = function() { self$cat_line() } ) ) testthat/R/expect-setequal.R0000644000176200001440000000632113521025554015553 0ustar liggesusers#' Expectation: do two vectors contain the same values? #' #' * `expect_setequal(x, y)` tests that every element of `x` occurs in `y`, #' and that every element of `y` occurs in `x`. #' * `expect_mapequal(x, y)` tests that `x` and `y` have the same names, and #' that `x[names(y)]` equals `x`. #' #' Note that `expect_setequal()` ignores names, and you will be warned if both #' `object` and `expected` have them. #' #' @inheritParams expect_equal #' @export #' @examples #' expect_setequal(letters, rev(letters)) #' show_failure(expect_setequal(letters[-1], rev(letters))) #' #' x <- list(b = 2, a = 1) #' expect_mapequal(x, list(a = 1, b = 2)) #' show_failure(expect_mapequal(x, list(a = 1))) #' show_failure(expect_mapequal(x, list(a = 1, b = "x"))) #' show_failure(expect_mapequal(x, list(a = 1, b = 2, c = 3))) expect_setequal <- function(object, expected) { act <- quasi_label(enquo(object), arg = "object") exp <- quasi_label(enquo(expected), arg = "expected") if (!is_vector(act$val) || !is_vector(exp$val)) { abort("`object` and `expected` must both be vectors") } if (!is.null(names(act$val)) && !is.null(names(exp$val))) { warn("expect_setequal() ignores names") } act_miss <- !act$val %in% exp$val if (any(act_miss)) { fail( paste0(act$lab, "[", locations(act_miss), "] absent from ", exp$lab) ) } exp_miss <- !exp$val %in% act$val if (any(exp_miss)) { fail( paste0(exp$lab, "[", locations(exp_miss), "] absent from ", act$lab) ) } if (!any(exp_miss) && !any(act_miss)) { succeed() } invisible(act$val) } is_vector <- function(x) is.list(x) || (is.atomic(x) && !is.null(x)) locations <- function(i) { loc <- which(i) if (length(loc) == 1) { return(loc) } if (length(loc) > 10) { loc <- c(loc[1:9], "...") } paste0("c(", paste0(loc, collapse = ", "), ")") } #' @export #' @rdname expect_setequal expect_mapequal <- function(object, expected) { act <- quasi_label(enquo(object), arg = "object") exp <- quasi_label(enquo(expected), arg = "expected") if (!is_vector(act$val) || !is_vector(exp$val)) { abort("`object` and `expected` must both be vectors") } # Length-0 vectors are OK whether named or unnamed. 
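  # When both are empty the expectation succeeds, but a warning is issued
  # below to flag the degenerate comparison.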
if (length(act$val) == 0 && length(exp$val) == 0) { warn("`object` and `expected` are empty lists") succeed() return(invisible(act$val)) } act_nms <- names(act$val) exp_nms <- names(exp$val) check_names_ok(act_nms, "object") check_names_ok(exp_nms, "expected") if (!setequal(act_nms, exp_nms)) { act_miss <- setdiff(exp_nms, act_nms) if (length(act_miss) > 0) { vals <- paste0(encodeString(act_miss, quote = '"'), ", ") fail(paste0("Names absent from `object`: ", vals)) } exp_miss <- setdiff(act_nms, exp_nms) if (length(exp_miss) > 0) { vals <- paste0(encodeString(exp_miss, quote = '"'), ", ") fail(paste0("Names absent from `expected`: ", vals)) } } else { expect_equal(act$val[exp_nms], exp$val) } invisible(act$val) } check_names_ok <- function(x, label) { if (anyDuplicated(x)) { stop("Duplicate names in `", label, "`: ", unique(x[duplicated(x)])) } if (any(x == "")) { stop("All elements in `", label, "` must be named") } } testthat/R/expect-output.R0000644000176200001440000000341613564563701015302 0ustar liggesusers#' Expectation: does code print output to the console? #' #' Test for output produced by `print()` or `cat()`. This is best used for #' very simple output; for more complex cases use [verify_output()]. #' #' @export #' @family expectations #' @inheritParams expect_that #' @param regexp Regular expression to test against. #' * A character vector giving a regular expression that must match the output. #' * If `NULL`, the default, asserts that there should output, #' but doesn't check for a specific value. #' * If `NA`, asserts that there should be no output. #' @inheritDotParams expect_match -object -regexp -info -label #' @inheritParams capture_output #' @return The first argument, invisibly. #' @examples #' str(mtcars) #' expect_output(str(mtcars), "32 obs") #' expect_output(str(mtcars), "11 variables") #' #' # You can use the arguments of grepl to control the matching #' expect_output(str(mtcars), "11 VARIABLES", ignore.case = TRUE) #' expect_output(str(mtcars), "$ mpg", fixed = TRUE) expect_output <- function(object, regexp = NULL, ..., info = NULL, label = NULL, width = 80 ) { act <- quasi_capture(enquo(object), label, capture_output, width = width) if (identical(regexp, NA)) { expect( identical(act$cap, ""), sprintf("%s produced output.\n%s", act$lab, encodeString(act$cap)), info = info ) } else if (is.null(regexp) || identical(act$cap, "")) { expect( !identical(act$cap, ""), sprintf("%s produced no output", act$lab), info = info ) } else { expect_match(act$cap, enc2native(regexp), ..., info = info, label = act$lab) } invisible(act$val) } testthat/R/expect-length.R0000644000176200001440000000104413456034771015216 0ustar liggesusers#' Expectation: does a vector have the specified length? #' #' @inheritParams expect_that #' @param n Expected length. #' @family expectations #' @export #' @examples #' expect_length(1, 1) #' expect_length(1:10, 10) #' #' \dontrun{ #' expect_length(1:10, 1) #' } expect_length <- function(object, n) { stopifnot(is.numeric(n), length(n) == 1) act <- quasi_label(enquo(object), arg = "object") act$n <- length(act$val) expect( act$n == n, sprintf("%s has length %i, not length %i.", act$lab, act$n, n) ) invisible(act$val) } testthat/R/test-directory.R0000644000176200001440000001616213564563701015437 0ustar liggesusers#' Run all tests in directory or package #' #' @description #' Use `test_dir()` for a collection of tests in a directory; use #' `test_package()` interactively at the console, and `test_check()` #' inside of `R CMD check`. 
#' #' In your own code, you can use `is_testing()` to determine if code is being #' run as part of a test and `testing_package()` to retrieve the name of the #' package being tested. You can also check the underlying env var directly #' `identical(Sys.getenv("TESTTHAT"), "true")` to avoid creating a run-time #' dependency on testthat. #' #' @section Test files: #' For package code, tests should live in `tests/testthat`. #' #' There are four classes of `.R` files that have special behaviour: #' #' * Test files start with `test` and are executed in alphabetical order. #' * Helper files start with `helper` and are executed before tests are #' run and from `devtools::load_all()`. #' * Setup files start with `setup` and are executed before tests, but not #' during `devtools::load_all()`. #' * Teardown files start with `teardown` and are executed after the tests #' are run. #' #' @section Environments: #' Each test is run in a clean environment to keep tests as isolated as #' possible. For package tests, that environment that inherits from the #' package's namespace environment, so that tests can access internal functions #' and objects. #' #' @section `R CMD check`: #' To run testthat automatically from `R CMD check`, make sure you have #' a `tests/testthat.R` that contains: #' #' ``` #' library(testthat) #' library(yourpackage) #' #' test_check("yourpackage") #' ``` #' #' @param path Path to directory containing tests. #' @param package Name of installed package. #' @param filter If not `NULL`, only tests with file names matching this #' regular expression will be executed. Matching be performed on the file #' name after it has been stripped of `"test-"` and `".R"`. #' @param ... Additional arguments passed to [grepl()] to control filtering. #' @param stop_on_failure If `TRUE`, throw an error if any tests fail. #' #' For historical reasons, the default value of `stop_on_failure` is `TRUE` #' for `test_package()` and `test_check()` but `FALSE` for `test_dir()`, so #' if you're calling `test_dir()` you may want to consider explicitly setting #' `stop_on_failure = TRUE`. #' @param stop_on_warning If `TRUE`, throw an error if any tests generate #' warnings. #' @inheritParams test_file #' @return A list of test results. #' @export #' @examples #' test_dir(testthat_examples(), reporter = "summary") #' test_dir(testthat_examples(), reporter = "minimal") test_dir <- function(path, filter = NULL, reporter = default_reporter(), env = test_env(), ..., encoding = "unknown", load_helpers = TRUE, stop_on_failure = FALSE, stop_on_warning = FALSE, wrap = TRUE) { if (!missing(encoding) && !identical(encoding, "UTF-8")) { warning("`encoding` is deprecated; all files now assumed to be UTF-8", call. = FALSE) } # Find package root, if any, so backtrace srcrefs refer to R/ and # tests/ files consistently testthat_dir <- maybe_root_dir(path) withr::local_envvar(list( R_TESTS = "", TESTTHAT = "true", TESTTHAT_DIR = testthat_dir )) if (load_helpers) { source_test_helpers(path, env) } source_test_setup(path, env) on.exit(source_test_teardown(path, env), add = TRUE) # Promote retirement stages except on CRAN if (identical(Sys.getenv("NOT_CRAN"), "true")) { withr::local_options(list(lifecycle_verbose_retirement = TRUE)) } paths <- find_test_scripts(path, filter, ...) 
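  # Run every matching file; test_files() errors if no file matches and,
  # depending on stop_on_failure/stop_on_warning, if any test fails or warns.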
test_files( paths, reporter = reporter, env = env, stop_on_failure = stop_on_failure, stop_on_warning = stop_on_warning, wrap = wrap ) } #' @export #' @rdname test_dir test_package <- function(package, filter = NULL, reporter = check_reporter(), ..., stop_on_failure = TRUE, stop_on_warning = FALSE) { library(testthat) # Ensure that test package returns silently if called recursively - this # will occur if test-all.R ends up in the same directory as all the other # tests. if (env_test$in_test) { return(invisible()) } env_test$in_test <- TRUE env_test$package <- package on.exit({ env_test$in_test <- FALSE env_test$package <- NULL }) test_path <- system.file("tests", package = package) if (test_path == "") { stop("No tests found for ", package, call. = FALSE) } # If testthat subdir exists, use that test_path2 <- file.path(test_path, "testthat") if (file.exists(test_path2)) { test_path <- test_path2 } else { warning( "Placing tests in `inst/tests` is deprecated. ", "Please use `tests/testthat` instead", call. = FALSE ) } test_package_dir( package = package, test_path = test_path, filter = filter, reporter = reporter, ..., stop_on_failure = stop_on_failure, stop_on_warning = stop_on_warning ) } #' @export #' @rdname test_dir test_check <- function(package, filter = NULL, reporter = check_reporter(), ..., stop_on_failure = TRUE, stop_on_warning = FALSE, wrap = TRUE) { library(testthat) require(package, character.only = TRUE) env_test$in_test <- TRUE env_test$package <- package on.exit({ env_test$in_test <- FALSE env_test$package <- NULL }) test_path <- "testthat" if (!utils::file_test("-d", test_path)) { stop("No tests found for ", package, call. = FALSE) } test_package_dir( package = package, test_path = test_path, filter = filter, reporter = reporter, ..., stop_on_failure = stop_on_failure, stop_on_warning = stop_on_warning, wrap = wrap ) } test_package_dir <- function(package, test_path, filter, reporter, ..., stop_on_failure = TRUE, stop_on_warning = FALSE, wrap = TRUE) { env <- test_pkg_env(package) withr::local_options(list(topLevelEnvironment = env)) withr::local_envvar(list( TESTTHAT_PKG = package, TESTTHAT_DIR = maybe_root_dir(test_path) )) test_dir( path = test_path, reporter = reporter, env = env, filter = filter, ..., stop_on_failure = stop_on_failure, stop_on_warning = stop_on_warning, wrap = wrap ) } #' @export #' @rdname test_dir is_testing <- function() { identical(Sys.getenv("TESTTHAT"), "true") } #' @export #' @rdname test_dir testing_package <- function() { Sys.getenv("TESTTHAT_PKG") } # Environment utils ------------------------------------------------------- env_test <- new.env(parent = emptyenv()) env_test$in_test <- FALSE env_test$package <- NULL test_pkg_env <- function(package) { list2env( as.list(getNamespace(package), all.names = TRUE), parent = parent.env(getNamespace(package)) ) } testthat/R/reporter-progress.R0000644000176200001440000001535413547623147016166 0ustar liggesusers#' @include reporter.R stack.R NULL #' Test reporter: interactive progress bar of errors. #' #' This reporter is a reimagining of [SummaryReporter] desgined to make the #' most information available up front, while taking up less space overall. It #' is the default reporting reporter used by [test_dir()] and [test_file()]. #' #' As an additional benefit, this reporter will praise you from time-to-time #' if all your tests pass. 
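#'
#' A minimal usage sketch (a reporter object, or any reporter name accepted by
#' `test_dir()`, can be supplied in the same way):
#'
#' ```
#' test_dir(testthat_examples(), reporter = ProgressReporter$new())
#' ```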
#' #' @export #' @family reporters ProgressReporter <- R6::R6Class("ProgressReporter", inherit = Reporter, public = list( show_praise = TRUE, min_time = 0.1, start_time = NULL, last_update = NULL, update_interval = NULL, max_fail = NULL, n_ok = 0, n_skip = 0, n_warn = 0, n_fail = 0, ctxt_start_time = NULL, ctxt_issues = NULL, ctxt_n = 0, ctxt_n_ok = 0, ctxt_n_skip = 0, ctxt_n_warn = 0, ctxt_n_fail = 0, ctxt_name = "", file_name = "", initialize = function(show_praise = TRUE, max_failures = getOption("testthat.progress.max_fails", 10L), min_time = 0.1, update_interval = 0.1, ...) { super$initialize(...) self$max_fail <- max_failures self$show_praise <- show_praise self$min_time <- min_time self$update_interval <- update_interval }, is_full = function() { self$n_fail >= self$max_fail }, start_reporter = function(context) { self$start_time <- proc.time() self$show_header() }, start_file = function(file) { self$file_name <- file # Need to set here in case file doesn't contain any tests self$ctxt_issues <- Stack$new() self$ctxt_start_time <- proc.time() }, start_test = function(context, test) { if (is.null(context)) { # We call the regular context() function rather than # self$start_context() or self$.start_context() here because when this # is called by a reporter inside the multi-reporter we need to assign # self$.context in the multi-reporter rather than in the contained # reporter. context(context_name(self$file_name)) } }, start_context = function(context) { self$ctxt_name <- context self$ctxt_issues <- Stack$new() self$ctxt_n <- 0L self$ctxt_n_ok <- 0L self$ctxt_n_fail <- 0L self$ctxt_n_warn <- 0L self$ctxt_n_skip <- 0L self$ctxt_start_time <- proc.time() self$show_status() }, show_header = function() { self$cat_line( cli::symbol$tick, " | OK ", colourise("F", "failure"), " ", colourise("W", "warning"), " ", colourise("S", "skip"), " | ", "Context" ) }, show_status = function(complete = FALSE) { if (complete) { if (self$ctxt_n_fail > 0) { status <- crayon::red(cli::symbol$cross) } else { status <- crayon::green(cli::symbol$tick) } } else { # Do not print if not enough time has passed since we last printed. 
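      # (the refresh rate is controlled by the `update_interval` field, which
      # should_update() compares against the time of the last update)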
if (!self$should_update()) { return() } status <- spinner(self$ctxt_n) } col_format <- function(n, type) { if (n == 0) { " " } else { n } } self$cat_tight( "\r", status, " | ", sprintf("%3d", self$ctxt_n_ok), " ", col_format(self$ctxt_n_fail, "fail"), " ", col_format(self$ctxt_n_warn, "warn"), " ", col_format(self$ctxt_n_skip, "skip"), " | ", self$ctxt_name ) }, end_context = function(context) { time <- proc.time() - self$ctxt_start_time self$last_update <- NULL self$show_status(complete = TRUE) if (time[[3]] > self$min_time) { self$cat_line(sprintf(" [%.1f s]", time[[3]]), col = "cyan") } else { self$cat_line() } if (self$ctxt_issues$size() > 0) { self$rule() issues <- self$ctxt_issues$as_list() summary <- vapply(issues, issue_summary, FUN.VALUE = character(1)) self$cat_tight(paste(summary, collapse = "\n\n")) self$cat_line() self$rule() } }, add_result = function(context, test, result) { self$ctxt_n <- self$ctxt_n + 1L if (expectation_broken(result)) { self$n_fail <- self$n_fail + 1 self$ctxt_n_fail <- self$ctxt_n_fail + 1 self$ctxt_issues$push(result) } else if (expectation_skip(result)) { self$n_skip <- self$n_skip + 1 self$ctxt_n_skip <- self$ctxt_n_skip + 1 self$ctxt_issues$push(result) } else if (expectation_warning(result)) { self$n_warn <- self$n_warn + 1 self$ctxt_n_warn <- self$ctxt_n_warn + 1 self$ctxt_issues$push(result) } else { self$n_ok <- self$n_ok + 1 self$ctxt_n_ok <- self$ctxt_n_ok + 1 } self$show_status() }, end_reporter = function() { self$cat_line() if (self$is_full()) { self$rule("Terminating early", line = 2) self$cat_line("Too many failures") return() } colour_if <- function(n, type) { colourise(n, if (n == 0) "success" else type) } self$rule(crayon::bold("Results"), line = 2) time <- proc.time() - self$start_time if (time[[3]] > self$min_time) { self$cat_line("Duration: ", sprintf("%.1f s", time[[3]]), col = "cyan") self$cat_line() } self$cat_line("OK: ", colourise(self$n_ok, "success")) self$cat_line("Failed: ", colour_if(self$n_fail, "fail")) self$cat_line("Warnings: ", colour_if(self$n_warn, "warn")) self$cat_line("Skipped: ", colour_if(self$n_skip, "skip")) if (!self$show_praise || runif(1) > 0.1) { return() } self$cat_line() if (self$n_fail == 0) { self$cat_line(colourise(praise(), "success")) } else { self$cat_line(colourise(encourage(), "error")) } }, should_update = function() { if (self$update_interval == 0) { return(TRUE) } time <- proc.time()[[3]] if (!is.null(self$last_update) && (time - self$last_update) < self$update_interval) { return(FALSE) } self$last_update <- time TRUE } ) ) spinner <- function(i) { frames <- cli::get_spinner()$frames frames[((i - 1) %% length(frames)) + 1] } issue_summary <- function(x) { type <- expectation_type(x) if (is.null(x$srcref)) { loc <- "???" } else { filename <- attr(x$srcref, "srcfile")$filename loc <- paste0(basename(filename), ":", x$srcref[1]) } header <- paste0(loc, ": ", colourise(type, type), ": ", x$test) paste0( crayon::bold(header), "\n", format(x) ) } testthat/R/reporter-summary.R0000644000176200001440000000762613522505175016013 0ustar liggesusers#' @include reporter.R stack.R NULL #' Test reporter: summary of errors. #' #' This is a reporter designed for interactive usage: it lets you know which #' tests have run successfully and as well as fully reporting information about #' failures and errors. #' #' You can use the `max_reports` field to control the maximum number #' of detailed reports produced by this reporter. 
This is useful when running #' with [auto_test()] #' #' As an additional benefit, this reporter will praise you from time-to-time #' if all your tests pass. #' #' @export #' @family reporters SummaryReporter <- R6::R6Class("SummaryReporter", inherit = Reporter, public = list( failures = NULL, skips = NULL, warnings = NULL, max_reports = NULL, show_praise = TRUE, omit_dots = FALSE, initialize = function(show_praise = TRUE, omit_dots = getOption("testthat.summary.omit_dots"), max_reports = getOption("testthat.summary.max_reports", 10L), ...) { super$initialize(...) self$failures <- Stack$new() self$skips <- Stack$new() self$warnings <- Stack$new() self$max_reports <- max_reports self$show_praise <- show_praise self$omit_dots <- omit_dots }, is_full = function() { self$failures$size() >= self$max_reports }, start_context = function(context) { self$cat_tight(context, ": ") }, end_context = function(context) { self$cat_line() }, add_result = function(context, test, result) { if (expectation_broken(result)) { self$failures$push(result) } else if (expectation_skip(result)) { self$skips$push(result) } else if (expectation_warning(result)) { self$warnings$push(result) } else { if (isTRUE(self$omit_dots)) { return() } } self$cat_tight(private$get_summary(result)) }, end_reporter = function() { skips <- self$skips$as_list() failures <- self$failures$as_list() warnings <- self$warnings$as_list() self$cat_line() private$cat_reports("Skipped", skips, Inf, skip_summary) private$cat_reports("Warnings", warnings, Inf, skip_summary) private$cat_reports("Failed", failures, self$max_reports, failure_summary) if (self$failures$size() >= self$max_reports) { self$cat_line( "Maximum number of ", self$max_reports, " failures reached, ", "some test results may be missing." ) self$cat_line() } self$rule("DONE", line = 2) if (self$show_praise) { if (length(failures) == 0 && runif(1) < 0.1) { self$cat_line(colourise(praise(), "success")) } if (length(failures) > 0 && runif(1) < 0.25) { self$cat_line(colourise(encourage(), "error")) } } } ), private = list( get_summary = function(result) { if (expectation_broken(result)) { if (self$failures$size() <= length(labels)) { return(colourise(labels[self$failures$size()], "error")) } } single_letter_summary(result) }, cat_reports = function(header, expectations, max_n, summary_fun, collapse = "\n\n") { n <- length(expectations) if (n == 0L) { return() } self$rule(header, line = 2) if (n > max_n) { expectations <- expectations[seq_len(max_n)] } labels <- seq_along(expectations) exp_summary <- function(i) { summary_fun(expectations[[i]], labels[i]) } report_summary <- vapply(seq_along(expectations), exp_summary, character(1)) self$cat_tight(paste(report_summary, collapse = collapse)) if (n > max_n) { self$cat_line() self$cat_line(" ... and ", n - max_n, " more") } self$cat_line() self$cat_line() } ) ) labels <- c(1:9, letters, LETTERS) testthat/R/expect-condition.R0000644000176200001440000001760313564563701015733 0ustar liggesusers#' Expectation: does code throw error or other condition? #' #' `expect_error()` and `expect_condition()` check that code throws an error #' or condition with a message that matches `regexp`, or a class that inherits #' from `class`. See below for more details. #' #' @section Testing `message` vs `class`: #' When checking that code generates an error, it's important to check that the #' error is the one you expect. There are two ways to do this. 
The first #' way is the simplest: you just provide a `regexp` that match some fragment #' of the error message. This is easy, but fragile, because the test will #' fail if the error message changes (even if its the same error). #' #' A more robust way is to test for the class of the error, if it has one. #' You can learn more about custom conditions at #' , but in #' short, errors are S3 classes and you can generate a custom class and check #' for it using `class` instead of `regexp`. Because this is a more reliable #' check, you `expect_error()` will warn if the error has a custom class but #' you are testing the message. Eliminate the warning by using `class` instead #' of `regexp`. Alternatively, if you think the warning is a false positive, #' use `class = "error"` to suppress it for any input. #' #' If you are using `expect_error()` to check that an error message is #' formatted in such a way that it makes sense to a human, we now recommend #' using [verify_output()] instead. #' #' @export #' @family expectations #' @inheritParams expect_that #' @param regexp Regular expression to test against. #' * A character vector giving a regular expression that must match the #' error message. #' * If `NULL`, the default, asserts that there should be a error, #' but doesn't test for a specific value. #' * If `NA`, asserts that there should be no errors. #' @inheritDotParams expect_match -object -regexp -info -label #' @param class Instead of supplying a regular expression, you can also supply #' a class name. This is useful for "classed" conditions. #' @return If `regexp = NA`, the value of the first argument; otherwise #' the captured condition. #' @examples #' f <- function() stop("My error!") #' expect_error(f()) #' expect_error(f(), "My error!") #' #' # You can use the arguments of grepl to control the matching #' expect_error(f(), "my error!", ignore.case = TRUE) #' #' # If you are working with classed conditions, it's better to test for #' # the class name, rather than the error message (which may change over time) #' custom_err <- function(var) { #' rlang::abort("A special error", var = var, .subclass = "testthat_special") #' } #' expect_error(custom_err("a"), class = "testthat_special") #' #' # Note that `expect_error()` returns the error object so you can test #' # its components if needed #' err <- expect_error(custom_err("a"), class = "testthat_special") #' expect_equal(err$var, "a") expect_error <- function(object, regexp = NULL, class = NULL, ..., info = NULL, label = NULL ) { act <- quasi_capture(enquo(object), label, capture_error, entrace = TRUE) msg <- compare_condition(act$cap, act$lab, regexp = regexp, class = class, ...) # Access error fields with `[[` rather than `$` because the # `$.Throwable` from the rJava package throws with unknown fields expect(is.null(msg), msg, info = info, trace = act$cap[["trace"]]) if (!is.null(act$cap)) { if (is_informative_error(act$cap) && is.null(class) && !is.null(regexp)) { klass <- paste0(encodeString(class(act$cap), quote = '"'), collapse = ", ") warn(paste0( act$lab, " generated an S3 error and you are testing the error message.\n", "* The error has class = c(", klass, ")\n", "* Testing with `class` is more robust than testing with `regexp`.\n", "* Do you want `expect_error(..., class = \"", class(act$cap)[[1]], "\")`?" 
)) } } invisible(act$val %||% act$cap) } #' @export #' @rdname expect_error expect_condition <- function(object, regexp = NULL, class = NULL, ..., info = NULL, label = NULL ) { act <- quasi_capture(enquo(object), label, capture_condition, entrace = TRUE) msg <- compare_condition( act$cap, act$lab, regexp = regexp, class = class, ..., cond_type = "condition" ) expect(is.null(msg), msg, info = info, trace = act$cap[["trace"]]) invisible(act$val %||% act$cap) } # Helpers ----------------------------------------------------------------- compare_condition <- function(cond, lab, regexp = NULL, class = NULL, ..., cond_type = "error") { # Expecting no condition if (identical(regexp, NA)) { if (!is.null(cond)) { return(sprintf( "%s threw an %s.\nMessage: %s\nClass: %s", lab, cond_type, cnd_message(cond), paste(class(cond), collapse = "/") )) } else { return() } } # Otherwise we're definitely expecting a condition if (is.null(cond)) { return(sprintf("%s did not throw an %s.", lab, cond_type)) } message <- cnd_message(cond) ok_class <- is.null(class) || inherits(cond, class) ok_msg <- is.null(regexp) || grepl(regexp, message, ...) # All good if (ok_msg && ok_class) { return() } problems <- c(if (!ok_class) "class", if (!ok_msg) "message") details <- c( if (!ok_class) { sprintf( "Expected class: %s\nActual class: %s\nMessage: %s", paste0(class, collapse = "/"), paste0(class(cond), collapse = "/"), message ) }, if (!ok_msg) { sprintf( "Expected match: %s\nActual message: %s", encodeString(regexp, quote = '"'), encodeString(message, quote = '"') ) } ) sprintf( "%s threw an %s with unexpected %s.\n%s", lab, cond_type, paste(problems, collapse = " and "), paste(details, collapse = "\n") ) } # Disable rlang backtrace reminders so they don't interfere with # expected error messages cnd_message <- function(x) { withr::local_options(c(rlang_backtrace_on_error = "none")) conditionMessage(x) } #' Is an error informative? #' #' @description #' #' `is_informative_error()` is a generic predicate that indicates #' whether testthat users should explicitly test for an error #' class. When it returns `TRUE` (the default), and `expect_error()` #' does not check for the class, a warning is issued during tests. #' You can silence the warning by implementing `is_informative_error()`. #' #' The main use case for overriding this method is to introduce an #' experimental error class when you need more experience while #' developing an error hierarchy for your package. Override #' `is_informative_error()` to return `FALSE` to avoid encouraging #' users to depend on the experimental class in their tests. #' #' Since testthat should be a `Suggest` dependency, methods for #' `is_informative_error()` should typically be lazily registered, #' e.g. with `vctrs::s3_register()`. #' #' @param x An error object. #' @inheritParams ellipsis::dots_empty #' #' @details #' A few classes are hard-coded as uninformative: #' - `simpleError` #' - `rlang_error` unless a subclass is detected #' - `Rcpp::eval_error` #' - `Rcpp::exception` #' #' @keywords internal #' @export is_informative_error <- function(x, ...) { ellipsis::check_dots_empty() if (!inherits(x, "error")) { return(TRUE) } if (inherits(x, c("simpleError", "Rcpp::eval_error", "Rcpp::exception"))) { return(FALSE) } if (inherits_only(x, c("rlang_error", "error", "condition"))) { return(FALSE) } UseMethod("is_informative_error") } #' @export is_informative_error.default <- function(x, ...) 
{ TRUE } testthat/R/reporter-list.R0000644000176200001440000001247713564563701015276 0ustar liggesusers#' @include reporter.R NULL methods::setOldClass("proc_time") #' List reporter: gather all test results along with elapsed time and #' file information. #' #' This reporter gathers all results, adding additional information such as #' test elapsed time, and test filename if available. Very useful for reporting. #' #' @export #' @family reporters ListReporter <- R6::R6Class("ListReporter", inherit = Reporter, public = list( current_start_time = NA, current_expectations = NULL, current_file = NULL, results = NULL, initialize = function() { super$initialize() self$results <- Stack$new() }, start_test = function(context, test) { self$current_expectations <- Stack$new() self$current_start_time <- proc.time() }, add_result = function(context, test, result) { if (is.null(self$current_expectations)) { # we received a result outside of a test: # could be a bare expectation or an exception/error if (!inherits(result, 'error')) { return() } self$current_expectations <- Stack$new() } self$current_expectations$push(result) }, end_test = function(context, test) { elapsed <- as.double(proc.time() - self$current_start_time) results <- list() if (!is.null(self$current_expectations)) results <- self$current_expectations$as_list() self$results$push(list( file = self$current_file %||% NA_character_, context = context, test = test, user = elapsed[1], system = elapsed[2], real = elapsed[3], results = results )) self$current_expectations <- NULL }, start_file = function(name) { self$current_file <- name }, end_file = function() { # fallback in case we have errors but no expectations self$end_context(self$current_file) }, end_context = function(context) { results <- self$current_expectations if (is.null(results)) { return() } self$current_expectations <- NULL # look for exceptions raised outside of tests # they happened just before end_context since they interrupt the test_file execution results <- results$as_list() if (length(results) == 0) return() self$results$push(list( file = self$current_file %||% NA_character_, context = context, test = NA_character_, user = NA_real_, system = NA_real_, real = NA_real_, results = results )) }, get_results = function() { testthat_results(self$results$as_list()) } ) ) #' Create a `testthat_results` object from the test results #' as stored in the ListReporter results field. #' #' @param results a list as stored in ListReporter #' @return its list argument as a `testthat_results` object #' @seealso ListReporter #' @keywords internal testthat_results <- function(results) { stopifnot(is.list(results)) structure(results, class = "testthat_results") } # return if all tests are successful w/o error all_passed <- function(res) { if (length(res) == 0) { return(TRUE) } df <- as.data.frame.testthat_results(res) sum(df$failed) == 0 && all(!df$error) } any_warnings <- function(res) { if (length(res) == 0) { return(FALSE) } df <- as.data.frame.testthat_results(res) any(df$warning > 0) } #' @export as.data.frame.testthat_results <- function(x, ...) 
{ if (length(x) == 0) { return( data.frame( file = character(0), context = character(0), test = character(0), nb = integer(0), failed = integer(0), skipped = logical(0), error = logical(0), warning = integer(0), user = numeric(0), system = numeric(0), real = numeric(0), passed = integer(0), result = list(), stringsAsFactors = FALSE ) ) } rows <- lapply(x, summarize_one_test_results) do.call(rbind, rows) } summarize_one_test_results <- function(test) { test_results <- test$results nb_tests <- length(test_results) nb_failed <- nb_skipped <- nb_warning <- nb_passed <- 0L error <- FALSE if (nb_tests > 0) { # error reports should be handled differently. # They may not correspond to an expect_that() test so remove them last_test <- test_results[[nb_tests]] error <- expectation_error(last_test) if (error) { test_results <- test_results[-nb_tests] nb_tests <- length(test_results) } nb_passed <- sum(vapply(test_results, expectation_success, logical(1))) nb_skipped <- sum(vapply(test_results, expectation_skip, logical(1))) nb_failed <- sum(vapply(test_results, expectation_failure, logical(1))) nb_warning <- sum(vapply(test_results, expectation_warning, logical(1))) } context <- if (length(test$context) > 0) test$context else "" res <- data.frame( file = test$file, context = context, test = test$test, nb = nb_tests, failed = nb_failed, skipped = as.logical(nb_skipped), error = error, warning = nb_warning, user = test$user, system = test$system, real = test$real, stringsAsFactors = FALSE ) # Added at end for backward compatibility res$passed <- nb_passed # Cannot easily add list columns in data.frame() res$result <- list(test_results) res } #' @export print.testthat_results <- function(x, ...) { print(as.data.frame(x)) } testthat/R/compare.R0000644000176200001440000000450213564563701014077 0ustar liggesusers#' Provide human-readable comparison of two objects #' #' `compare` is similar to [base::all.equal()], but shows #' you examples of where the failures occured. #' #' @export #' @param x,y Objects to compare #' @param ... Additional arguments used to control specifics of comparison #' @keywords internal compare <- function(x, y, ...) { UseMethod("compare", x) } comparison <- function(equal = TRUE, message = "Equal") { stopifnot(is.logical(equal), length(equal) == 1) stopifnot(is.character(message)) structure( list( equal = equal, message = paste(message, collapse = "\n") ), class = "comparison" ) } difference <- function(..., fmt = "%s") { comparison(FALSE, sprintf(fmt, ...)) } no_difference <- function() { comparison() } #' @export print.comparison <- function(x, ...) { if (x$equal) { cat("Equal\n") return() } cat(x$message) } #' @export #' @rdname compare compare.default <- function(x, y, ..., max_diffs = 9) { same <- all.equal(x, y, ...) if (length(same) > max_diffs) { same <- c(same[1:max_diffs], "...") } comparison(identical(same, TRUE), as.character(same)) } print_out <- function(x, ...) 
{ lines <- capture_output_lines(x, ..., print = TRUE) paste0(lines, collapse = "\n") } # Common helpers --------------------------------------------------------------- same_length <- function(x, y) length(x) == length(y) diff_length <- function(x, y) difference(fmt = "Lengths differ: %i is not %i", length(x), length(y)) same_type <- function(x, y) identical(typeof(x), typeof(y)) diff_type <- function(x, y) difference(fmt = "Types not compatible: %s is not %s", typeof(x), typeof(y)) same_class <- function(x, y) { if (!is.object(x) && !is.object(y)) { return(TRUE) } identical(class(x), class(y)) } diff_class <- function(x, y) { difference(fmt = "Classes differ: %s is not %s", klass(x), klass(y)) } same_attr <- function(x, y) { is.null(attr.all.equal(x, y)) } diff_attr <- function(x, y) { withr::local_options(list(useFancyQuotes = FALSE)) out <- attr.all.equal(x, y) difference(out) } vector_equal <- function(x, y) { (is.na(x) & is.na(y)) | (!is.na(x) & !is.na(y) & x == y) } vector_equal_tol <- function(x, y, tolerance = .Machine$double.eps ^ 0.5) { (is.na(x) & is.na(y)) | (!is.na(x) & !is.na(y)) & (x == y | abs(x - y) < tolerance) } testthat/R/stack.R0000644000176200001440000000264213176113117013550 0ustar liggesusers# Source: https://github.com/rstudio/shiny/blob/master/R/stack.R # License: GPL-3 # Relicensed a MIT with permission. # A Stack object backed by a list. The backing list will grow or shrink as # the stack changes in size. Stack <- R6Class( "Stack", class = FALSE, public = list( initialize = function(init = 20L) { # init is the initial size of the list. It is also used as the minimum # size of the list as it shrinks. private$stack <- vector("list", init) private$init <- init private$count <- 0L }, push = function(..., .list = NULL) { args <- c(list(...), .list) new_size <- private$count + length(args) # Grow if needed; double in size while (new_size > length(private$stack)) { private$stack[length(private$stack) * 2L] <- list(NULL) } private$stack[private$count + seq_along(args)] <- args private$count <- new_size invisible(self) }, size = function() { private$count }, # Return the entire stack as a list, where the first item in the list is the # oldest item in the stack, and the last item is the most recently added. as_list = function() { private$stack[seq_len(private$count)] } ), private = list( stack = NULL, # A list that holds the items count = 0L, # Current number of items in the stack init = 20L # Initial and minimum size of the stack ) ) testthat/R/make-expectation.R0000644000176200001440000000137513176113117015703 0ustar liggesusers#' Make an equality test. #' #' This a convenience function to make a expectation that checks that #' input stays the same. #' #' @param x a vector of values #' @param expectation the type of equality you want to test for #' (`"equals"`, `"is_equivalent_to"`, `"is_identical_to"`) #' @export #' @keywords internal #' @examples #' x <- 1:10 #' make_expectation(x) #' #' make_expectation(mtcars$mpg) #' #' df <- data.frame(x = 2) #' make_expectation(df) make_expectation <- function(x, expectation = "equals") { obj <- substitute(x) expectation <- match.arg( expectation, c("equals", "is_equivalent_to", "is_identical_to") ) dput(substitute( expect_equal(obj, values), list(obj = obj, expectation = as.name(expectation), values = x) )) } testthat/R/skip.R0000644000176200001440000001354313564563701013424 0ustar liggesusers#' Skip a test. #' #' This function allows you to skip a test if it's not currently available. 
#' This will produce an informative message, but will not cause the test #' suite to fail. #' #' `skip*` functions are intended for use within [test_that()] #' blocks. All expectations following the \code{skip*} statement within the #' same `test_that` block will be skipped. Test summaries that report skip #' counts are reporting how many `test_that` blocks triggered a `skip*` #' statement, not how many expectations were skipped. #' #' @section Helpers: #' `skip_if_not()` works like [stopifnot()], generating #' a message automatically based on the first argument. #' #' `skip_if_offline()` skips tests if an internet connection is not available #' using [curl::nslookup()]. #' #' `skip_on_cran()` skips tests on CRAN, using the `NOT_CRAN` #' environment variable set by devtools. #' #' `skip_on_travis()` skips tests on Travis CI by inspecting the #' `TRAVIS` environment variable. #' #' `skip_on_appveyor()` skips tests on AppVeyor by inspecting the #' `APPVEYOR` environment variable. #' #' `skip_on_ci()` skips tests on continuous integration systems by inspecting #' the `CI` environment variable. #' #' `skip_on_covr()` skips tests when covr is running by inspecting the #' `R_COVR` environment variable #' #' `skip_on_bioc()` skips tests on Bioconductor by inspecting the #' `BBS_HOME` environment variable. #' #' `skip_if_not_installed()` skips a tests if a package is not installed #' or cannot be loaded (useful for suggested packages). It loads the package as #' a side effect, because the package is likely to be used anyway. #' #' @param message A message describing why the test was skipped. #' @param host A string with a hostname to lookup #' @export #' @examples #' if (FALSE) skip("No internet connection") #' #' ## The following are only meaningful when put in test files and #' ## run with `test_file`, `test_dir`, `test_check`, etc. #' #' test_that("skip example", { #' expect_equal(1, 1L) # this expectation runs #' skip('skip') #' expect_equal(1, 2) # this one skipped #' expect_equal(1, 3) # this one is also skipped #' }) skip <- function(message) { message <- paste0(message, collapse = "\n") cond <- structure( list(message = paste0("Reason: ", message)), class = c("skip", "condition") ) stop(cond) } # Called automatically if the test contains no expectations skip_empty <- function() { cond <- structure( list(message = "Reason: empty test"), class = c("skip_empty", "skip", "condition") ) stop(cond) } #' @export #' @rdname skip #' @param condition Boolean condition to check. `skip_if_not()` will skip if #' `FALSE`, `skip_if()` will skip if `TRUE`. 
skip_if_not <- function(condition, message = deparse(substitute(condition))) { message <- paste0(message, " is not TRUE") if (!isTRUE(condition)) { skip(message) } } #' @export #' @rdname skip skip_if <- function(condition, message = NULL) { if (is.null(message)) { message <- paste(deparse(substitute(condition)), " is TRUE") } if (isTRUE(condition)) { skip(message) } } #' @export #' @param pkg Name of package to check for #' @param minimum_version Minimum required version for the package #' @rdname skip skip_if_not_installed <- function(pkg, minimum_version = NULL) { if (!requireNamespace(pkg, quietly = TRUE)) { skip(paste0(pkg, " cannot be loaded")) } if (!is.null(minimum_version)) { installed_version <- utils::packageVersion(pkg) if (installed_version < minimum_version) { skip(paste0( "Installed ", pkg, " is version ", installed_version, "; ", "but ", minimum_version, " is required" )) } } return(invisible(TRUE)) } #' @export #' @rdname skip skip_if_offline <- function(host = "r-project.org") { skip_if_not_installed("curl") has_internet <- !is.null(curl::nslookup(host, error = FALSE)) if (!has_internet) { skip("offline") } } #' @export #' @rdname skip skip_on_cran <- function() { skip_if(on_cran(), "On CRAN") } on_cran <- function() !identical(Sys.getenv("NOT_CRAN"), "true") #' @export #' @param os Character vector of system names. Supported values are #' `"windows"`, `"mac"`, `"linux"` and `"solaris"`. #' @rdname skip skip_on_os <- function(os) { os <- match.arg( os, c("windows", "mac", "linux", "solaris"), several.ok = TRUE ) sysname <- tolower(Sys.info()[["sysname"]]) switch(sysname, windows = if ("windows" %in% os) skip("On windows"), darwin = if ("mac" %in% os) skip("On Mac"), linux = if ("linux" %in% os) skip("On Linux"), sunos = if ("solaris" %in% os) skip("On Solaris") ) invisible(TRUE) } #' @export #' @rdname skip skip_on_travis <- function() { if (!identical(Sys.getenv("TRAVIS"), "true")) { return(invisible(TRUE)) } skip("On Travis") } #' @export #' @rdname skip skip_on_appveyor <- function() { if (!identical(Sys.getenv("APPVEYOR"), "True")) { return() } skip("On Appveyor") } #' @export #' @rdname skip skip_on_ci <- function() { if (!isTRUE(as.logical(Sys.getenv("CI")))) { return(invisible(TRUE)) } skip("On CI") } #' @export #' @rdname skip skip_on_covr <- function() { if (!identical(Sys.getenv("R_COVR"), "true")) { return(invisible(TRUE)) } skip("On covr") } #' @export #' @rdname skip skip_on_bioc <- function() { if (identical(Sys.getenv("BBS_HOME"), "")) { return(invisible(TRUE)) } skip("On Bioconductor") } #' @export #' @param msgid R message identifier used to check for translation: the default #' uses a message included in most translation packs. See the complete list in #' [`R-base.pot`](https://github.com/wch/r-source/blob/master/src/library/base/po/R-base.pot). #' @rdname skip skip_if_translated <- function(msgid = "'%s' not found") { if (gettext(msgid, domain = "R") == msgid) { return(invisible(TRUE)) } skip(paste0("\"", msgid, "\" is translated")) } testthat/R/reporter-tap.R0000644000176200001440000000271313547572000015070 0ustar liggesusers#' @include reporter.R NULL #' Test reporter: TAP format. #' #' This reporter will output results in the Test Anything Protocol (TAP), #' a simple text-based interface between testing modules in a test harness. 
#' For more information about TAP, see http://testanything.org #' #' @export #' @family reporters TapReporter <- R6::R6Class("TapReporter", inherit = Reporter, public = list( results = list(), n = 0L, has_tests = FALSE, contexts = NA_character_, start_context = function(context) { self$contexts[self$n + 1] <- context }, add_result = function(context, test, result) { self$has_tests <- TRUE self$n <- self$n + 1L self$results[[self$n]] <- result }, end_reporter = function() { if (!self$has_tests) { return() } self$cat_line("1..", self$n) for (i in 1:self$n) { if (!is.na(self$contexts[i])) { self$cat_line("# Context ", self$contexts[i]) } result <- self$results[[i]] if (expectation_success(result)) { self$cat_line("ok ", i, " ", result$test) } else if (expectation_broken(result)) { self$cat_line("not ok ", i, " ", result$test) msg <- gsub("(^|\n)", "\\1 ", format(result)) self$cat_line(msg) } else { self$cat_line( "ok ", i, " # ", toupper(expectation_type(result)), " ", format(result) ) } } } ) ) testthat/R/reporter-junit.R0000644000176200001440000001167413564563701015452 0ustar liggesusers#' @include reporter.R NULL # To allow the Java-style class name format that Jenkins prefers, # "package_name_or_domain.ClassName", allow "."s in the class name. classnameOK <- function(text) { gsub("[^._A-Za-z0-9]+", "_", text) } #' Test reporter: summary of errors in jUnit XML format. #' #' This reporter includes detailed results about each test and summaries, #' written to a file (or stdout) in jUnit XML format. This can be read by #' the Jenkins Continuous Integration System to report on a dashboard etc. #' Requires the _xml2_ package. #' #' To fit into the jUnit structure, context() becomes the `` #' name as well as the base of the ` classname`. The #' test_that() name becomes the rest of the ` classname`. #' The deparsed expect_that() call becomes the `` name. #' On failure, the message goes into the `` node message #' argument (first line only) and into its text content (full message). #' #' Execution time and some other details are also recorded. #' #' References for the jUnit XML format: #' \url{http://llg.cubic.org/docs/junit/} #' #' @export JunitReporter <- R6::R6Class("JunitReporter", inherit = Reporter, public = list( results = NULL, timer = NULL, doc = NULL, errors = NULL, failures = NULL, skipped = NULL, tests = NULL, root = NULL, suite = NULL, suite_time = NULL, file_name = NULL, elapsed_time = function() { time <- (private$proctime() - self$timer)[["elapsed"]] self$timer <- private$proctime() time }, reset_suite = function() { self$errors <- 0 self$failures <- 0 self$skipped <- 0 self$tests <- 0 self$suite_time <- 0 }, start_reporter = function() { if (!is_installed("xml2")) { stop("Please install the `xml2` package", call. 
= FALSE) } self$timer <- private$proctime() self$doc <- xml2::xml_new_document() self$root <- xml2::xml_add_child(self$doc, "testsuites") self$reset_suite() }, start_file = function(file) { self$file_name <- file }, start_test = function(context, test) { if (is.null(context)) { context(context_name(self$file_name)) } }, start_context = function(context) { self$suite <- xml2::xml_add_child( self$root, "testsuite", name = context, timestamp = private$timestamp(), hostname = private$hostname() ) }, end_context = function(context) { xml2::xml_attr(self$suite, "tests") <- as.character(self$tests) xml2::xml_attr(self$suite, "skipped") <- as.character(self$skipped) xml2::xml_attr(self$suite, "failures") <- as.character(self$failures) xml2::xml_attr(self$suite, "errors") <- as.character(self$errors) #jenkins junit plugin requires time has at most 3 digits xml2::xml_attr(self$suite, "time") <- as.character(round(self$suite_time, 3)) self$reset_suite() }, add_result = function(context, test, result) { self$tests <- self$tests + 1 time <- self$elapsed_time() self$suite_time <- self$suite_time + time # XML node for test case name <- test %||% "(unnamed)" testcase <- xml2::xml_add_child( self$suite, "testcase", time = toString(time), classname = classnameOK(context), name = classnameOK(name) ) first_line <- function(x) { paste0(strsplit(x$message, split = "\n")[[1]][1], src_loc(x$srcref)) } # add an extra XML child node if not a success if (expectation_error(result)) { # "type" in Java is the exception class error <- xml2::xml_add_child(testcase, "error", type = "error", message = first_line(result)) xml2::xml_text(error) <- format(result) self$errors <- self$errors + 1 } else if (expectation_failure(result)) { # "type" in Java is the type of assertion that failed failure <- xml2::xml_add_child(testcase, "failure", type = "failure", message = first_line(result)) xml2::xml_text(failure) <- format(result) self$failures <- self$failures + 1 } else if (expectation_skip(result)) { xml2::xml_add_child(testcase, "skipped") self$skipped <- self$skipped + 1 } }, end_reporter = function() { if (is.character(self$out)) { xml2::write_xml(self$doc, self$out, format = TRUE) } else if (inherits(self$out, "connection")) { file <- tempfile() xml2::write_xml(self$doc, file, format = TRUE) write_lines(read_lines(file), self$out) } else { stop("unsupported output type: ", toString(self$out)) } } # end_reporter ), # public private = list( proctime = function() { proc.time() }, timestamp = function() { strftime(Sys.time(), "%Y-%m-%dT%H:%M:%SZ", tz = "UTC") }, hostname = function() { Sys.info()[["nodename"]] } ) # private ) testthat/NEWS.md0000644000176200001440000012420613564760546013235 0ustar liggesusers # testthat 2.3.1 * The last version of testthat introduced a performance regression in error assertions (#963). To fix it, you need to install rlang 0.4.2. * Fixed error assertions with rJava errors (#964). * Fixed issue where error and warning messages were not retrieved with `conditionMessage()` under certain circumstances. # testthat 2.3.0 ## Conditions This release mostly focusses on an overhaul of how testthat works with conditions (i.e. errors, warnings and messages). There are relatively few user-facing changes, although you should now see more informative backtraces from errors and failures. * Unexpected errors are now printed with a simplified backtrace. * `expect_error()` and `expect_condition()` now display a backtrace when the error doesn't conform to expectations (#729). 
* `expect_error()`, `expect_warning()` and `expect_message()` now call `conditionMessage()` to get the condition message. This generic makes it possible to generate messages at print-time rather than signal-time. * `expect_error()` gets a better warning message when you test for a custom error class with `regexp`. * New `exp_signal()` function is a condition signaller that implements the testthat protocol (signal with `stop()` if the expectation is broken, with a `continue_test` restart). * Existence of restarts is first checked before invokation. This makes it possible to signal warnings or messages with a different condition signaller (#874). * `ListReporter` now tracks expectations and errors, even when they occur outside of tests. This ensures that `stop_on_failure` matches the results displayed by the reporter (#936). * You can silence warnings about untested error classes by implementing a method for `is_uninformative_warning()`. This method should be lazily registered, e.g. with `vctrs::s3_register()`. This is useful for introducing an experimental error class without encouraging users to depend on the class in their tests. * Respect options(warn = -1) to ignore all warnings (@jeroen #958). ## Expectations * Expectations can now be explicitly subclassed with `new_expectation()`. This constructor follows our new conventions for S3 classes and takes an optional subclass and optional attributes. * Unquoted inputs no longer potentially generate multiple test messages (#929). * `verify_output()` no longer uses quasiquotation, which fixes issues when verifying the output of tidy eval functions (#945). * `verify_output()` gains a `unicode` parameter to turn on or off the use of Unicode characters by the cli package. It is disabled by default to prevent the tests from failing on platforms like Windows that don't support UTF-8 (which could be your contributors' or your CI machines). * `verify_output()` now correctly handles multi-line condition messages. * `verify_output()` now adds spacing after condition messages, consistent with the spacing added after normal output. * `verify_output()` has a new syntax for inserting headers in output files: insert a `"# Header"` string (starting with `#` as in Markdown) to add a header to a set of outputs. ## Other minor improvements and bug fixes * `compare.numeric()` uses a more sophisticated default tolerance that will automatically skip tests that rely on numeric tolerance if long doubles are not available (#940). * `JunitReporter` now reports tests in ISO 8601 in the UTC timezone and uses the maximum precision of 3 decimal places (#923). # testthat 2.2.1 * Repair regression in `test_rd()` and add a couple of tests to hopefully detect the problem earlier in the future. # testthat 2.2.0 ## New features * New `verify_output()` is designed for testing output aimed at humans (most commonly print methods and error messages). It is a regression test that saves output in a way that makes it easy to review. It is automatically skipped on CRAN (#782, #834). ## Minor improvements and bug fixes * `as.data.frame.testthat_results()` now always returns a data frame with 13 columns (@jozefhajnala, #887). * `auto_test_package()` now correctly handles helper files (`tests/testthat/helper-*.R`), automatically reloading all code and rerunning all tests (@CorradoLanera, #376, #896). * `expect_match()` now displays `info` even when match length is 0 (#867). 
* `expect_s3_class()` gains new `exact` argument that allows you to check for an exact class match, not just inheritance (#885). * `fail()` and `succeed()` gain `info` argument, which is passed along to `expect()`. * `test_examples()` gets some minor fixes: it now returns the results invisibly, doesn't assume that examples should contain tests, and documents that you shouldn't be using it routinely (#841). * `test_file()` only calls `Reporter$end_context()` if a context was started, fixing an error in `TeamcityReporter` (@atheriel, #883). * `skip()` now reports reason for skipping as: `Reason: {skip condition}` (@patr1ckm, #868). * `skip_if()` and `skip_if_not()` now report `Reason: {skip condition} is TRUE` and `Reason: {skip condition} is not TRUE` respectively (@ patr1ckm, #868). * `skip_if_translated()` now tests for translation of a specific message. This is more robust than the previous approach because translation happens message-by-message, not necessarily for the entire session (#879) (and in general, it's impossible to determine what language R is currently using). * `skip_on_covr()` allows you to skip tests when covr is running. (@ianmcook, #895) * `expect_known_value()` gains a new serialisation `version` argument, defaulting to 2. Prevents the `.rds` files created to hold reference objects from making a package appear to require R >= 3.5 (#888 @jennybc). # testthat 2.1.1 * Fix test failures in strict latin1 locale # testthat 2.1.0 ## New expectations * New `expect_visible()` and `expect_invisible()` make it easier to check if a function call returns its result visibly or invisibly (#719). * New `expect_mapequal(x, y)` checks that `x` and `y` have the same names, and the same value associated with each name (i.e. they compare the values of the vector standardising the order of the names) (#863). * New `expect_vector()` is a wrapper around `vctrs::vec_assert()` making it easy to test against the vctrs definitions of prototype and size (#846). (Currently requires development version of vctrs.) ## Improvements to existing expectations * All expectations give clearer error messages if you forget the `object` or `expected` arguments (#743). * `expect_equal()` now correctly compares infinite values (#789). * In `expect_equal_to_reference()`, the default value for `update` is now `FALSE` (@BrodieG, #683). * `expect_error()` now returns the error object as documentated (#724). It also now warns if you're using a classed expectation and you're not using the `class` argument. This is good practice as it decouples the error object (which tends to be stable) from its rendering to the user (which tends to be fragile) (#816). * `expect_identical()` gains a `...` argument to pass additional arguments down to `identical()` (#714). * `expect_lt()`, `expect_lte()`, `expect_gt()` `expect_gte()` now handle `Inf` and `NA` arguments appropriately (#732), and no longer require the inputs to be numeric. * `expect_output()` gains a `width` argument, allowing you to control the output width. This does not inherit from `getOption("width")`, ensuring that tests return the same results regardless of environment (#805). * `expect_setequal()` now works with more vector types (including lists), because it uses `%in%`, rather than `sort()`. It also warns if the inputs are named, as this suggests that your mental model of how `expect_setequal()` works is wrong (#750). * `is_true()` and `is_false()` have been deprecated because they conflict with other functions in the tidyverse. 
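For example, the following now pass (a small illustrative sketch of the improved behaviour described above; not code from the release itself):

```R
library(testthat)

# sets are compared with %in%, ignoring order and duplicates
expect_setequal(c("b", "a", "a"), c("a", "b"))

# assert on the condition class instead of (or as well as) the message
expect_error(stop("boom"), class = "simpleError")
```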
## Reporters * Reporter documentation has been considerably improved (#657). * `CheckReporter`, used by R CMD check, now includes a count of warnings. * `JUnitReporter` no longer replaces `.` in class names (#753), and creates output that should be more compatible with Jenkins (#806, @comicfans). * `ListReporter` now records the number of passed tests and original results in new columns (#675). * `ProgressReporter`, the default reporter, now: * Automatically generates a context from the file name. We no longer recommend the use of `context()` and instead encourage you to delete it, allowing the context to be autogenerated from the file name. This also eliminates the error that occurred if tests came before the first `context()` (#700, #705). * Gains an `update_interval` parameter to control how often updates are printed (default 0.1 s). This prevents large printing overhead for very fast tests. (#701, @jimhester) * Uses a 3 character wide column to display test successes, so up to 999 successful tests can be displayed without changing the alignment (#712). * `reporter$end_reporter()` is now only called when testing completes successfully. This ensures that you don't get unnecessary output when the test fails partway through (#727). ## Skips * `skip_if_offline()` skips tests if an internet connection is not available (#685). * `skip_on_ci()` skips tests on continuous integration systems (@mbjoseph, #825) by looking for a `CI` env var. ## Other new features * New `testthat_examples()` and `testthat_example()` make it easy to access new test files bundled with the package. These are used in various examples to make it easier to understand how to use the package. * New `local_mock()` allows you to mock a function without having to add an additional layer of indentation as with `with_mock()` (#856). ## Other minor improvements and bug fixes * `auto_test_package()` works better with recent devtools and also watches `src/` for changes (#809). * `expect_s3_class()` now works with unquoting (@jalsalam, #771). * `expectation` objects now contain the failure message, even when successful (#836). * `devtools::test()` no longer fails if run multiple times within the same R session for a package containing Catch tests. ([devtools #1832](https://github.com/r-lib/devtools/issues/1832)) * New `testing_package()` retrieves the name of the package currently being tested (#699). * `run_testthat_tests` C entrypoint is registered more robustly. * `skip()` now always produces a `message` of length 1, as expected elsewhere in testthat (#791). * Warnings are passed through even when `options(warn = 2)` is set (@yutannihilation, #721). # testthat 2.0.1 * Fix failing tests with devtools 2.0.0. # testthat 2.0.0 ## Breaking API changes * "Can't mock functions in base packages": You can no longer use `with_mock()` to mock functions in base packages, because this no longer works in R-devel due to changes with the byte code compiler. I recommend using [mockery](https://github.com/n-s-f/mockery) or [mockr](https://github.com/krlmlr/mockr) instead. * The order of arguments to `expect_equivalent()` and `expect_error()` has changed slightly as both now pass `...` on to another function.
This reveals itself with a number of different errors, like: * 'what' must be a character vector * 'check.attributes' must be logical * 'tolerance' should be numeric * argument is not interpretable as logical * threw an error with unexpected class * argument "quo" is missing, with no default * argument is missing, with no default If you see one of these errors, check the number, order, and names of arguments to the expectation. * "Failure: (unknown)". The last release mistakenly failed to test bare expectations not wrapped inside `test_that()`. If you see "(unknown)" in a failure message, this is a failing expectation that you previously weren't seeing. As well as fixing the failure, please also wrap it inside a `test_that()` with an informative name. * "Error: the argument has already been evaluated": the way in which expectations now need to create labels has changed, which caused a couple of failures with unusual usage when combined with `Reduce`, `lapply()`, and `Map()`. Avoid these functions in favour of for loops. I also recommend reading the section below on quasiquotation support in order to create more informative failure messages. ## Expectations ### New and improved expectations * `expect_condition()` works like `expect_error()` but captures any condition, not just error conditions (#621). * `expect_error()` gains a `class` argument that allows you to make an assertion about the class of the error object (#530). * `expect_reference()` checks if two names point to the same object (#622). * `expect_setequal()` compares two sets (stored in vectors), ignoring duplicates and differences in order (#528). ### New and improved skips * `skip_if()` makes it easy to skip a test when a condition is true (#571). For example, use `skip_if(getRversion() <= 3.1)` to skip a test in older R versions. * `skip_if_translated()` skips tests if you're running in a locale where translations are likely to occur (#565). Use this to avoid spurious failures when checking the text of error messages in non-English locales. * `skip_if_not_installed()` gains a new `minimum_version` argument (#487, #499). ### Known good values We have identified a useful family of expectations that compare the results of an expression to a known good value stored in a file. They are designed to be used in conjunction with git so that you can see what precisely has changed, and revert it if needed. * `expect_known_output()` replaces `expect_output_file()`, which has been soft-deprecated. It now defaults to `update = TRUE` and warns, rather than failing, on the first run. It gains a `print` argument to automatically print the input (#627). It also sets the width option to 80 to ensure consistent output across environments (#514). * `expect_known_value()` replaces `expect_equal_to_reference()`, which has been soft-deprecated. It gains an `update` argument defaulting to `TRUE`. This changes behaviour from the previous version, and the soft-deprecated `expect_equal_to_reference()` gets `update = FALSE`. * `expect_known_failure()` stores and compares the failure message from an expectation. It's a useful regression test when developing informative failure messages for your own expectations. ### Quasiquotation support All expectations can now use unquoting (#626). This makes it much easier to generate informative failure messages when running tests in a for loop.
For example, take this test: ```R f <- function(i) if (i > 3) i * 9 else i * 10 for (i in 1:5) { expect_equal(f(i), i * 10) } ``` When it fails, you'll see the message ``Error: `f(i)` not equal to `i * 10` ``. That's hard to diagnose because you don't know which iteration caused the problem! ```R for (i in 1:5) { expect_equal(f(!!i), !!(i * 10)) } ``` If you unquote the values using `!!`, you get the failure message `` `f(4L)` not equal to 40.``. This is much easier to diagnose! See `?quasi_label()` for more details. (Note that this is not tidy evaluation per se, but is closely related. At this time you cannot unquote quosures.) ## New features ### Setup and teardown * New `setup()` and `teardown()` functions allow you to run code at the start and end of each test file. This is useful if you want to pair cleanup code with the code that messes up state (#536). * Two new prefixes are recognised in the `test/` directory. Files starting with `setup` are run before tests (but unlike `helpers` are not run in `devtools::load_all()`). Files starting with `teardown` are run after all tests are completed (#589). ### Other new features * All files are now read and written as UTF-8 (#510, #605). * `is_testing()` allows you to tell if your code is being run inside a testing environment (#631). Rather than taking a run-time dependency on testthat you may want to inline the function into your own package: ```R is_testing <- function() { identical(Sys.getenv("TESTTHAT"), "true") } ``` It's frequently useful to combine with `interactive()`. ### New default reporter A new default reporter, `ReporterProgress`, produces more aesthetically pleasing output and makes the most important information available upfront (#529). You can return to the previous default by setting `options(testthat.default_reporter = "summary")`. ### Reporters * Output colours have been tweaked to be consistent with clang: warnings are now in magenta, and skips in blue. * New `default_reporter()` and `check_reporter()` return the default reporters for interactive and check environments (#504). * New `DebugReporter` calls a better version of `recover()` in case of failures, errors, or warnings (#360, #470). * New `JunitReporter` generates reports in JUnit compatible format. (#481, @lbartnik; #640, @nealrichardson; #575) * New `LocationReporter` just prints the location of every expectation. This is useful for locating segfaults and C/C++ breakpoints (#551). * `SummaryReporter` received a number of smaller tweaks: * Aborts testing as soon as the limit given by the option `testthat.summary.max_reports` (default 10) is reached (#520). * New option `testthat.summary.omit_dots = TRUE` hides the progress dots, speeding up tests by a small amount (#502). * Bring back random praise and encouragement which I accidentally dropped (#478). * New option `testthat.default_check_reporter`, defaults to `"check"`. Continuous integration systems can set this option before evaluating package test sources in order to direct test result details to a known location. * All reporters now accept a `file` argument on initialization. If provided, reporters will write the test results to that path. This output destination can also be controlled with the option `testthat.output_file` (#635, @nealrichardson). ## Deprecated functions * `is_null()` and `matches()` have been deprecated because they conflict with other functions in the tidyverse (#523). ## Minor improvements and bug fixes * Updated Catch to 1.9.6.
`testthat` now understands and makes use of the package routine registration mechanism required by CRAN with R >= 3.4.0. (@kevinushey) * Better reporting for deeply nested failures, limiting the stack trace to the first and last 10 entries (#474). * Bare expectations notify the reporter once again. This is achieved by running all tests inside `test_code()` by default (#427, #498). This behaviour can be overridden by setting `wrap = FALSE` in `test_dir()` and friends (#586). * `auto_test()` and `auto_test_package()` provide a `hash` parameter to enable switching to faster, time-stamp-based modification detection (#598, @katrinleinweber). `auto_test_package()` works correctly on Windows (#465). * `capture_output_lines()` is now exported (#504). * `compare.character()` works correctly for vectors of length > 5 (#513, @brodieG). * `compare.default()` gains a `max_diffs` argument and defaults to printing out only the first 9 differences (#538). * `compare.numeric()` respects `check.attributes()` so `expect_equivalent()` correctly ignores attributes of numeric vectors (#485). * Output expectations (`expect_output()`, `expect_message()`, `expect_warning()`, and `expect_silent()`) all invisibly return the first argument to be consistent with the other expectations (#615). * `expect_length()` works with any object that has a `length` method, not just vectors (#564, @nealrichardson). * `expect_match()` now accepts explicit `perl` and `fixed` arguments, and adapts the failure message to the value of `fixed`. This also affects other expectations that forward to `expect_match()`, like `expect_output()`, `expect_message()`, `expect_warning()`, and `expect_error()`. * `expect_match()` escapes special regular expression characters when printing (#522, @jimhester). * `expect_message()`, `expect_warning()` and `expect_error()` produce clearer failure messages. * `find_test_scripts()` only looks for `\.[rR]` in the extension (#492, @brodieG). * `test_dir()`, `test_package()`, `test_check()` unset the `R_TESTS` env var (#603). * `test_examples()` now works with installed packages as well as source packages (@jimhester, #532). * `test_dir()`, `test_package()`, and `test_check()` gain `stop_on_failure` and `stop_on_warning` arguments that control whether or not an error is signalled if any tests fail or generate warnings (#609, #619). * `test_file()` now triggers a `gc()` after tests are run. This helps to ensure that finalisers are run earlier (#535). * `test_path()` now generates the correct path when called from within `tools::testInstalledPackage()` (#542). * `test_path()` no longer assumes that the path exists (#448). * `test_that()` calls without any expectations generate a default `skip()` (#413). * `test_dir()` gains a `load_helpers` argument (#505). * `show_failures()` simply prints a failure if it occurs. This makes it easier to show failures in examples. * `with_mock()` disallows mocking of functions in base packages, because this doesn't work with the current development version of R (#553). # testthat 1.0.2 * Ensure `std::logic_error()` is constructed with a `std::string()` argument, to avoid build errors on Solaris. # testthat 1.0.1 * New `expect_output_file()` to compare output of a function with a text file, and optionally update it (#443, @krlmlr). * Properly scoped use + compilation of C++ unit testing code using Catch to `gcc` and `clang` only, as Catch includes code that does not strictly conform to the C++98 standard.
(@kevinushey) * Fixed an out-of-bounds memory access when routing Catch output through `Rprintf()`. (@kevinushey) * Ensure that unit tests run on R-oldrel (remove use of `dir.exists()`). (@kevinushey) * Improved overriding of calls to `exit()` within Catch, to ensure compatibility with GCC 6.0. (@krlmlr) * Hardened formatting of difference messages, previously the presence of `%` characters could affect the output (#446, @krlmlr). * Fixed errors in `expect_equal()` when comparing numeric vectors with and without attributes (#453, @krlmlr). * `auto_test()` and `auto_test_package()` show only the results of the current test run and not of previously failed runs (#456, @krlmlr). # testthat 1.0.0 ## Breaking changes The `expectation()` function now expects an expectation type (one of "success", "failure", "error", "skip", "warning") as its first argument. If you're creating your own expectations, you'll need to use `expect()` instead (#437). ## New expectations The expectation system got a thorough overhaul (#217). This primarily makes it easier to add new expectations in the future, but also included a thorough review of the documentation, ensuring that related expectations are documented together, and have evocative names. One useful change is that most expectations invisibly return the input `object`. This makes it possible to chain together expectations with magrittr: ```R factor("a") %>% expect_type("integer") %>% expect_s3_class("factor") %>% expect_length(1) ``` (And to make this style even easier, testthat now re-exports the pipe, #412). The exceptions to this rule are the expectations that evaluate (i.e. for messages, warnings, errors, output etc), which invisibly return `NULL`. These functions are now more consistent: using `NA` will cause a failure if there is an error/warning/message/output (i.e. they're not missing), and `NULL` will fail if there aren't any errors/warnings/messages/output. This previously didn't work for `expect_output()` (#323), and the error messages were confusing with `expect_error(..., NA)` (#342, @nealrichardson + @krlmlr, #317). Another change is that `expect_output()` now requires you to explicitly print the output if you want to test a print method: `expect_output("a", "a")` will fail, `expect_output(print("a"), "a")` will succeed. There are six new expectations: * `expect_type()` checks the _type_ of the object (#316), `expect_s3_class()` tests that an object is S3 with given class, `expect_s4_class()` tests that an object is S4 with given class (#373). I recommend using these more specific expectations instead of the more general `expect_is()`. * `expect_length()` checks that an object has expected length. * `expect_success()` and `expect_failure()` are new expectations designed specifically for testing other expectations (#368). A number of older features have been deprecated: * `expect_more_than()` and `expect_less_than()` have been deprecated. Please use `expect_gt()` and `expect_lt()` instead. * `takes_less_than()` has been deprecated. * `not()` has been deprecated. Please use the explicit individual forms `expect_error(..., NA)`, `expect_warning(..., NA)` and so on. ## Expectations are conditions Now all expectations are also conditions, and R's condition system is used to signal failures and successes (#360, @krlmlr). All known conditions (currently, "error", "warning", "message", "failure", and "success") are converted to expectations using the new `as.expectation()`.
This allows third-party test packages (such as `assertthat`, `testit`, `ensurer`, `checkmate`, `assertive`) to seamlessly establish `testthat` compatibility by issuing custom error conditions (e.g., `structure(list(message = "Error message"), class = c("customError", "error", "condition"))`) and then implementing `as.expectation.customError()`. The `assertthat` package contains an example. ## Reporters The reporters system class has been considerably refactored to make existing reporters simpler and to make it easier to write new reporters. There are two main changes: * Reporters classes are now R6 classes instead of Reference Classes. * Each callbacks receive the full context: * `add_results()` is passed context and test as well as the expectation. * `test_start()` and `test_end()` both get the context and test. * `context_start()` and `context_end()` get the context. * Warnings are now captured and reported in most reporters. * The reporter output goes to the original standard output and is not affected by `sink()` and `expect_output()` (#420, @krlmlr). * The default summary reporter lists all warnings (#310), and all skipped tests (@krlmlr, #343). New option `testthat.summary.max_reports` limits the number of reports printed by the summary reporter. The default is 15 (@krlmlr, #354). * `MinimalReporter` correct labels errors with E and failures with F (#311). * New `FailReporter` to stop in case of failures or errors after all tests (#308, @krlmlr). ## Other * New functions `capture_output()`, `capture_message()`, and `capture_warnings()` selectively capture function output. These are used in `expect_output()`, `expect_message()` and `expect_warning()` to allow other types out output to percolate up (#410). * `try_again()` allows you to retry code multiple times until it succeeds (#240). * `test_file()`, `test_check()`, and `test_package()` now attach testthat so all testing functions are available. * `source_test_helpers()` gets a useful default path: the testthat tests directory. It defaults to the `test_env()` to be consistent with the other source functions (#415). * `test_file()` now loads helpers in the test directory before running the tests (#350). * `test_path()` makes it possible to create paths to files in `tests/testthat` that work interactively and when called from tests (#345). * Add `skip_if_not()` helper. * Add `skip_on_bioc()` helper (@thomasp85). * `make_expectation()` uses `expect_equal()`. * `setup_test_dir()` has been removed. If you used it previously, instead use `source_test_helpers()` and `find_test_scripts()`. * `source_file()` exports the function testthat uses to load files from disk. * `test_that()` returns a `logical` that indicates if all tests were successful (#360, @krlmlr). * `find_reporter()` (and also all high-level testing functions) support a vector of reporters. For more than one reporter, a `MultiReporter` is created (#307, @krlmlr). * `with_reporter()` is used internally and gains new argument `start_end_reporter = TRUE` (@krlmlr, 355). * `set_reporter()` returns old reporter invisibly (#358, @krlmlr). * Comparing integers to non-numbers doesn't raise errors anymore, and falls back to string comparison if objects have different lengths. Complex numbers are compared using the same routine (#309, @krlmlr). * `compare.numeric()` and `compare.character()` received another overhaul. This should improve behaviour of edge cases, and provides a strong foundation for further work. Added `compare.POSIXt()` for better reporting of datetime differences. 
* `expect_identical()` and `is_identical_to()` now use `compare()` for more detailed output of differences (#319, @krlmlr). * Added [Catch](https://github.com/philsquared/Catch) v1.2.1 for unit testing of C++ code. See `?use_catch()` for more details. (@kevinushey) # testthat 0.11.0 * Handle skipped tests in the TAP reporter (#262). * New `expect_silent()` ensures that code produces no output, messages, or warnings (#261). * New `expect_lt()`, `expect_lte()`, `expect_gt()` and `expect_gte()` for comparison with or without equality (#305, @krlmlr). * `expect_output()`, `expect_message()`, `expect_warning()`, and `expect_error()` now accept `NA` as the second argument to indicate that output, messages, warnings, and errors should be absent (#219). * Praise gets more diverse thanks to the praise package, and you'll now get random encouragement if your tests don't pass. * testthat no longer muffles warning messages. If you don't want to see them in your output, you need to explicitly quiet them, or use an expectation that captures them (e.g. `expect_warning()`). (#254) * Use of tests in `inst/tests` is formally deprecated. Please move them into `tests/testthat` instead (#231). * `expect_match()` now encodes the match, as well as the output, in the expectation message (#232). * `expect_is()` gives a better failure message when testing multiple inheritance, e.g. `expect_is(1:10, c("glm", "lm"))` (#293). * Corrected argument order in `compare.numeric()` (#294). * The `comparison()` constructor now checks that its arguments are the correct type and length. This fixes a bug where tests failed with an error like "values must be length 1, but FUN(X[[1]]) result is length 2" (#279). * Added `skip_on_os()`, to skip tests on specified operating systems (@kevinushey). * Skip test that depends on `devtools` if it is not installed (#247, @krlmlr). * Added `skip_on_appveyor()` to skip tests on Appveyor (@lmullen). * `compare()` shows detailed output of differences for character vectors of different lengths (#274, @krlmlr). * Detailed output from `expect_equal()` doesn't confuse expected and actual values anymore (#274, @krlmlr). # testthat 0.10.0 * Failure locations are now formatted as R error locations. * Add an 'invert' argument to `find_tests_scripts()`. This allows one to select only tests which do _not_ match a pattern. (#239, @jimhester). * Deprecated `library_if_available()` has been removed. * The test functions (`test_dir()`, `test_file()`, `test_package()`, `test_check()`) now return a `testthat_results` object that contains all results, and can be printed or converted to a data frame. * `test_dir()`, `test_package()`, and `test_check()` have an added `...` argument that allows filtering of test files using, e.g., Perl-style regular expressions, or `fixed` character filtering. Arguments in `...` are passed to `grepl()` (@leeper). * `test_check()` uses a new reporter specifically designed for `R CMD check`. It displays a summary at the end of the tests, designed to be <13 lines long so test failures in `R CMD check` display something more useful. This will hopefully stop BDR from calling testthat a "test obfuscation suite" (#201). * `compare()` is now documented and exported. Added a numeric method so when long numeric vectors don't match you'll see some examples of where the problem is (#177). The line spacing in `compare.character()` was tweaked. * `skip_if_not_installed()` skips tests if a package isn't installed (#192). * `expect_that(a, equals(b))` style of testing has been soft-deprecated.
It will keep working, but it's no longer demonstrated anywhere, and new expectations will only be available in `expect_equal(a, b)` style. (#172) * Once again, testthat suppresses messages and warnings in tests (#189). * New `test_examples()` lets you run package examples as tests. Each example counts as one expectation and it succeeds if the code runs without errors (#204). * New `succeed()` expectation always succeeds. * `skip_on_travis()` allows you to skip tests when run on Travis CI. (Thanks to @mllg) * `colourise()` was removed. (Colour is still supported, via the `crayon` package.) * Mocks can now access values local to the call of `with_mock` (#193, @krlmlr). * All equality expectations are now documented together (#173); all matching expectations are also documented together. # testthat 0.9.1 * Bump R version dependency. # testthat 0.9 ## New features * BDD: testthat now comes with an initial behaviour driven development (BDD) interface. The language is similar to RSpec for Ruby or Mocha for JavaScript. BDD tests read like sentences, so they should make it easier to understand the specification of a function. See `?describe()` for further information and examples. * It's now possible to `skip()` a test with an informative message - this is useful when tests are only available under certain conditions, as when not on CRAN, or when an internet connection is available (#141). * `skip_on_cran()` allows you to skip tests when run on CRAN. To take advantage of this code, you'll need either to use devtools, or run `Sys.setenv(NOT_CRAN = "true")`. * Simple mocking: `with_mock()` makes it easy to temporarily replace functions defined in packages. This is useful for testing code that relies on functions that are slow, have unintended side effects or access resources that may not be available when testing (#159, @krlmlr). * A new expectation, `expect_equal_to_reference()`, has been added. It tests for equality to a reference value stored in a file (#148, @jonclayden). ## Minor improvements and bug fixes * `auto_test_package()` works once more, and now uses `devtools::load_all()` for higher fidelity loading (#138, #151). * Bug in `compare.character()` fixed, as reported by Georgi Boshnakov. * `colourise()` now uses option `testthat.use_colours` (default: `TRUE`). If it is `FALSE`, output is not colourised (#153, @mbojan). * `is_identical_to()` only calls `all.equal()` to generate an informative error message if the two objects are not identical (#165). * `safe_digest()` uses a better strategy, and returns NA for directories (#138, #146). * Random praise is re-enabled by default (again!) (#164). * Teamcity reporter now correctly escapes output messages (#150, @windelinckx). It also uses nested suites to include test names. ## Deprecated functions * `library_if_available()` has been deprecated. # testthat 0.8.1 * Better default environment for `test_check()` and `test_package()`, which allows S4 class creation in tests. * `compare.character()` no longer fails when one value is missing. # testthat 0.8 testthat 0.8 comes with a new recommended structure for storing your tests. To better meet CRAN recommended practices, testthat now recommends that you put your tests in `tests/testthat`, instead of `inst/tests` (this makes it possible for users to choose whether or not to install tests). With this new structure, you'll need to use `test_check()` instead of `test_packages()` in the test file (usually `tests/testthat.R`) that runs all testthat unit tests.
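Under this layout, the driver file `tests/testthat.R` is typically just a minimal script along these lines (`yourpackage` is a placeholder for the package being tested):

```R
library(testthat)
library(yourpackage)

test_check("yourpackage")
```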
The other big improvement to usability comes from @kforner, who contributed code to allow the default results (i.e. those produced by `SummaryReporter`) to include source references so you can see exactly where failures occured. ## New reporters * `MultiReporter`, which combines several reporters into one. (Thanks to @kforner) * `ListReporter`, which captures all test results with their file, context, test and elapsed time. `test_dir`, `test_file`, `test_package` and `test_check` now use the `ListReporter` to invisibly return a summary of the tests as a data frame. (Thanks to @kforner) * `TeamCityReporter` to produce output compatible with the TeamCity continuous integration environment. (Thanks to @windelinckx) * `SilentReporter` so that `testthat` can test calls to `test_that`. (Thanks to @craigcitro, #83) ## New expectations * `expect_null()` and `is_null` to check if an object is NULL (#78) * `expect_named()` and `has_names()` to check the names of a vector (#79) * `expect_more_than()`, `is_more_than()`, `expect_less_than()`, `is_less_than()` to check values above or below a threshold. (#77, thanks to @jknowles) ## Minor improvements and bug fixes * `expect_that()` (and thus all `expect_*` functions) now invisibly return the expectation result, and stops if info or label arguments have length > 1 (thanks to @kforner) * fixed two bugs with source_dir(): it did not look for the source scripts at the right place, and it did not use its `chdir` argument. * When using `expect_equal()` to compare strings, the default output for failure provides a lot more information, which should hopefully help make finding string mismatches easier. * `SummaryReporter` has a `max_reports` option to limit the number of detailed failure reports to show. (Thanks to @crowding) * Tracebacks will now also contain information about where the functions came from (where that information is available). * `matches` and `expect_match` now pass additional arguments on to `grepl` so that you can use `fixed = TRUE`, `perl = TRUE` or `ignore.case = TRUE` to control details of the match. `expect_match` now correctly fails to match NULL. (#100) * `expect_output`, `expect_message`, `expect_warning` and `expect_error` also pass ... on to `grepl`, so that you can use `fixed = TRUE`, `perl = TRUE` or `ignore.case = TRUE` * Removed `stringr` and `evaluate` dependencies. * The `not()` function makes it possible to negate tests. For example, `expect_that(f(), not(throws_error()))` asserts that `f()` does not throw an error. * Make `dir_state` less race-y. (Thanks to @craigcitro, #80) * `auto_test` now pays attention to its 'reporter' argument (Thanks to @crowding, #81) * `get_reporter()`, `set_reporter()` and `with_reporter()` are now exported (#102) # testthat 0.7.1 * Ignore attributes in `is_true` and `is_false` (#49) * `make_expectation` works for more types of input (#52) * Now works better with evaluate 0.4.3. * new `fail()` function always forces a failure in a test. Suggested by Richie Cotton (#47) * Added `TapReporter` to produce output compatible with the "test anything protocol". Contributed by Dan Keshet. * Fixed where `auto_test` would identify the wrong files as having changed. (Thanks to Peter Meilstrup) # testthat 0.7 * `SummaryReporter`: still return informative messages even if no tests defined (just bare expectations). (Fixes #31) * Improvements to reference classes (Thanks to John Chambers) * Bug fixes for when nothing was generated in `gives_warning` / `shows_message`. 
(Thanks to Bernd Bischl) * New `make_expectation` function to programmatically generate an equality expectation. (Fixes #24) * `SummaryReporter`: You don't get praise until you have some tests. * Depend on `methods` rather than requiring it so that testthat works when run from `Rscript` * `auto_test` now normalises paths to enable better identification of file changes, and fixes bug in instantiating new reporter object. # testthat 0.6 * All `mutatr` classes have been replaced with ReferenceClasses. * Better documentation for short-hand expectations. * `test_dir` and `test_package` gain new `filter` argument which allows you to restrict which tests are run. # testthat 0.5 * bare expectations now correctly throw errors again # testthat 0.4 * autotest correctly loads code and executes tests in same environment * contexts are never closed before they are opened, and always closed at the end of file * fixed small bug in `test_dir` where each test was not given its own environment * all `expect_*` short cut functions gain a label argument, thanks to Steve Lianoglou # testthat 0.3 * all expectations now have a shortcut form, so instead of expect_that(a, is_identical_to(b)) you can do expect_identical(a, b) * new shows_message and gives_warning expectations to test warnings and messages * expect_that, equals, is_identical_to and is_equivalent to now have additional label argument which allows you to control the appearance of the text used for the expected object (for expect_that) and actual object (for all other functions) in failure messages. This is useful when you have loops that run tests as otherwise all the variable names are identical, and it's difficult to tell which iteration caused the failure. * executing bare tests gives nicer output * all expectations now give more information on failure to make it easier to track down the problem. * test_file and test_dir now run in code in separate environment to avoid pollution of global environment. They also temporary change the working directory so tests can use relative paths. * test_package makes it easier to run all tests in an installed package. Code run in this manner has access to non-exported functions and objects. If any errors or failures occur, test_package will throw an error, making it suitable for use with R CMD check. # testthat 0.2 * colourise also works in screen terminal * equals expectation provides more information about failure * expect_that has extra info argument to allow you to pass in any extra information you'd like included in the message - this is very helpful if you're using a loop to run tests * is_equivalent_to: new expectation that tests for equality ignoring attributes * library_if_available now works! 
(thanks to report and fix from Felix Andrews) * specify larger width and join pieces back together whenever deparse used (thanks to report and fix from Felix Andrews) * test_dir now looks for any files starting with test (not test- as before) testthat/MD50000644000176200001440000004234713570757323012450 0ustar liggesusers5334d7897c6f34a65a9bbb2b25ed4ed5 *DESCRIPTION 41b239837eef8d4c02659f86d53fe802 *LICENSE defcefaaa04c19d384d99d8056e0af65 *NAMESPACE 0785695bf28d89a633a8ef6d524ae31c *NEWS.md 67a9ea0c8bd335efd5dee7cd3028304e *R/auto-test.R c643bc55a60be166920da74e05206c53 *R/capture-condition.R 12af590e604239cba4470405e9b003da *R/capture-output.R ff53c681877f70ff93322f38bbbe2114 *R/colour-text.R 8fc7b7f1baa29c62d63ddc38688dab11 *R/compare-character.R 19e854426588f0b8f1da187133fd7f84 *R/compare-numeric.R 13fdeb64601457a5540e2b1262d9deaa *R/compare-time.R 55bc2672bf1038df5c5d9c953d484677 *R/compare.R 34d3436495cc61b4763d253aea21b94b *R/context.R 5a3fdc87cd6ede71a4b8a3706db5dd5d *R/describe.R d13e7cca5bc11e15bf6aa18851fac897 *R/evaluate-promise.R 4c97d19a013de45f294d5ad2d40d4a5d *R/example.R fb4f409fb670338955b4a1c23f5d2cfc *R/expect-comparison.R 4d26be12f6bf4cc7827183792f025c49 *R/expect-condition.R bfc2f0dd2331a6593cc46a211026395e *R/expect-equality.R 25fd3b426ddbd185c81e70b21d249ac6 *R/expect-inheritance.R 5610b644f0f3d303c085cc8d619e6113 *R/expect-invisible.R 3d966d8d5906f66c6fd8a8820b2311fd *R/expect-known.R 5371a005b7fc2ed4a9ece9bd91e970f1 *R/expect-length.R 80eb1a0f2556f92261c009b5aaacbae8 *R/expect-logical.R 6b0d95ba1872e8a03dcc7a2ae115c7f5 *R/expect-messages.R 1c542dd7892c5afdb7da95752eb65e5f *R/expect-named.R e1313715fe88a7da98238b9cd235c35e *R/expect-null.R 7b8b587f5a7926ac8f8aac04e6c521e2 *R/expect-output.R f29c564d37f6dc7c46cdc6a50adb7441 *R/expect-self-test.R b3083e114a5b6e31f38d2d755108e93b *R/expect-setequal.R 42cf6cd6f1c3e8426638770df6f01664 *R/expect-silent.R 0dacd5f3baa663f375be0b96432f535c *R/expect-that.R b4a6d3f195a05f246f34d16b8066c122 *R/expect-vector.R fc3f2aafe9f9599264086391bbea6577 *R/expectation.R f621b00f4bbd2ba370d5ebb34607377f *R/expectations-matches.R a2a562b4b752424a7a66c1bd11e8d718 *R/make-expectation.R a8b530ccb89f02e1afb24edb47f4d827 *R/mock.R 5ab729150d7d7745e80bf1bf653ac9b9 *R/old-school.R b9c37afd04078bdc9242d01535b05ed6 *R/praise.R 05eb405ceb70ceaa6624bbb20d85eeb6 *R/quasi-label.R cbae17b5ea87704572a59acfc6c6d41f *R/recover.R 0bbe7511499b86bff9dcd05d60abf67d *R/reporter-check.R 350df345a782e9d5159ae43c8b8d8c4d *R/reporter-debug.R 51c721af9fe7da47eb47ff8f86e72332 *R/reporter-fail.R 81ac12e8db03ceb69827f0a3df98121d *R/reporter-junit.R a40bef1c6109d9f18d5df340901685d8 *R/reporter-list.R 0a44b025bca4aaab8f6a52a4c00bc7e6 *R/reporter-location.R 274f1c0f37669e65bd58359fc4c03d15 *R/reporter-minimal.R 5354ec3a39beeb334e53479228734c2a *R/reporter-multi.R 32ec5f65b28d69e03a86e31281a3920f *R/reporter-progress.R 5e5e7b9ec8a0b1131239ab613b820ca0 *R/reporter-rstudio.R 8820f7e33f359bb3c54b6c3e6f44b04e *R/reporter-silent.R 13e239f3e45cbb5bc1fb6a065ad5fa9b *R/reporter-stop.R b375be6d564cd5bf38f3aedf6b6e9dba *R/reporter-summary.R 26603c94cddac0fba776d6aa5375a2d5 *R/reporter-tap.R 406170d58f37b33f66ad00b7b7e4b8e5 *R/reporter-teamcity.R f2352d0ad5253f36a5cba1a76b60dfe7 *R/reporter-zzz.R ecdc7d22b7ac947e5e8095b37b3b0d30 *R/reporter.R fe0908b5a32d5bb1452f226f2b6e7901 *R/skip.R f21ac4008de083e02264aae8c0bdee28 *R/source.R 0e8e617e8f105c742610b1146e5c71d9 *R/stack.R b46ecad936d2ee56cab5e8f5ba365a8c *R/teardown.R 4ac14dad9d6e8de262937de735a4d003 *R/test-compiled-code.R 
e81c50fb7add9eca7ee60fcc11cb06a3 *R/test-directory.R bcaab04d2928340fe769563f8c66a172 *R/test-example.R b717d4475ce18b3598ffd926df182175 *R/test-files.R 0a527a0de94e02e04ae81545f1a9a52f *R/test-path.R a3f21524ce171b0e535699721d145f0c *R/test-that.R 04e13da826a1f40d2e85fabe87e934de *R/try-again.R 1257d06f7ccf4ec7bd31bf287ac32d1b *R/utils-io.R 9144936b037dbc46f5da2ca8327e07bc *R/utils.R 4908068ba804fdf3a5f66b79df6af79f *R/verify-output.R 21eebd03443e6c9b0db39a5dd3340d67 *R/watcher.R 612d6fdb8b9525cd28ec972c487bb5b3 *README.md 2399528846cbdc0feb08dfcf92cabcde *build/vignette.rds cd1f5828e307a856f2844936aea84ef9 *inst/CITATION dd843ceb5a1b0bdde93647b4659fe17a *inst/doc/custom-expectation.R dbccd78c5f627954f8ed711ed7ad5cc0 *inst/doc/custom-expectation.Rmd f2006fb47402fa3e0b1be344b7db7289 *inst/doc/custom-expectation.html 1ece0759f60e98193d53545648d19146 *inst/examples/test-failure.R 9de1a507feaa37a40bbb4203ec763ae0 *inst/examples/test-success.R 543392950ccc39ed50ebf26c75de0910 *inst/include/testthat.h 417b6c2489c59bd44e2835759cf12824 *inst/include/testthat/testthat.h 6abd285711a207de0a9a8d0d2fdc1f8c *inst/include/testthat/vendor/catch.h 2d8c0561278371804f593566e3f19772 *inst/resources/catch-routine-registration.R 82c4713a0f02f21459e1e5a696a02eaa *inst/resources/test-cpp.R 3332a4affe5b211b37b65a3d3311d9ba *inst/resources/test-example.cpp c2cb0c30e10da611f9fbe56aea52f7a9 *inst/resources/test-runner.cpp 8d320a32bbf9fcc7fe83a764600473f4 *man/CheckReporter.Rd abad2ca25d847eb1b4efa75064afaaa2 *man/DebugReporter.Rd 83fc0de87fbdde82379aef8380be7e3e *man/FailReporter.Rd 1ad7a14b765b175937d43bd515988330 *man/JunitReporter.Rd a81813a50337f6133244e87d4383829f *man/ListReporter.Rd c4b0aaadcac852c641ebe23c079d0ce3 *man/LocationReporter.Rd a7388dca58459057762aaae53377f209 *man/MinimalReporter.Rd cb0682551309c7478d4749f2debd9aa9 *man/MultiReporter.Rd 8fea14872fab8147089f7dd6a16d2785 *man/ProgressReporter.Rd d3952b4c0ac08c2b3c7b93ed09a4f0e2 *man/Reporter.Rd 357e162be42c79a95aeb0d78a82f1d8d *man/RstudioReporter.Rd cbfe19dea3192a59ee8223bd8390d80a *man/SilentReporter.Rd b6f80c8c669c372fde1ae99a5823270a *man/StopReporter.Rd 5238604cc0b04c7d0f8df30d24e44256 *man/SummaryReporter.Rd 80eb8efa4efc0a033a967cdfccaa85a2 *man/TapReporter.Rd 2d230fa5e300652e1c86490bb0dccef4 *man/TeamcityReporter.Rd 7b1e573ace51b624ae187ce5419d348a *man/auto_test.Rd f8f6671ce5427a4a51cbf9881c6ac28a *man/auto_test_package.Rd 464ea3723b8c23cb83d033790f99a8e8 *man/capture_condition.Rd cfd3ceece4a7510a86fac29be5e27668 *man/capture_output.Rd 2059b2a5904afc544e789c26cd369be4 *man/compare.Rd ea1c54d24c6395c0f988d14b0ae40956 *man/compare_state.Rd a096b4cc9c852b05dc7a548093508336 *man/comparison-expectations.Rd d80bfb1217c341e6310918fe1ea78ab7 *man/context.Rd 01e9ab6ed7b28e72a05dab940e88336d *man/default_reporter.Rd 01e8cfd4781f1fb2fc83bb3118750080 *man/describe.Rd f2b7ae4da7936bcf6a6d13a161c451ae *man/dir_state.Rd 63c66fd666ed01b33fe294a40c8c7aea *man/equality-expectations.Rd 83120acf2b4d08657f53db8ec927e470 *man/evaluate_promise.Rd 98359575ae525766f3738a3d95fea58c *man/expect.Rd 30487a1fb2ac489450d4841489842bdf *man/expect_cpp_tests_pass.Rd a1ff5c3a918859100421a804e315a249 *man/expect_error.Rd f679bdeb35687de72d833324f10aa548 *man/expect_invisible.Rd 4d0ff1196f165f860949f18fce133501 *man/expect_is.Rd b6860d37b5f7be922897159e7fdce8b8 *man/expect_known_output.Rd 4db13f66b3e213a679ac19cf918364ef *man/expect_length.Rd c039a08e2fe2ca6c789e802847183bed *man/expect_match.Rd 6adbb60b204d179d3bba7a8a8966c62c *man/expect_message.Rd 
93b865abbc898ca8637bea45cc5930e9 *man/expect_named.Rd 983dc28bbf29ba42c59cead436bb8268 *man/expect_null.Rd fb91f01f735bceb7176ae711659d6b99 *man/expect_output.Rd f890a74f95c82b199e40ad18cc0999b7 *man/expect_setequal.Rd dcaab50364b8edc3299d0f00c3716b49 *man/expect_silent.Rd 106f637592ec17341c0b61f58d4bf5af *man/expect_success.Rd 0c920c5a1104811ccdcd6d7cbe330e5d *man/expect_that.Rd 199c9cd286757db6a513c2d32f8d5d76 *man/expect_vector.Rd f879d55c3920a398e7d23aa77a5090d1 *man/expectation.Rd 80073001b5a279a27e206e3caf112bb5 *man/fail.Rd b7e68745669ec4141b7940f4dbebc48f *man/figures/logo.png bb0e79b8bdc83b519a64be149499c48c *man/find_reporter.Rd ae98554d5e2fe407cec134b71e22e7a3 *man/find_test_scripts.Rd ef0b249ee3f6da5a7512bc3236562a2d *man/inheritance-expectations.Rd c7429e64e72ca45143518a32d8ed607c *man/is_informative_error.Rd e9f11aa70441f09e2d78267892358413 *man/logical-expectations.Rd 8aa87707c6a53a51dcc2a913a93154bd *man/make_expectation.Rd 970f42fa63244262e70100f5f8304614 *man/not.Rd dabf019327048fc4c942898a6a663418 *man/oldskool.Rd 9f3353edc3135d9b1238e80d36835de9 *man/quasi_label.Rd 02bf46e94e692fccaaba2db6d66a7e06 *man/reexports.Rd efdd9f36891d19bf8d083d569c6ab8d6 *man/reporter-accessors.Rd ae91c9a61217c2f9da9633d63c74b405 *man/safe_digest.Rd f07f1513ddaf9339a1d1ab62aa0d5b80 *man/skip.Rd 3d8ec60292fd1a43722c4f4f58d3c039 *man/source_file.Rd f77872b273cd71427c91de15bcf1ffb3 *man/takes_less_than.Rd 47139bdbe10b23023865bf9bb45f713c *man/teardown.Rd b4578272554cf5e3c5c3022ed2db69a5 *man/test_dir.Rd d80f18859dc03043ef6e5e9521cb42dd *man/test_env.Rd 5a768411474d6b82809c7afb503c1978 *man/test_examples.Rd f478636794cce9752642ca17f44d6896 *man/test_file.Rd 86f63157419e09dcc092d13c261a309c *man/test_path.Rd 277779ca0e5c0fb7bd3fbb05fbaf8ead *man/test_that.Rd b1943999f526f60188c70459b56a301d *man/testthat-package.Rd d8686384001c8ebc03b84dada1fa43f4 *man/testthat_examples.Rd 79ee52da3640f0c553c36954613485f7 *man/testthat_results.Rd ea0d54d0e935a91b3b1ec9eb4cc1c792 *man/try_again.Rd 3fce716abc4be90324164fab29602c6b *man/use_catch.Rd 8560915e31e37725be641f3ea9032e63 *man/verify_output.Rd 54995c6ac4ddd10f9cdc2266214921de *man/watch.Rd 71fee7bd8f34d8dce71d2c3ae98910ee *man/with_mock.Rd d09410b8ca2729ce83dca3d0d4359f3e *src/Makevars d09410b8ca2729ce83dca3d0d4359f3e *src/Makevars.win c9e80245e82aa0383ea8605e63a85434 *src/init.c 8d71e14ae7b661a36b31c729f0b5a994 *src/reassign.c c0cdf926be089c7d4d8820d728991768 *src/test-catch.cpp 3332a4affe5b211b37b65a3d3311d9ba *src/test-example.cpp c2cb0c30e10da611f9fbe56aea52f7a9 *src/test-runner.cpp 1c4f1ad3ec7826fc8c9c2f55a40e97a0 *tests/test-catch.R 41fc10f929d6e7453c3c653547437ce6 *tests/testthat.R dca111da574904db7ea9c6f8d205a1c4 *tests/testthat/context.R 7340014f5a98c1db4a52789a689ee123 *tests/testthat/helper-assign.R 33cba49eaffc9f28bee30bd0c0b71ecd *tests/testthat/helper-junitmock.R 5e60a11148e28d949b9d2002af8c1ea8 *tests/testthat/helper-reporter.R a5279ea95c74f87ce4a41874408305cd *tests/testthat/helper-testthat.R cfde242811815c10019817cd919d7719 *tests/testthat/one.rds b050543415e230e4791e6351b440e2dc *tests/testthat/reporters/backtraces.R 51ed6b92c2624ca35043d5655d93182d *tests/testthat/reporters/check.txt 1d501da21be1427f4d001a5743d09159 *tests/testthat/reporters/debug.txt 4f994bb4151c7afed7b7a63b515dcb14 *tests/testthat/reporters/fail.R e81bc5794cca326ebb93c70b0ea626a6 *tests/testthat/reporters/junit.txt ca81827f32a322c7b2b507259bafd9de *tests/testthat/reporters/location.txt bfee5e839311f14a9eb2331ebab9b37b *tests/testthat/reporters/minimal.txt 
98735464c4458c9164cf9ee050d680f6 *tests/testthat/reporters/progress-backtraces.txt ee86dff9c0f20ad2e588108640f329dc *tests/testthat/reporters/progress.txt 299f55d649d1f217f08843dab6421c6d *tests/testthat/reporters/rstudio.txt d41d8cd98f00b204e9800998ecf8427e *tests/testthat/reporters/silent.txt cec6c1eb67a9d7bd94900d210f671d8a *tests/testthat/reporters/stop.txt fb3706f775248155568bb16efe7a4c4d *tests/testthat/reporters/summary-2.txt 91e83f6cc936152156404b902f7fbdab *tests/testthat/reporters/summary-no-dots.txt 51585e0ac3910200134bd6d3b2fb9cbc *tests/testthat/reporters/summary.txt e52583ff43f4f704eb2ac06e210545ab *tests/testthat/reporters/tap.txt 4f48f1fff063ceda980aa9831009a6ac *tests/testthat/reporters/teamcity.txt a9a1f45d0a34aca6c62087fa352e2170 *tests/testthat/reporters/tests.R aeef7e1beef579c305ffd7d98971cb71 *tests/testthat/setup.R bf15d5dd3f1110e097d830715000eeed *tests/testthat/teardown.R 6b43c8a6b6a93727a1d9d1ffb87a416c *tests/testthat/test-bare.R 355a538ca99ece41af6305bf2d2ae6cc *tests/testthat/test-catch.R 2e076e854a1e8141602a83d946d5875e *tests/testthat/test-colour.R c2c66ad5ef40095dad50d1a253655557 *tests/testthat/test-compare-character.R 81b66f8aa634edca5469938453fd1a7b *tests/testthat/test-compare-numeric.R 3edf03fd485e455c66d846e9ecc7bc8c *tests/testthat/test-compare-time.R 3b540232067c2527068b2f286aa36e85 *tests/testthat/test-compare.R e61366fdb27236c37157f26643960794 *tests/testthat/test-context.R 2bd2a199bd20f063d6a91e0bba718e57 *tests/testthat/test-cpp.R ae0d0b28c871d7d41453c8ae3af5a35b *tests/testthat/test-debug-reporter.R fcf73bd42c60a77fddc97de9827230e6 *tests/testthat/test-describe.R df20c3b5b6c08e4663186e334c134c55 *tests/testthat/test-environment.R ef67f70c07f7152e1e6c2ded78e0e530 *tests/testthat/test-error/test-error.R 1319379d822056d441babfc1d1eaf10c *tests/testthat/test-evaluate-promise.R f87af41c50072aa492b63f3243e2bb9a *tests/testthat/test-examples.R 4acf95068d86e780ff2c2bc25bd99321 *tests/testthat/test-expect-comparison.R 8b9a5fe3f74ee2265125b8689bf49fc6 *tests/testthat/test-expect-condition-custom.txt d42c1fef38bfd649918c52018d88a4b9 *tests/testthat/test-expect-condition.R 92cc82e6526f7916ef5477de7879f06d *tests/testthat/test-expect-condition.txt 8760b962332c15a04b606293fc63d54f *tests/testthat/test-expect-equality.R 860eb18d9c272908976aec859bdbe65d *tests/testthat/test-expect-inheritance.R 4a4af63f56c7e42dfadb81f615d74f83 *tests/testthat/test-expect-invisible.R 9bee6938318e126a96c3d7c95aab9fa9 *tests/testthat/test-expect-known-hash.R bb8246b226ba375f38659983eb907b08 *tests/testthat/test-expect-known-output.R 8d2f9f0e6a94e9ef49d9d317fa1f9e6d *tests/testthat/test-expect-known-value.R 01f5516789b88009a1a8974acad4b8b2 *tests/testthat/test-expect-length.R 7ee0e93892379440abd9375a77b9eadb *tests/testthat/test-expect-logical.R 38bb70b19c6e996fc5b42ced808eb863 *tests/testthat/test-expect-match.R 07365cbc01a15c2432a245373635e3c2 *tests/testthat/test-expect-messages-warning.txt fcaf3e05b1dd8c690d7291e359aac3ce *tests/testthat/test-expect-messages.R 9dc68f259d6c188a8b8655f39a9c7374 *tests/testthat/test-expect-named.R d4fc15b3189dc6fcbf42fbfa5c4ee0f7 *tests/testthat/test-expect-null.R 5c82785a60440d0453aefabe39886670 *tests/testthat/test-expect-output.R c7c3c84a05e12be8c0430f42ac51adec *tests/testthat/test-expect-reference.R cb873bbb53fe664a8445f22e3a612532 *tests/testthat/test-expect-self-test.R 7e8bd7ece9dba98a6b5e085b50435abf *tests/testthat/test-expect-setequal.R 74d6b5f91689b82365595a5cfb6c06c6 *tests/testthat/test-expect-silent.R d5d00c3a19ad9196cd461f2ac0fcf30a 
*tests/testthat/test-expect-vector.R 36979f047b3d34dc153218c84000cf6a *tests/testthat/test-expect_that.R 9990ed03b1b6d7b8972ff0ca5271904a *tests/testthat/test-expectation.R dca9d21a10fa57c3ab3d08b240855623 *tests/testthat/test-helpers.R 9516ad5a07a2871d72f2226e1ac40996 *tests/testthat/test-label.R bdf2a9047484f04fe7696b06e413334f *tests/testthat/test-list-reporter/test-bare-expectations.R 56ce90c9e238efdf955c18be74458ec8 *tests/testthat/test-list-reporter/test-exception-outside-tests.R bebab3a08e2ea3bfd1cbd03f8f9b9c47 *tests/testthat/test-list-reporter/test-exercise-list-reporter.R aaa1e95b5eaa73ec22fa705f9013a3e6 *tests/testthat/test-list-reporter/test-only-error.R 8ea9befa8f1278b3f925f341a78c6f90 *tests/testthat/test-make-expectation.R 800484ce480f20bb3f13e7a109c13dea *tests/testthat/test-mock.R 50592d9357d7e2814f0f7d3f2fdbb0e8 *tests/testthat/test-old-school.R d41d8cd98f00b204e9800998ecf8427e *tests/testthat/test-path-installed/testthat-tests/testthat/empty d41d8cd98f00b204e9800998ecf8427e *tests/testthat/test-path-missing/empty d41d8cd98f00b204e9800998ecf8427e *tests/testthat/test-path-present/tests/testthat/empty 12399c1836ab5719e6f7fc68e287dfc6 *tests/testthat/test-quasi-label.R c018b5e81db078571e0902d9da9af518 *tests/testthat/test-reporter-junit.R 4dca4e28d22e8c6f7cfa6ecdfce02663 *tests/testthat/test-reporter-list.R 747e97a58173abce62479b73d1bf68d9 *tests/testthat/test-reporter-multi.R a59a0a456397a6f888e602e70ca15b94 *tests/testthat/test-reporter-zzz.R a3af9ac7fb5f44966a2cc8a0b24dabc9 *tests/testthat/test-reporter.R 50d50c712e90ff5c43ea2d45652bdfcd *tests/testthat/test-skip.R c4a62cd162daf6fccc348f9cbe7d7486 *tests/testthat/test-source.R 6addb8da099d18560d99764513cb07a5 *tests/testthat/test-source_dir.R 68e40050db215ec10b1df554c56dc8c4 *tests/testthat/test-teardown-1.R c363329ad76f520783b98d3e9a562589 *tests/testthat/test-teardown-2.R 3e00745173d6a97f092f657129334ac9 *tests/testthat/test-test-example.R baf422580a231abc8e092423beaa2d8b *tests/testthat/test-test-path.R 5f7efeeeb776e90935d45a342c3d2bb6 *tests/testthat/test-test-that.R 4385a5f20bcab5d544aec3e3ef6d70b1 *tests/testthat/test-test_dir.R 2169ed8a6f10ca032c8a4c7860561827 *tests/testthat/test-test_dir.txt 8180c3cea7b02ab0f28567b25655bee0 *tests/testthat/test-try-again.R 665a6dd827f27542fe0d19d50954d315 *tests/testthat/test-verify-conditions-lines.txt 2ced23f41abd70581eef5cd593c49a56 *tests/testthat/test-verify-conditions.txt 116a95120141a2554ce41fd7ca35cc6b *tests/testthat/test-verify-constructed-calls.txt e9fd9695724afc2f9a1dcd0ee1d99017 *tests/testthat/test-verify-output.R 41df432fbe39b933083aef3d1a7d0b3e *tests/testthat/test-verify-output.txt e20a8def27befc0c9f779683b97c8d4a *tests/testthat/test-verify-unicode-false.txt 28606b7dd874612a7e8ed0c1e0b0ab4d *tests/testthat/test-verify-unicode-true.txt f5af1f7fab9ca0f74cabd7e5eb913cb7 *tests/testthat/test-warning/test-warning.R 0c1bbb97219306f23cae9a302643d325 *tests/testthat/test-watcher.R a6efbc07fba2e30ed4da6b6806873548 *tests/testthat/test_dir/helper_hello.R a96923901555694e457d4ed22b47a897 *tests/testthat/test_dir/test-bare-expectations.R 816385d70c5f3cbab9cf36cef0fdf5d9 *tests/testthat/test_dir/test-basic.R 0f94f63c25db78f88d444273d037e1b7 *tests/testthat/test_dir/test-empty.R 3a1f5da49b94f6aa9c7c2eca5f3d8d3b *tests/testthat/test_dir/test-errors.R 96701b464ec69f7b2dc8726a1203d5ac *tests/testthat/test_dir/test-failures.R 878f6b8697e826ce93f54ba08369e6a9 *tests/testthat/test_dir/test-helper.R 4545cb72f7310794c70b6a88f0d91b73 *tests/testthat/test_dir/test-skip.R 
059d222302b6540ac7ba785c648c1149 *tests/testthat/too-many-failures.R 51e992e553c268305920c6522b0b284e *tests/testthat/utf8.R 9674460b911257f8a2de15ed1a900b14 *tests/testthat/width-80.txt dbccd78c5f627954f8ed711ed7ad5cc0 *vignettes/custom-expectation.Rmd testthat/inst/0000755000176200001440000000000013564761037013103 5ustar liggesuserstestthat/inst/examples/0000755000176200001440000000000013456035023014706 5ustar liggesuserstestthat/inst/examples/test-failure.R0000644000176200001440000000025013456034771017443 0ustar liggesusersplus <- function(x, y) 1 + 1 test_that("one plus one is two", { expect_equal(plus(1, 1), 2) }) test_that("two plus two is four", { expect_equal(plus(2, 2), 4) }) testthat/inst/examples/test-success.R0000644000176200001440000000051513456034771017470 0ustar liggesuserstest_that("one plus one is two", { expect_equal(1 + 1, 2) }) test_that("you can skip tests if needed", { skip("This tests hasn't been written yet") }) test_that("some tests have warnings", { expect_equal(log(-1), NaN) }) test_that("some more successes just to pad things out", { expect_true(TRUE) expect_false(FALSE) }) testthat/inst/doc/0000755000176200001440000000000013564761037013650 5ustar liggesuserstestthat/inst/doc/custom-expectation.R0000644000176200001440000000264313564761037017633 0ustar liggesusers## ----setup, include = FALSE--------------------------------------------------- library(testthat) knitr::opts_chunk$set(collapse = TRUE, comment = "#>") ## ----------------------------------------------------------------------------- expect_length <- function(object, n) { # 1. Capture object and label act <- quasi_label(rlang::enquo(object), arg = "object") # 2. Call expect() act$n <- length(act$val) expect( act$n == n, sprintf("%s has length %i, not length %i.", act$lab, act$n, n) ) # 3. Invisibly return the value invisible(act$val) } ## ----------------------------------------------------------------------------- mtcars %>% expect_type("list") %>% expect_s3_class("data.frame") %>% expect_length(11) ## ----------------------------------------------------------------------------- expect_length <- function(object, n) { act <- quasi_label(rlang::enquo(object), arg = "object") act$n <- length(act$val) if (act$n == n) { succeed() return(invisible(act$val)) } message <- sprintf("%s has length %i, not length %i.", act$lab, act$n, n) fail(message) } ## ----------------------------------------------------------------------------- test_that("length computed correctly", { expect_success(expect_length(1, 1)) expect_failure(expect_length(1, 2), "has length 1, not length 2.") expect_success(expect_length(1:10, 10)) expect_success(expect_length(letters[1:5], 5)) }) testthat/inst/doc/custom-expectation.Rmd0000644000176200001440000000644313564563701020154 0ustar liggesusers--- title: "Custom expectations" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Custom expectations} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include = FALSE} library(testthat) knitr::opts_chunk$set(collapse = TRUE, comment = "#>") ``` This vignette shows you how to create custom expectations that work identically to the built-in `expect_` functions. ## Creating an expectation There are three main parts to writing an expectation, as illustrated by `expect_length()`: ```{r} expect_length <- function(object, n) { # 1. Capture object and label act <- quasi_label(rlang::enquo(object), arg = "object") # 2. 
Call expect() act$n <- length(act$val) expect( act$n == n, sprintf("%s has length %i, not length %i.", act$lab, act$n, n) ) # 3. Invisibly return the value invisible(act$val) } ``` ## Quasi-labelling The first step in any expectation is to capture the actual object, and generate a label for it to use if a failure occur. All testthat expectations support quasiquotation so that you can unquote variables. This makes it easier to generate good labels when the expectation is called from a function or within a for loop. By convention, the first argument to every `expect_` function is called `object`, and you capture it's value (`val`) and label (`lab`) with `act <- quasi_label(enquo(object))`, where `act` is short for actual. ### Verify the expectation Next, you should verify the expectation. This often involves a little computation (here just figuring out the `length`), and you should typically store the results back into the `act` object. Next you call `expect()`. This has two arguments: 1. `ok`: was the expectation successful? This is usually easy to write 2. `failure_message`: What informative error message should be reported to the user so that they can diagnose the problem. This is often hard to write! For historical reasons, most built-in expectations generate these with `sprintf()`, but today I'd recommend using the [glue](http://glue.tidyverse.org) package ### Invisibly return the input Expectation functions are called primarily for their side-effects (triggering a failure), so should invisibly return their input, `act$val`. This allows expectations to be chained: ```{r} mtcars %>% expect_type("list") %>% expect_s3_class("data.frame") %>% expect_length(11) ``` ## `succeed()` and `fail()` For expectations with more complex logic governing when success or failure occurs, you can use `succeed()` and `fail()`. These are simple wrappers around `expect()` that allow you to write code that looks like this: ```{r} expect_length <- function(object, n) { act <- quasi_label(rlang::enquo(object), arg = "object") act$n <- length(act$val) if (act$n == n) { succeed() return(invisible(act$val)) } message <- sprintf("%s has length %i, not length %i.", act$lab, act$n, n) fail(message) } ``` ## Testing your expectations Use the expectations `expect_success()` and `expect_failure()` to test your expectation. ```{r} test_that("length computed correctly", { expect_success(expect_length(1, 1)) expect_failure(expect_length(1, 2), "has length 1, not length 2.") expect_success(expect_length(1:10, 10)) expect_success(expect_length(letters[1:5], 5)) }) ``` testthat/inst/doc/custom-expectation.html0000644000176200001440000003711513564761037020400 0ustar liggesusers Custom expectations

Custom expectations

This vignette shows you how to create custom expectations that work identically to the built-in expect_ functions.

Creating an expectation

There are three main parts to writing an expectation, as illustrated by expect_length():

expect_length <- function(object, n) {
  # 1. Capture object and label
  act <- quasi_label(rlang::enquo(object), arg = "object")

  # 2. Call expect()
  act$n <- length(act$val)
  expect(
    act$n == n,
    sprintf("%s has length %i, not length %i.", act$lab, act$n, n)
  )

  # 3. Invisibly return the value
  invisible(act$val)
}

Quasi-labelling

The first step in any expectation is to capture the actual object, and generate a label for it to use if a failure occurs. All testthat expectations support quasiquotation so that you can unquote variables. This makes it easier to generate good labels when the expectation is called from a function or within a for loop.

By convention, the first argument to every expect_ function is called object, and you capture its value (val) and label (lab) with act <- quasi_label(enquo(object)), where act is short for actual.
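
For example (a minimal sketch, not taken from the original vignette), unquoting a loop variable with !! means the failure message reports the offending value rather than the variable name x:

for (x in list(1:4, 1:5)) {
  # relies on quasi_label(enquo(object)) picking up the unquoted value as the label
  expect_length(!!x, 4)
}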

Verify the expectation

Next, you should verify the expectation. This often involves a little computation (here just figuring out the length), and you should typically store the results back into the act object.

Next you call expect(). This has two arguments:

  1. ok: was the expectation successful? This is usually easy to write.

  2. failure_message: What informative error message should be reported to the user so that they can diagnose the problem? This is often hard to write!

    For historical reasons, most built-in expectations generate these with sprintf(), but today I’d recommend using the glue package, as sketched below.
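
For instance (a sketch only, assuming the glue package is available), the failure message used above could be written with glue::glue() instead of sprintf():

expect_length <- function(object, n) {
  act <- quasi_label(rlang::enquo(object), arg = "object")

  act$n <- length(act$val)
  expect(
    act$n == n,
    # glue interpolates act$lab, act$n and n directly into the message string
    glue::glue("{act$lab} has length {act$n}, not length {n}.")
  )

  invisible(act$val)
}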

Invisibly return the input

Expectation functions are called primarily for their side-effects (triggering a failure), so they should invisibly return their input, act$val. This allows expectations to be chained:

mtcars %>%
  expect_type("list") %>%
  expect_s3_class("data.frame") %>% 
  expect_length(11)

succeed() and fail()

For expectations with more complex logic governing when success or failure occurs, you can use succeed() and fail(). These are simple wrappers around expect() that allow you to write code that looks like this:

expect_length <- function(object, n) {
  act <- quasi_label(rlang::enquo(object), arg = "object")

  act$n <- length(act$val)
  if (act$n == n) {
    succeed()
    return(invisible(act$val))
  }

  message <- sprintf("%s has length %i, not length %i.", act$lab, act$n, n)
  fail(message)
}

Testing your expectations

Use the expectations expect_success() and expect_failure() to test your expectation.

test_that("length computed correctly", {
  expect_success(expect_length(1, 1))
  expect_failure(expect_length(1, 2), "has length 1, not length 2.")
  expect_success(expect_length(1:10, 10))
  expect_success(expect_length(letters[1:5], 5))
})
testthat/inst/resources/0000755000176200001440000000000013173620527015107 5ustar liggesuserstestthat/inst/resources/catch-routine-registration.R0000644000176200001440000000042613173076020022502 0ustar liggesusers# This dummy function definition is included with the package to ensure that # 'tools::package_native_routine_registration_skeleton()' generates the required # registration info for the 'run_testthat_tests' symbol. (function() { .Call("run_testthat_tests", PACKAGE = "%s") }) testthat/inst/resources/test-cpp.R0000644000176200001440000000012713137625470016773 0ustar liggesuserscontext("C++") test_that("Catch unit tests pass", { expect_cpp_tests_pass("%s") }) testthat/inst/resources/test-example.cpp0000644000176200001440000000216012666267031020225 0ustar liggesusers/* * This file uses the Catch unit testing library, alongside * testthat's simple bindings, to test a C++ function. * * For your own packages, ensure that your test files are * placed within the `src/` folder, and that you include * `LinkingTo: testthat` within your DESCRIPTION file. */ // All test files should include the // header file. #include // Normally this would be a function from your package's // compiled library -- you might instead just include a header // file providing the definition, and let R CMD INSTALL // handle building and linking. int twoPlusTwo() { return 2 + 2; } // Initialize a unit test context. This is similar to how you // might begin an R test file with 'context()', expect the // associated context should be wrapped in braced. context("Sample unit tests") { // The format for specifying tests is similar to that of // testthat's R functions. Use 'test_that()' to define a // unit test, and use 'expect_true()' and 'expect_false()' // to test the desired conditions. test_that("two plus two equals four") { expect_true(twoPlusTwo() == 4); } } testthat/inst/resources/test-runner.cpp0000644000176200001440000000036712666267031020112 0ustar liggesusers/* * Please do not edit this file -- it ensures that your package will export a * 'run_testthat_tests()' C routine that can be used to run the Catch unit tests * available in your package. */ #define TESTTHAT_TEST_RUNNER #include testthat/inst/include/0000755000176200001440000000000013137625470014522 5ustar liggesuserstestthat/inst/include/testthat.h0000644000176200001440000000003713137625470016533 0ustar liggesusers#include testthat/inst/include/testthat/0000755000176200001440000000000013570757322016365 5ustar liggesuserstestthat/inst/include/testthat/testthat.h0000644000176200001440000001075013347747707020411 0ustar liggesusers#ifndef TESTTHAT_HPP #define TESTTHAT_HPP #define TESTTHAT_TOKEN_PASTE_IMPL(__X__, __Y__) __X__ ## __Y__ #define TESTTHAT_TOKEN_PASTE(__X__, __Y__) TESTTHAT_TOKEN_PASTE_IMPL(__X__, __Y__) #define TESTTHAT_DISABLED_FUNCTION \ static void TESTTHAT_TOKEN_PASTE(testthat_disabled_test_, __LINE__) () /** * Conditionally enable or disable 'testthat' + 'Catch'. * Force 'testthat' to be enabled by defining TESTTHAT_ENABLED. * Force 'testthat' to be disabled by defining TESTTHAT_DISABLED. * TESTTHAT_DISABLED takes precedence. * 'testthat' is disabled on Solaris by default. * * Hide symbols containing static members on gcc, to work around issues * with DLL unload due to static members in inline functions. 
* https://github.com/r-lib/devtools/issues/1832 */ #if defined(__GNUC__) || defined(__clang__) # define TESTTHAT_ENABLED # define TESTTHAT_ATTRIBUTE_HIDDEN __attribute__ ((visibility("hidden"))) #else # define TESTTHAT_ATTRIBUTE_HIDDEN #endif #if defined(__SUNPRO_C) || defined(__SUNPRO_CC) || defined(__sun) || defined(__SVR4) # define TESTTHAT_DISABLED #endif #ifndef TESTTHAT_ENABLED # define TESTTHAT_DISABLED #endif #ifndef TESTTHAT_DISABLED # define CATCH_CONFIG_PREFIX_ALL # define CATCH_CONFIG_NOSTDOUT # ifdef TESTTHAT_TEST_RUNNER # define CATCH_CONFIG_RUNNER # endif # include // CHAR_MAX # include // EOF # ifdef __GNUC__ # pragma GCC diagnostic ignored "-Wparentheses" # endif namespace Catch { // Avoid 'R CMD check' warnings related to the use of 'std::rand()' and // 'std::srand()'. Since we don't call any Catch APIs that use these // functions, it suffices to just override them in the Catch namespace. inline void srand(unsigned) {} inline int rand() { return 42; } // Catch has calls to 'exit' on failure, which upsets R CMD check. // We won't bump into them during normal test execution so just override // it in the Catch namespace before we include 'catch'. inline void exit(int) throw() {} } # include "vendor/catch.h" // Implement an output stream that avoids writing to stdout / stderr. extern "C" void Rprintf(const char*, ...); extern "C" void R_FlushConsole(); namespace testthat { class r_streambuf : public std::streambuf { public: r_streambuf() {} protected: virtual std::streamsize xsputn(const char* s, std::streamsize n) { if (n == 1) Rprintf("%c", *s); else Rprintf("%.*s", n, s); return n; } virtual int overflow(int c = EOF) { if (c == EOF) return c; if (c > CHAR_MAX) return c; Rprintf("%c", (char) c); return c; } virtual int sync() { R_FlushConsole(); return 0; } }; class r_ostream : public std::ostream { public: r_ostream() : std::ostream(new r_streambuf) {} ~r_ostream() { delete rdbuf(); } }; // Allow client packages to access the Catch::Session // exported by testthat. 
# ifdef CATCH_CONFIG_RUNNER TESTTHAT_ATTRIBUTE_HIDDEN inline Catch::Session& catchSession() { static Catch::Session instance; return instance; } inline bool run_tests() { return catchSession().run() == 0; } # endif // CATCH_CONFIG_RUNNER } // namespace testthat namespace Catch { TESTTHAT_ATTRIBUTE_HIDDEN inline std::ostream& cout() { static testthat::r_ostream instance; return instance; } TESTTHAT_ATTRIBUTE_HIDDEN inline std::ostream& cerr() { static testthat::r_ostream instance; return instance; } } // namespace Catch # ifdef TESTTHAT_TEST_RUNNER // ERROR will be redefined by R; avoid compiler warnings # ifdef ERROR # undef ERROR # endif # include # include extern "C" SEXP run_testthat_tests() { bool success = testthat::run_tests(); return ScalarLogical(success); } # endif // TESTTHAT_TEST_RUNNER # define context(__X__) CATCH_TEST_CASE(__X__ " | " __FILE__) # define test_that CATCH_SECTION # define expect_true CATCH_CHECK # define expect_false CATCH_CHECK_FALSE # define expect_error CATCH_CHECK_THROWS # define expect_error_as CATCH_CHECK_THROWS_AS #else // TESTTHAT_DISABLED # define context(__X__) TESTTHAT_DISABLED_FUNCTION # define test_that(__X__) if (false) # define expect_true(__X__) (void) (__X__) # define expect_false(__X__) (void) (__X__) # define expect_error(__X__) (void) (__X__) # define expect_error_as(__X__, __Y__) (void) (__X__) # ifdef TESTTHAT_TEST_RUNNER # include # include extern "C" SEXP run_testthat_tests() { return ScalarLogical(true); } # endif // TESTTHAT_TEST_RUNNER #endif // TESTTHAT_DISABLED #endif /* TESTTHAT_HPP */ testthat/inst/include/testthat/vendor/0000755000176200001440000000000013521025554017651 5ustar liggesuserstestthat/inst/include/testthat/vendor/catch.h0000644000176200001440000146535313521025554021125 0ustar liggesusers/* * Catch v1.9.6 * Generated: 2017-06-27 12:19:54.557875 * ---------------------------------------------------------- * This file has been merged from multiple headers. Please don't edit it directly * Copyright (c) 2012 Two Blue Cubes Ltd. All rights reserved. * * Distributed under the Boost Software License, Version 1.0. 
(See accompanying * file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ #ifndef TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED #define TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED #define TWOBLUECUBES_CATCH_HPP_INCLUDED #ifdef __clang__ # pragma clang system_header #elif defined __GNUC__ # pragma GCC system_header #endif // #included from: internal/catch_suppress_warnings.h #ifdef __clang__ # ifdef __ICC // icpc defines the __clang__ macro # pragma warning(push) # pragma warning(disable: 161 1682) # else // __ICC # pragma clang diagnostic ignored "-Wglobal-constructors" # pragma clang diagnostic ignored "-Wvariadic-macros" # pragma clang diagnostic ignored "-Wc99-extensions" # pragma clang diagnostic ignored "-Wunused-variable" # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wpadded" # pragma clang diagnostic ignored "-Wc++98-compat" # pragma clang diagnostic ignored "-Wc++98-compat-pedantic" # pragma clang diagnostic ignored "-Wswitch-enum" # pragma clang diagnostic ignored "-Wcovered-switch-default" # endif #elif defined __GNUC__ # pragma GCC diagnostic ignored "-Wvariadic-macros" # pragma GCC diagnostic ignored "-Wunused-variable" # pragma GCC diagnostic ignored "-Wparentheses" # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wpadded" #endif #if defined(CATCH_CONFIG_MAIN) || defined(CATCH_CONFIG_RUNNER) # define CATCH_IMPL #endif #ifdef CATCH_IMPL # ifndef CLARA_CONFIG_MAIN # define CLARA_CONFIG_MAIN_NOT_DEFINED # define CLARA_CONFIG_MAIN # endif #endif // #included from: internal/catch_notimplemented_exception.h #define TWOBLUECUBES_CATCH_NOTIMPLEMENTED_EXCEPTION_H_INCLUDED // #included from: catch_common.h #define TWOBLUECUBES_CATCH_COMMON_H_INCLUDED // #included from: catch_compiler_capabilities.h #define TWOBLUECUBES_CATCH_COMPILER_CAPABILITIES_HPP_INCLUDED // Detect a number of compiler features - mostly C++11/14 conformance - by compiler // The following features are defined: // // CATCH_CONFIG_CPP11_NULLPTR : is nullptr supported? // CATCH_CONFIG_CPP11_NOEXCEPT : is noexcept supported? // CATCH_CONFIG_CPP11_GENERATED_METHODS : The delete and default keywords for compiler generated methods // CATCH_CONFIG_CPP11_IS_ENUM : std::is_enum is supported? // CATCH_CONFIG_CPP11_TUPLE : std::tuple is supported // CATCH_CONFIG_CPP11_LONG_LONG : is long long supported? // CATCH_CONFIG_CPP11_OVERRIDE : is override supported? // CATCH_CONFIG_CPP11_UNIQUE_PTR : is unique_ptr supported (otherwise use auto_ptr) // CATCH_CONFIG_CPP11_SHUFFLE : is std::shuffle supported? // CATCH_CONFIG_CPP11_TYPE_TRAITS : are type_traits and enable_if supported? // CATCH_CONFIG_CPP11_OR_GREATER : Is C++11 supported? // CATCH_CONFIG_VARIADIC_MACROS : are variadic macros supported? // CATCH_CONFIG_COUNTER : is the __COUNTER__ macro supported? // CATCH_CONFIG_WINDOWS_SEH : is Windows SEH supported? // CATCH_CONFIG_POSIX_SIGNALS : are POSIX signals supported? // **************** // Note to maintainers: if new toggles are added please document them // in configuration.md, too // **************** // In general each macro has a _NO_ form // (e.g. CATCH_CONFIG_CPP11_NO_NULLPTR) which disables the feature. // Many features, at point of detection, define an _INTERNAL_ macro, so they // can be combined, en-mass, with the _NO_ forms later. 
// All the C++11 features can be disabled with CATCH_CONFIG_NO_CPP11 #ifdef __cplusplus # if __cplusplus >= 201103L # define CATCH_CPP11_OR_GREATER # endif # if __cplusplus >= 201402L # define CATCH_CPP14_OR_GREATER # endif #endif #ifdef __clang__ # if __has_feature(cxx_nullptr) # define CATCH_INTERNAL_CONFIG_CPP11_NULLPTR # endif # if __has_feature(cxx_noexcept) # define CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT # endif # if defined(CATCH_CPP11_OR_GREATER) # define CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ _Pragma( "clang diagnostic push" ) \ _Pragma( "clang diagnostic ignored \"-Wexit-time-destructors\"" ) # define CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS \ _Pragma( "clang diagnostic pop" ) # define CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS \ _Pragma( "clang diagnostic push" ) \ _Pragma( "clang diagnostic ignored \"-Wparentheses\"" ) # define CATCH_INTERNAL_UNSUPPRESS_PARENTHESES_WARNINGS \ _Pragma( "clang diagnostic pop" ) # endif #endif // __clang__ //////////////////////////////////////////////////////////////////////////////// // We know some environments not to support full POSIX signals #if defined(__CYGWIN__) || defined(__QNX__) # if !defined(CATCH_CONFIG_POSIX_SIGNALS) # define CATCH_INTERNAL_CONFIG_NO_POSIX_SIGNALS # endif #endif //////////////////////////////////////////////////////////////////////////////// // Cygwin #ifdef __CYGWIN__ // Required for some versions of Cygwin to declare gettimeofday // see: http://stackoverflow.com/questions/36901803/gettimeofday-not-declared-in-this-scope-cygwin # define _BSD_SOURCE #endif // __CYGWIN__ //////////////////////////////////////////////////////////////////////////////// // Borland #ifdef __BORLANDC__ #endif // __BORLANDC__ //////////////////////////////////////////////////////////////////////////////// // EDG #ifdef __EDG_VERSION__ #endif // __EDG_VERSION__ //////////////////////////////////////////////////////////////////////////////// // Digital Mars #ifdef __DMC__ #endif // __DMC__ //////////////////////////////////////////////////////////////////////////////// // GCC #ifdef __GNUC__ # if __GNUC__ == 4 && __GNUC_MINOR__ >= 6 && defined(__GXX_EXPERIMENTAL_CXX0X__) # define CATCH_INTERNAL_CONFIG_CPP11_NULLPTR # endif // - otherwise more recent versions define __cplusplus >= 201103L // and will get picked up below #endif // __GNUC__ //////////////////////////////////////////////////////////////////////////////// // Visual C++ #ifdef _MSC_VER #define CATCH_INTERNAL_CONFIG_WINDOWS_SEH #if (_MSC_VER >= 1600) # define CATCH_INTERNAL_CONFIG_CPP11_NULLPTR # define CATCH_INTERNAL_CONFIG_CPP11_UNIQUE_PTR #endif #if (_MSC_VER >= 1900 ) // (VC++ 13 (VS2015)) #define CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT #define CATCH_INTERNAL_CONFIG_CPP11_GENERATED_METHODS #define CATCH_INTERNAL_CONFIG_CPP11_SHUFFLE #define CATCH_INTERNAL_CONFIG_CPP11_TYPE_TRAITS #endif #endif // _MSC_VER //////////////////////////////////////////////////////////////////////////////// // Use variadic macros if the compiler supports them #if ( defined _MSC_VER && _MSC_VER > 1400 && !defined __EDGE__) || \ ( defined __WAVE__ && __WAVE_HAS_VARIADICS ) || \ ( defined __GNUC__ && __GNUC__ >= 3 ) || \ ( !defined __cplusplus && __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L ) #define CATCH_INTERNAL_CONFIG_VARIADIC_MACROS #endif // Use __COUNTER__ if the compiler supports it #if ( defined _MSC_VER && _MSC_VER >= 1300 ) || \ ( defined __GNUC__ && ( __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3 )) ) || \ ( defined __clang__ && __clang_major__ >= 3 ) #define 
CATCH_INTERNAL_CONFIG_COUNTER #endif //////////////////////////////////////////////////////////////////////////////// // C++ language feature support // catch all support for C++11 #if defined(CATCH_CPP11_OR_GREATER) # if !defined(CATCH_INTERNAL_CONFIG_CPP11_NULLPTR) # define CATCH_INTERNAL_CONFIG_CPP11_NULLPTR # endif # ifndef CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT # define CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT # endif # ifndef CATCH_INTERNAL_CONFIG_CPP11_GENERATED_METHODS # define CATCH_INTERNAL_CONFIG_CPP11_GENERATED_METHODS # endif # ifndef CATCH_INTERNAL_CONFIG_CPP11_IS_ENUM # define CATCH_INTERNAL_CONFIG_CPP11_IS_ENUM # endif # ifndef CATCH_INTERNAL_CONFIG_CPP11_TUPLE # define CATCH_INTERNAL_CONFIG_CPP11_TUPLE # endif # ifndef CATCH_INTERNAL_CONFIG_VARIADIC_MACROS # define CATCH_INTERNAL_CONFIG_VARIADIC_MACROS # endif # if !defined(CATCH_INTERNAL_CONFIG_CPP11_LONG_LONG) # define CATCH_INTERNAL_CONFIG_CPP11_LONG_LONG # endif # if !defined(CATCH_INTERNAL_CONFIG_CPP11_OVERRIDE) # define CATCH_INTERNAL_CONFIG_CPP11_OVERRIDE # endif # if !defined(CATCH_INTERNAL_CONFIG_CPP11_UNIQUE_PTR) # define CATCH_INTERNAL_CONFIG_CPP11_UNIQUE_PTR # endif # if !defined(CATCH_INTERNAL_CONFIG_CPP11_SHUFFLE) # define CATCH_INTERNAL_CONFIG_CPP11_SHUFFLE # endif # if !defined(CATCH_INTERNAL_CONFIG_CPP11_TYPE_TRAITS) # define CATCH_INTERNAL_CONFIG_CPP11_TYPE_TRAITS # endif #endif // __cplusplus >= 201103L // Now set the actual defines based on the above + anything the user has configured #if defined(CATCH_INTERNAL_CONFIG_CPP11_NULLPTR) && !defined(CATCH_CONFIG_CPP11_NO_NULLPTR) && !defined(CATCH_CONFIG_CPP11_NULLPTR) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_NULLPTR #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT) && !defined(CATCH_CONFIG_CPP11_NO_NOEXCEPT) && !defined(CATCH_CONFIG_CPP11_NOEXCEPT) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_NOEXCEPT #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_GENERATED_METHODS) && !defined(CATCH_CONFIG_CPP11_NO_GENERATED_METHODS) && !defined(CATCH_CONFIG_CPP11_GENERATED_METHODS) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_GENERATED_METHODS #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_IS_ENUM) && !defined(CATCH_CONFIG_CPP11_NO_IS_ENUM) && !defined(CATCH_CONFIG_CPP11_IS_ENUM) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_IS_ENUM #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_TUPLE) && !defined(CATCH_CONFIG_CPP11_NO_TUPLE) && !defined(CATCH_CONFIG_CPP11_TUPLE) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_TUPLE #endif #if defined(CATCH_INTERNAL_CONFIG_VARIADIC_MACROS) && !defined(CATCH_CONFIG_NO_VARIADIC_MACROS) && !defined(CATCH_CONFIG_VARIADIC_MACROS) # define CATCH_CONFIG_VARIADIC_MACROS #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_LONG_LONG) && !defined(CATCH_CONFIG_CPP11_NO_LONG_LONG) && !defined(CATCH_CONFIG_CPP11_LONG_LONG) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_LONG_LONG #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_OVERRIDE) && !defined(CATCH_CONFIG_CPP11_NO_OVERRIDE) && !defined(CATCH_CONFIG_CPP11_OVERRIDE) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_OVERRIDE #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_UNIQUE_PTR) && !defined(CATCH_CONFIG_CPP11_NO_UNIQUE_PTR) && !defined(CATCH_CONFIG_CPP11_UNIQUE_PTR) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_UNIQUE_PTR #endif // Use of __COUNTER__ is suppressed if __JETBRAINS_IDE__ is #defined (meaning we're being parsed by a JetBrains IDE for // analytics) 
because, at time of writing, __COUNTER__ is not properly handled by it. // This does not affect compilation #if defined(CATCH_INTERNAL_CONFIG_COUNTER) && !defined(CATCH_CONFIG_NO_COUNTER) && !defined(CATCH_CONFIG_COUNTER) && !defined(__JETBRAINS_IDE__) # define CATCH_CONFIG_COUNTER #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_SHUFFLE) && !defined(CATCH_CONFIG_CPP11_NO_SHUFFLE) && !defined(CATCH_CONFIG_CPP11_SHUFFLE) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_SHUFFLE #endif # if defined(CATCH_INTERNAL_CONFIG_CPP11_TYPE_TRAITS) && !defined(CATCH_CONFIG_CPP11_NO_TYPE_TRAITS) && !defined(CATCH_CONFIG_CPP11_TYPE_TRAITS) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_TYPE_TRAITS # endif #if defined(CATCH_INTERNAL_CONFIG_WINDOWS_SEH) && !defined(CATCH_CONFIG_NO_WINDOWS_SEH) && !defined(CATCH_CONFIG_WINDOWS_SEH) # define CATCH_CONFIG_WINDOWS_SEH #endif // This is set by default, because we assume that unix compilers are posix-signal-compatible by default. #if !defined(CATCH_INTERNAL_CONFIG_NO_POSIX_SIGNALS) && !defined(CATCH_CONFIG_NO_POSIX_SIGNALS) && !defined(CATCH_CONFIG_POSIX_SIGNALS) # define CATCH_CONFIG_POSIX_SIGNALS #endif #if !defined(CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS) # define CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS # define CATCH_INTERNAL_UNSUPPRESS_PARENTHESES_WARNINGS #endif #if !defined(CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS) # define CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS # define CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS #endif // noexcept support: #if defined(CATCH_CONFIG_CPP11_NOEXCEPT) && !defined(CATCH_NOEXCEPT) # define CATCH_NOEXCEPT noexcept # define CATCH_NOEXCEPT_IS(x) noexcept(x) #else # define CATCH_NOEXCEPT throw() # define CATCH_NOEXCEPT_IS(x) #endif // nullptr support #ifdef CATCH_CONFIG_CPP11_NULLPTR # define CATCH_NULL nullptr #else # define CATCH_NULL NULL #endif // override support #ifdef CATCH_CONFIG_CPP11_OVERRIDE # define CATCH_OVERRIDE override #else # define CATCH_OVERRIDE #endif // unique_ptr support #ifdef CATCH_CONFIG_CPP11_UNIQUE_PTR # define CATCH_AUTO_PTR( T ) std::unique_ptr #else # define CATCH_AUTO_PTR( T ) std::auto_ptr #endif #define INTERNAL_CATCH_UNIQUE_NAME_LINE2( name, line ) name##line #define INTERNAL_CATCH_UNIQUE_NAME_LINE( name, line ) INTERNAL_CATCH_UNIQUE_NAME_LINE2( name, line ) #ifdef CATCH_CONFIG_COUNTER # define INTERNAL_CATCH_UNIQUE_NAME( name ) INTERNAL_CATCH_UNIQUE_NAME_LINE( name, __COUNTER__ ) #else # define INTERNAL_CATCH_UNIQUE_NAME( name ) INTERNAL_CATCH_UNIQUE_NAME_LINE( name, __LINE__ ) #endif #define INTERNAL_CATCH_STRINGIFY2( expr ) #expr #define INTERNAL_CATCH_STRINGIFY( expr ) INTERNAL_CATCH_STRINGIFY2( expr ) #include #include namespace Catch { struct IConfig; struct CaseSensitive { enum Choice { Yes, No }; }; class NonCopyable { #ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS NonCopyable( NonCopyable const& ) = delete; NonCopyable( NonCopyable && ) = delete; NonCopyable& operator = ( NonCopyable const& ) = delete; NonCopyable& operator = ( NonCopyable && ) = delete; #else NonCopyable( NonCopyable const& info ); NonCopyable& operator = ( NonCopyable const& ); #endif protected: NonCopyable() {} virtual ~NonCopyable(); }; class SafeBool { public: typedef void (SafeBool::*type)() const; static type makeSafe( bool value ) { return value ? 
&SafeBool::trueValue : 0; } private: void trueValue() const {} }; template inline void deleteAll( ContainerT& container ) { typename ContainerT::const_iterator it = container.begin(); typename ContainerT::const_iterator itEnd = container.end(); for(; it != itEnd; ++it ) delete *it; } template inline void deleteAllValues( AssociativeContainerT& container ) { typename AssociativeContainerT::const_iterator it = container.begin(); typename AssociativeContainerT::const_iterator itEnd = container.end(); for(; it != itEnd; ++it ) delete it->second; } bool startsWith( std::string const& s, std::string const& prefix ); bool startsWith( std::string const& s, char prefix ); bool endsWith( std::string const& s, std::string const& suffix ); bool endsWith( std::string const& s, char suffix ); bool contains( std::string const& s, std::string const& infix ); void toLowerInPlace( std::string& s ); std::string toLower( std::string const& s ); std::string trim( std::string const& str ); bool replaceInPlace( std::string& str, std::string const& replaceThis, std::string const& withThis ); struct pluralise { pluralise( std::size_t count, std::string const& label ); friend std::ostream& operator << ( std::ostream& os, pluralise const& pluraliser ); std::size_t m_count; std::string m_label; }; struct SourceLineInfo { SourceLineInfo(); SourceLineInfo( char const* _file, std::size_t _line ); # ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS SourceLineInfo(SourceLineInfo const& other) = default; SourceLineInfo( SourceLineInfo && ) = default; SourceLineInfo& operator = ( SourceLineInfo const& ) = default; SourceLineInfo& operator = ( SourceLineInfo && ) = default; # endif bool empty() const; bool operator == ( SourceLineInfo const& other ) const; bool operator < ( SourceLineInfo const& other ) const; char const* file; std::size_t line; }; std::ostream& operator << ( std::ostream& os, SourceLineInfo const& info ); // This is just here to avoid compiler warnings with macro constants and boolean literals inline bool isTrue( bool value ){ return value; } inline bool alwaysTrue() { return true; } inline bool alwaysFalse() { return false; } void throwLogicError( std::string const& message, SourceLineInfo const& locationInfo ); void seedRng( IConfig const& config ); unsigned int rngSeed(); // Use this in variadic streaming macros to allow // >> +StreamEndStop // as well as // >> stuff +StreamEndStop struct StreamEndStop { std::string operator+() { return std::string(); } }; template T const& operator + ( T const& value, StreamEndStop ) { return value; } } #define CATCH_INTERNAL_LINEINFO ::Catch::SourceLineInfo( __FILE__, static_cast( __LINE__ ) ) #define CATCH_INTERNAL_ERROR( msg ) ::Catch::throwLogicError( msg, CATCH_INTERNAL_LINEINFO ); namespace Catch { class NotImplementedException : public std::exception { public: NotImplementedException( SourceLineInfo const& lineInfo ); NotImplementedException( NotImplementedException const& ) {} virtual ~NotImplementedException() CATCH_NOEXCEPT {} virtual const char* what() const CATCH_NOEXCEPT; private: std::string m_what; SourceLineInfo m_lineInfo; }; } // end namespace Catch /////////////////////////////////////////////////////////////////////////////// #define CATCH_NOT_IMPLEMENTED throw Catch::NotImplementedException( CATCH_INTERNAL_LINEINFO ) // #included from: internal/catch_context.h #define TWOBLUECUBES_CATCH_CONTEXT_H_INCLUDED // #included from: catch_interfaces_generators.h #define TWOBLUECUBES_CATCH_INTERFACES_GENERATORS_H_INCLUDED #include namespace Catch { struct 
IGeneratorInfo { virtual ~IGeneratorInfo(); virtual bool moveNext() = 0; virtual std::size_t getCurrentIndex() const = 0; }; struct IGeneratorsForTest { virtual ~IGeneratorsForTest(); virtual IGeneratorInfo& getGeneratorInfo( std::string const& fileInfo, std::size_t size ) = 0; virtual bool moveNext() = 0; }; IGeneratorsForTest* createGeneratorsForTest(); } // end namespace Catch // #included from: catch_ptr.hpp #define TWOBLUECUBES_CATCH_PTR_HPP_INCLUDED #ifdef __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wpadded" #endif namespace Catch { // An intrusive reference counting smart pointer. // T must implement addRef() and release() methods // typically implementing the IShared interface template class Ptr { public: Ptr() : m_p( CATCH_NULL ){} Ptr( T* p ) : m_p( p ){ if( m_p ) m_p->addRef(); } Ptr( Ptr const& other ) : m_p( other.m_p ){ if( m_p ) m_p->addRef(); } ~Ptr(){ if( m_p ) m_p->release(); } void reset() { if( m_p ) m_p->release(); m_p = CATCH_NULL; } Ptr& operator = ( T* p ){ Ptr temp( p ); swap( temp ); return *this; } Ptr& operator = ( Ptr const& other ){ Ptr temp( other ); swap( temp ); return *this; } void swap( Ptr& other ) { std::swap( m_p, other.m_p ); } T* get() const{ return m_p; } T& operator*() const { return *m_p; } T* operator->() const { return m_p; } bool operator !() const { return m_p == CATCH_NULL; } operator SafeBool::type() const { return SafeBool::makeSafe( m_p != CATCH_NULL ); } private: T* m_p; }; struct IShared : NonCopyable { virtual ~IShared(); virtual void addRef() const = 0; virtual void release() const = 0; }; template struct SharedImpl : T { SharedImpl() : m_rc( 0 ){} virtual void addRef() const { ++m_rc; } virtual void release() const { if( --m_rc == 0 ) delete this; } mutable unsigned int m_rc; }; } // end namespace Catch #ifdef __clang__ # pragma clang diagnostic pop #endif namespace Catch { class TestCase; class Stream; struct IResultCapture; struct IRunner; struct IGeneratorsForTest; struct IConfig; struct IContext { virtual ~IContext(); virtual IResultCapture* getResultCapture() = 0; virtual IRunner* getRunner() = 0; virtual size_t getGeneratorIndex( std::string const& fileInfo, size_t totalSize ) = 0; virtual bool advanceGeneratorsForCurrentTest() = 0; virtual Ptr getConfig() const = 0; }; struct IMutableContext : IContext { virtual ~IMutableContext(); virtual void setResultCapture( IResultCapture* resultCapture ) = 0; virtual void setRunner( IRunner* runner ) = 0; virtual void setConfig( Ptr const& config ) = 0; }; IContext& getCurrentContext(); IMutableContext& getCurrentMutableContext(); void cleanUpContext(); Stream createStream( std::string const& streamName ); } // #included from: internal/catch_test_registry.hpp #define TWOBLUECUBES_CATCH_TEST_REGISTRY_HPP_INCLUDED // #included from: catch_interfaces_testcase.h #define TWOBLUECUBES_CATCH_INTERFACES_TESTCASE_H_INCLUDED #include namespace Catch { class TestSpec; struct ITestCase : IShared { virtual void invoke () const = 0; protected: virtual ~ITestCase(); }; class TestCase; struct IConfig; struct ITestCaseRegistry { virtual ~ITestCaseRegistry(); virtual std::vector const& getAllTests() const = 0; virtual std::vector const& getAllTestsSorted( IConfig const& config ) const = 0; }; bool matchTest( TestCase const& testCase, TestSpec const& testSpec, IConfig const& config ); std::vector filterTests( std::vector const& testCases, TestSpec const& testSpec, IConfig const& config ); std::vector const& getAllTestCasesSorted( IConfig const& config ); } namespace Catch { 
template class MethodTestCase : public SharedImpl { public: MethodTestCase( void (C::*method)() ) : m_method( method ) {} virtual void invoke() const { C obj; (obj.*m_method)(); } private: virtual ~MethodTestCase() {} void (C::*m_method)(); }; typedef void(*TestFunction)(); struct NameAndDesc { NameAndDesc( const char* _name = "", const char* _description= "" ) : name( _name ), description( _description ) {} const char* name; const char* description; }; void registerTestCase ( ITestCase* testCase, char const* className, NameAndDesc const& nameAndDesc, SourceLineInfo const& lineInfo ); struct AutoReg { AutoReg ( TestFunction function, SourceLineInfo const& lineInfo, NameAndDesc const& nameAndDesc ); template AutoReg ( void (C::*method)(), char const* className, NameAndDesc const& nameAndDesc, SourceLineInfo const& lineInfo ) { registerTestCase ( new MethodTestCase( method ), className, nameAndDesc, lineInfo ); } ~AutoReg(); private: AutoReg( AutoReg const& ); void operator= ( AutoReg const& ); }; void registerTestCaseFunction ( TestFunction function, SourceLineInfo const& lineInfo, NameAndDesc const& nameAndDesc ); } // end namespace Catch #ifdef CATCH_CONFIG_VARIADIC_MACROS /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_TESTCASE2( TestName, ... ) \ static void TestName(); \ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &TestName, CATCH_INTERNAL_LINEINFO, Catch::NameAndDesc( __VA_ARGS__ ) ); } \ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS \ static void TestName() #define INTERNAL_CATCH_TESTCASE( ... ) \ INTERNAL_CATCH_TESTCASE2( INTERNAL_CATCH_UNIQUE_NAME( ____C_A_T_C_H____T_E_S_T____ ), __VA_ARGS__ ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_METHOD_AS_TEST_CASE( QualifiedMethod, ... ) \ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &QualifiedMethod, "&" #QualifiedMethod, Catch::NameAndDesc( __VA_ARGS__ ), CATCH_INTERNAL_LINEINFO ); } \ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_TEST_CASE_METHOD2( TestName, ClassName, ... )\ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ namespace{ \ struct TestName : ClassName{ \ void test(); \ }; \ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar ) ( &TestName::test, #ClassName, Catch::NameAndDesc( __VA_ARGS__ ), CATCH_INTERNAL_LINEINFO ); \ } \ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS \ void TestName::test() #define INTERNAL_CATCH_TEST_CASE_METHOD( ClassName, ... ) \ INTERNAL_CATCH_TEST_CASE_METHOD2( INTERNAL_CATCH_UNIQUE_NAME( ____C_A_T_C_H____T_E_S_T____ ), ClassName, __VA_ARGS__ ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_REGISTER_TESTCASE( Function, ... 
) \ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ Catch::AutoReg( Function, CATCH_INTERNAL_LINEINFO, Catch::NameAndDesc( __VA_ARGS__ ) ); \ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS #else /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_TESTCASE2( TestName, Name, Desc ) \ static void TestName(); \ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &TestName, CATCH_INTERNAL_LINEINFO, Catch::NameAndDesc( Name, Desc ) ); }\ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS \ static void TestName() #define INTERNAL_CATCH_TESTCASE( Name, Desc ) \ INTERNAL_CATCH_TESTCASE2( INTERNAL_CATCH_UNIQUE_NAME( ____C_A_T_C_H____T_E_S_T____ ), Name, Desc ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_METHOD_AS_TEST_CASE( QualifiedMethod, Name, Desc ) \ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &QualifiedMethod, "&" #QualifiedMethod, Catch::NameAndDesc( Name, Desc ), CATCH_INTERNAL_LINEINFO ); } \ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_TEST_CASE_METHOD2( TestCaseName, ClassName, TestName, Desc )\ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ namespace{ \ struct TestCaseName : ClassName{ \ void test(); \ }; \ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar ) ( &TestCaseName::test, #ClassName, Catch::NameAndDesc( TestName, Desc ), CATCH_INTERNAL_LINEINFO ); \ } \ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS \ void TestCaseName::test() #define INTERNAL_CATCH_TEST_CASE_METHOD( ClassName, TestName, Desc )\ INTERNAL_CATCH_TEST_CASE_METHOD2( INTERNAL_CATCH_UNIQUE_NAME( ____C_A_T_C_H____T_E_S_T____ ), ClassName, TestName, Desc ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_REGISTER_TESTCASE( Function, Name, Desc ) \ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ Catch::AutoReg( Function, CATCH_INTERNAL_LINEINFO, Catch::NameAndDesc( Name, Desc ) ); \ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS #endif // #included from: internal/catch_capture.hpp #define TWOBLUECUBES_CATCH_CAPTURE_HPP_INCLUDED // #included from: catch_result_builder.h #define TWOBLUECUBES_CATCH_RESULT_BUILDER_H_INCLUDED // #included from: catch_result_type.h #define TWOBLUECUBES_CATCH_RESULT_TYPE_H_INCLUDED namespace Catch { // ResultWas::OfType enum struct ResultWas { enum OfType { Unknown = -1, Ok = 0, Info = 1, Warning = 2, FailureBit = 0x10, ExpressionFailed = FailureBit | 1, ExplicitFailure = FailureBit | 2, Exception = 0x100 | FailureBit, ThrewException = Exception | 1, DidntThrowException = Exception | 2, FatalErrorCondition = 0x200 | FailureBit }; }; inline bool isOk( ResultWas::OfType resultType ) { return ( resultType & ResultWas::FailureBit ) == 0; } inline bool isJustInfo( int flags ) { return flags == ResultWas::Info; } // ResultDisposition::Flags enum struct ResultDisposition { enum Flags { Normal = 0x01, ContinueOnFailure = 0x02, // Failures fail test, but execution continues FalseTest = 0x04, // Prefix expression with ! 
SuppressFail = 0x08 // Failures are reported but do not fail the test }; }; inline ResultDisposition::Flags operator | ( ResultDisposition::Flags lhs, ResultDisposition::Flags rhs ) { return static_cast( static_cast( lhs ) | static_cast( rhs ) ); } inline bool shouldContinueOnFailure( int flags ) { return ( flags & ResultDisposition::ContinueOnFailure ) != 0; } inline bool isFalseTest( int flags ) { return ( flags & ResultDisposition::FalseTest ) != 0; } inline bool shouldSuppressFailure( int flags ) { return ( flags & ResultDisposition::SuppressFail ) != 0; } } // end namespace Catch // #included from: catch_assertionresult.h #define TWOBLUECUBES_CATCH_ASSERTIONRESULT_H_INCLUDED #include namespace Catch { struct STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison; struct DecomposedExpression { virtual ~DecomposedExpression() {} virtual bool isBinaryExpression() const { return false; } virtual void reconstructExpression( std::string& dest ) const = 0; // Only simple binary comparisons can be decomposed. // If more complex check is required then wrap sub-expressions in parentheses. template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator + ( T const& ); template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator - ( T const& ); template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator * ( T const& ); template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator / ( T const& ); template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator % ( T const& ); template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator && ( T const& ); template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator || ( T const& ); private: DecomposedExpression& operator = (DecomposedExpression const&); }; struct AssertionInfo { AssertionInfo() {} AssertionInfo( char const * _macroName, SourceLineInfo const& _lineInfo, char const * _capturedExpression, ResultDisposition::Flags _resultDisposition, char const * _secondArg = ""); char const * macroName; SourceLineInfo lineInfo; char const * capturedExpression; ResultDisposition::Flags resultDisposition; char const * secondArg; }; struct AssertionResultData { AssertionResultData() : decomposedExpression( CATCH_NULL ) , resultType( ResultWas::Unknown ) , negated( false ) , parenthesized( false ) {} void negate( bool parenthesize ) { negated = !negated; parenthesized = parenthesize; if( resultType == ResultWas::Ok ) resultType = ResultWas::ExpressionFailed; else if( resultType == ResultWas::ExpressionFailed ) resultType = ResultWas::Ok; } std::string const& reconstructExpression() const { if( decomposedExpression != CATCH_NULL ) { decomposedExpression->reconstructExpression( reconstructedExpression ); if( parenthesized ) { reconstructedExpression.insert( 0, 1, '(' ); reconstructedExpression.append( 1, ')' ); } if( negated ) { reconstructedExpression.insert( 0, 1, '!' 
); } decomposedExpression = CATCH_NULL; } return reconstructedExpression; } mutable DecomposedExpression const* decomposedExpression; mutable std::string reconstructedExpression; std::string message; ResultWas::OfType resultType; bool negated; bool parenthesized; }; class AssertionResult { public: AssertionResult(); AssertionResult( AssertionInfo const& info, AssertionResultData const& data ); ~AssertionResult(); # ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS AssertionResult( AssertionResult const& ) = default; AssertionResult( AssertionResult && ) = default; AssertionResult& operator = ( AssertionResult const& ) = default; AssertionResult& operator = ( AssertionResult && ) = default; # endif bool isOk() const; bool succeeded() const; ResultWas::OfType getResultType() const; bool hasExpression() const; bool hasMessage() const; std::string getExpression() const; std::string getExpressionInMacro() const; bool hasExpandedExpression() const; std::string getExpandedExpression() const; std::string getMessage() const; SourceLineInfo getSourceInfo() const; std::string getTestMacroName() const; void discardDecomposedExpression() const; void expandDecomposedExpression() const; protected: AssertionInfo m_info; AssertionResultData m_resultData; }; } // end namespace Catch // #included from: catch_matchers.hpp #define TWOBLUECUBES_CATCH_MATCHERS_HPP_INCLUDED namespace Catch { namespace Matchers { namespace Impl { template struct MatchAllOf; template struct MatchAnyOf; template struct MatchNotOf; class MatcherUntypedBase { public: std::string toString() const { if( m_cachedToString.empty() ) m_cachedToString = describe(); return m_cachedToString; } protected: virtual ~MatcherUntypedBase(); virtual std::string describe() const = 0; mutable std::string m_cachedToString; private: MatcherUntypedBase& operator = ( MatcherUntypedBase const& ); }; template struct MatcherMethod { virtual bool match( ObjectT const& arg ) const = 0; }; template struct MatcherMethod { virtual bool match( PtrT* arg ) const = 0; }; template struct MatcherBase : MatcherUntypedBase, MatcherMethod { MatchAllOf operator && ( MatcherBase const& other ) const; MatchAnyOf operator || ( MatcherBase const& other ) const; MatchNotOf operator ! 
() const; }; template struct MatchAllOf : MatcherBase { virtual bool match( ArgT const& arg ) const CATCH_OVERRIDE { for( std::size_t i = 0; i < m_matchers.size(); ++i ) { if (!m_matchers[i]->match(arg)) return false; } return true; } virtual std::string describe() const CATCH_OVERRIDE { std::string description; description.reserve( 4 + m_matchers.size()*32 ); description += "( "; for( std::size_t i = 0; i < m_matchers.size(); ++i ) { if( i != 0 ) description += " and "; description += m_matchers[i]->toString(); } description += " )"; return description; } MatchAllOf& operator && ( MatcherBase const& other ) { m_matchers.push_back( &other ); return *this; } std::vector const*> m_matchers; }; template struct MatchAnyOf : MatcherBase { virtual bool match( ArgT const& arg ) const CATCH_OVERRIDE { for( std::size_t i = 0; i < m_matchers.size(); ++i ) { if (m_matchers[i]->match(arg)) return true; } return false; } virtual std::string describe() const CATCH_OVERRIDE { std::string description; description.reserve( 4 + m_matchers.size()*32 ); description += "( "; for( std::size_t i = 0; i < m_matchers.size(); ++i ) { if( i != 0 ) description += " or "; description += m_matchers[i]->toString(); } description += " )"; return description; } MatchAnyOf& operator || ( MatcherBase const& other ) { m_matchers.push_back( &other ); return *this; } std::vector const*> m_matchers; }; template struct MatchNotOf : MatcherBase { MatchNotOf( MatcherBase const& underlyingMatcher ) : m_underlyingMatcher( underlyingMatcher ) {} virtual bool match( ArgT const& arg ) const CATCH_OVERRIDE { return !m_underlyingMatcher.match( arg ); } virtual std::string describe() const CATCH_OVERRIDE { return "not " + m_underlyingMatcher.toString(); } MatcherBase const& m_underlyingMatcher; }; template MatchAllOf MatcherBase::operator && ( MatcherBase const& other ) const { return MatchAllOf() && *this && other; } template MatchAnyOf MatcherBase::operator || ( MatcherBase const& other ) const { return MatchAnyOf() || *this || other; } template MatchNotOf MatcherBase::operator ! () const { return MatchNotOf( *this ); } } // namespace Impl // The following functions create the actual matcher objects. // This allows the types to be inferred // - deprecated: prefer ||, && and ! 
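// For example (sketch; `greeting` is a hypothetical std::string, and
// CHECK_THAT / REQUIRE_THAT are the assertion macros defined later in this
// header, or their CATCH_-prefixed forms depending on configuration):
//
//     std::string greeting = "Hello, world!";
//     CHECK_THAT( greeting, StartsWith( "Hello" ) && !Contains( "error" ) );
//     CHECK_THAT( greeting, EndsWith( "!" ) || EndsWith( "." ) );
//
// StartsWith(), Contains() and EndsWith() are the StdString matcher factories
// declared further down; the operators combine them via MatchAllOf,
// MatchAnyOf and MatchNotOf without naming those types explicitly.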
template inline Impl::MatchNotOf Not( Impl::MatcherBase const& underlyingMatcher ) { return Impl::MatchNotOf( underlyingMatcher ); } template inline Impl::MatchAllOf AllOf( Impl::MatcherBase const& m1, Impl::MatcherBase const& m2 ) { return Impl::MatchAllOf() && m1 && m2; } template inline Impl::MatchAllOf AllOf( Impl::MatcherBase const& m1, Impl::MatcherBase const& m2, Impl::MatcherBase const& m3 ) { return Impl::MatchAllOf() && m1 && m2 && m3; } template inline Impl::MatchAnyOf AnyOf( Impl::MatcherBase const& m1, Impl::MatcherBase const& m2 ) { return Impl::MatchAnyOf() || m1 || m2; } template inline Impl::MatchAnyOf AnyOf( Impl::MatcherBase const& m1, Impl::MatcherBase const& m2, Impl::MatcherBase const& m3 ) { return Impl::MatchAnyOf() || m1 || m2 || m3; } } // namespace Matchers using namespace Matchers; using Matchers::Impl::MatcherBase; } // namespace Catch namespace Catch { struct TestFailureException{}; template class ExpressionLhs; struct CopyableStream { CopyableStream() {} CopyableStream( CopyableStream const& other ) { oss << other.oss.str(); } CopyableStream& operator=( CopyableStream const& other ) { oss.str(std::string()); oss << other.oss.str(); return *this; } std::ostringstream oss; }; class ResultBuilder : public DecomposedExpression { public: ResultBuilder( char const* macroName, SourceLineInfo const& lineInfo, char const* capturedExpression, ResultDisposition::Flags resultDisposition, char const* secondArg = "" ); ~ResultBuilder(); template ExpressionLhs operator <= ( T const& operand ); ExpressionLhs operator <= ( bool value ); template ResultBuilder& operator << ( T const& value ) { m_stream().oss << value; return *this; } ResultBuilder& setResultType( ResultWas::OfType result ); ResultBuilder& setResultType( bool result ); void endExpression( DecomposedExpression const& expr ); virtual void reconstructExpression( std::string& dest ) const CATCH_OVERRIDE; AssertionResult build() const; AssertionResult build( DecomposedExpression const& expr ) const; void useActiveException( ResultDisposition::Flags resultDisposition = ResultDisposition::Normal ); void captureResult( ResultWas::OfType resultType ); void captureExpression(); void captureExpectedException( std::string const& expectedMessage ); void captureExpectedException( Matchers::Impl::MatcherBase const& matcher ); void handleResult( AssertionResult const& result ); void react(); bool shouldDebugBreak() const; bool allowThrows() const; template void captureMatch( ArgT const& arg, MatcherT const& matcher, char const* matcherString ); void setExceptionGuard(); void unsetExceptionGuard(); private: AssertionInfo m_assertionInfo; AssertionResultData m_data; static CopyableStream &m_stream() { static CopyableStream s; return s; } bool m_shouldDebugBreak; bool m_shouldThrow; bool m_guardException; }; } // namespace Catch // Include after due to circular dependency: // #included from: catch_expression_lhs.hpp #define TWOBLUECUBES_CATCH_EXPRESSION_LHS_HPP_INCLUDED // #included from: catch_evaluate.hpp #define TWOBLUECUBES_CATCH_EVALUATE_HPP_INCLUDED #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable:4389) // '==' : signed/unsigned mismatch # pragma warning(disable:4312) // Converting int to T* using reinterpret_cast (issue on x64 platform) #endif #include namespace Catch { namespace Internal { enum Operator { IsEqualTo, IsNotEqualTo, IsLessThan, IsGreaterThan, IsLessThanOrEqualTo, IsGreaterThanOrEqualTo }; template struct OperatorTraits { static const char* getName(){ return "*error*"; } }; template<> struct 
OperatorTraits { static const char* getName(){ return "=="; } }; template<> struct OperatorTraits { static const char* getName(){ return "!="; } }; template<> struct OperatorTraits { static const char* getName(){ return "<"; } }; template<> struct OperatorTraits { static const char* getName(){ return ">"; } }; template<> struct OperatorTraits { static const char* getName(){ return "<="; } }; template<> struct OperatorTraits{ static const char* getName(){ return ">="; } }; template inline T& opCast(T const& t) { return const_cast(t); } // nullptr_t support based on pull request #154 from Konstantin Baumann #ifdef CATCH_CONFIG_CPP11_NULLPTR inline std::nullptr_t opCast(std::nullptr_t) { return nullptr; } #endif // CATCH_CONFIG_CPP11_NULLPTR // So the compare overloads can be operator agnostic we convey the operator as a template // enum, which is used to specialise an Evaluator for doing the comparison. template class Evaluator{}; template struct Evaluator { static bool evaluate( T1 const& lhs, T2 const& rhs) { return bool( opCast( lhs ) == opCast( rhs ) ); } }; template struct Evaluator { static bool evaluate( T1 const& lhs, T2 const& rhs ) { return bool( opCast( lhs ) != opCast( rhs ) ); } }; template struct Evaluator { static bool evaluate( T1 const& lhs, T2 const& rhs ) { return bool( opCast( lhs ) < opCast( rhs ) ); } }; template struct Evaluator { static bool evaluate( T1 const& lhs, T2 const& rhs ) { return bool( opCast( lhs ) > opCast( rhs ) ); } }; template struct Evaluator { static bool evaluate( T1 const& lhs, T2 const& rhs ) { return bool( opCast( lhs ) >= opCast( rhs ) ); } }; template struct Evaluator { static bool evaluate( T1 const& lhs, T2 const& rhs ) { return bool( opCast( lhs ) <= opCast( rhs ) ); } }; template bool applyEvaluator( T1 const& lhs, T2 const& rhs ) { return Evaluator::evaluate( lhs, rhs ); } // This level of indirection allows us to specialise for integer types // to avoid signed/ unsigned warnings // "base" overload template bool compare( T1 const& lhs, T2 const& rhs ) { return Evaluator::evaluate( lhs, rhs ); } // unsigned X to int template bool compare( unsigned int lhs, int rhs ) { return applyEvaluator( lhs, static_cast( rhs ) ); } template bool compare( unsigned long lhs, int rhs ) { return applyEvaluator( lhs, static_cast( rhs ) ); } template bool compare( unsigned char lhs, int rhs ) { return applyEvaluator( lhs, static_cast( rhs ) ); } // unsigned X to long template bool compare( unsigned int lhs, long rhs ) { return applyEvaluator( lhs, static_cast( rhs ) ); } template bool compare( unsigned long lhs, long rhs ) { return applyEvaluator( lhs, static_cast( rhs ) ); } template bool compare( unsigned char lhs, long rhs ) { return applyEvaluator( lhs, static_cast( rhs ) ); } // int to unsigned X template bool compare( int lhs, unsigned int rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( int lhs, unsigned long rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( int lhs, unsigned char rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } // long to unsigned X template bool compare( long lhs, unsigned int rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( long lhs, unsigned long rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( long lhs, unsigned char rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } // pointer to long (when comparing against NULL) template bool compare( long lhs, T* rhs ) { return 
Evaluator::evaluate( reinterpret_cast( lhs ), rhs ); } template bool compare( T* lhs, long rhs ) { return Evaluator::evaluate( lhs, reinterpret_cast( rhs ) ); } // pointer to int (when comparing against NULL) template bool compare( int lhs, T* rhs ) { return Evaluator::evaluate( reinterpret_cast( lhs ), rhs ); } template bool compare( T* lhs, int rhs ) { return Evaluator::evaluate( lhs, reinterpret_cast( rhs ) ); } #ifdef CATCH_CONFIG_CPP11_LONG_LONG // long long to unsigned X template bool compare( long long lhs, unsigned int rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( long long lhs, unsigned long rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( long long lhs, unsigned long long rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( long long lhs, unsigned char rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } // unsigned long long to X template bool compare( unsigned long long lhs, int rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( unsigned long long lhs, long rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( unsigned long long lhs, long long rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( unsigned long long lhs, char rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } // pointer to long long (when comparing against NULL) template bool compare( long long lhs, T* rhs ) { return Evaluator::evaluate( reinterpret_cast( lhs ), rhs ); } template bool compare( T* lhs, long long rhs ) { return Evaluator::evaluate( lhs, reinterpret_cast( rhs ) ); } #endif // CATCH_CONFIG_CPP11_LONG_LONG #ifdef CATCH_CONFIG_CPP11_NULLPTR // pointer to nullptr_t (when comparing against nullptr) template bool compare( std::nullptr_t, T* rhs ) { return Evaluator::evaluate( nullptr, rhs ); } template bool compare( T* lhs, std::nullptr_t ) { return Evaluator::evaluate( lhs, nullptr ); } #endif // CATCH_CONFIG_CPP11_NULLPTR } // end of namespace Internal } // end of namespace Catch #ifdef _MSC_VER # pragma warning(pop) #endif // #included from: catch_tostring.h #define TWOBLUECUBES_CATCH_TOSTRING_H_INCLUDED #include #include #include #include #include #ifdef __OBJC__ // #included from: catch_objc_arc.hpp #define TWOBLUECUBES_CATCH_OBJC_ARC_HPP_INCLUDED #import #ifdef __has_feature #define CATCH_ARC_ENABLED __has_feature(objc_arc) #else #define CATCH_ARC_ENABLED 0 #endif void arcSafeRelease( NSObject* obj ); id performOptionalSelector( id obj, SEL sel ); #if !CATCH_ARC_ENABLED inline void arcSafeRelease( NSObject* obj ) { [obj release]; } inline id performOptionalSelector( id obj, SEL sel ) { if( [obj respondsToSelector: sel] ) return [obj performSelector: sel]; return nil; } #define CATCH_UNSAFE_UNRETAINED #define CATCH_ARC_STRONG #else inline void arcSafeRelease( NSObject* ){} inline id performOptionalSelector( id obj, SEL sel ) { #ifdef __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Warc-performSelector-leaks" #endif if( [obj respondsToSelector: sel] ) return [obj performSelector: sel]; #ifdef __clang__ # pragma clang diagnostic pop #endif return nil; } #define CATCH_UNSAFE_UNRETAINED __unsafe_unretained #define CATCH_ARC_STRONG __strong #endif #endif #ifdef CATCH_CONFIG_CPP11_TUPLE #include #endif #ifdef CATCH_CONFIG_CPP11_IS_ENUM #include #endif namespace Catch { // Why we're here. 
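// (Aside on the Internal::compare helpers just above: the expression
// decomposer routes every decomposed comparison through them, so a mixed
// comparison such as CHECK( i == u ) with `int i` and `unsigned int u`
// dispatches to the (int, unsigned int) overload, which casts explicitly
// instead of emitting a signed/unsigned comparison warning. Roughly, the
// assertion reduces to a call like:
//
//     Catch::Internal::compare<Catch::Internal::IsEqualTo>( i, u );
// )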
template std::string toString( T const& value ); // Built in overloads std::string toString( std::string const& value ); std::string toString( std::wstring const& value ); std::string toString( const char* const value ); std::string toString( char* const value ); std::string toString( const wchar_t* const value ); std::string toString( wchar_t* const value ); std::string toString( int value ); std::string toString( unsigned long value ); std::string toString( unsigned int value ); std::string toString( const double value ); std::string toString( const float value ); std::string toString( bool value ); std::string toString( char value ); std::string toString( signed char value ); std::string toString( unsigned char value ); #ifdef CATCH_CONFIG_CPP11_LONG_LONG std::string toString( long long value ); std::string toString( unsigned long long value ); #endif #ifdef CATCH_CONFIG_CPP11_NULLPTR std::string toString( std::nullptr_t ); #endif #ifdef __OBJC__ std::string toString( NSString const * const& nsstring ); std::string toString( NSString * CATCH_ARC_STRONG & nsstring ); std::string toString( NSObject* const& nsObject ); #endif namespace Detail { extern const std::string unprintableString; #if !defined(CATCH_CONFIG_CPP11_STREAM_INSERTABLE_CHECK) struct BorgType { template BorgType( T const& ); }; struct TrueType { char sizer[1]; }; struct FalseType { char sizer[2]; }; TrueType& testStreamable( std::ostream& ); FalseType testStreamable( FalseType ); FalseType operator<<( std::ostream const&, BorgType const& ); template struct IsStreamInsertable { static std::ostream &s; static T const&t; enum { value = sizeof( testStreamable(s << t) ) == sizeof( TrueType ) }; }; #else template class IsStreamInsertable { template static auto test(int) -> decltype( std::declval() << std::declval(), std::true_type() ); template static auto test(...) 
-> std::false_type; public: static const bool value = decltype(test(0))::value; }; #endif #if defined(CATCH_CONFIG_CPP11_IS_ENUM) template::value > struct EnumStringMaker { static std::string convert( T const& ) { return unprintableString; } }; template struct EnumStringMaker { static std::string convert( T const& v ) { return ::Catch::toString( static_cast::type>(v) ); } }; #endif template struct StringMakerBase { #if defined(CATCH_CONFIG_CPP11_IS_ENUM) template static std::string convert( T const& v ) { return EnumStringMaker::convert( v ); } #else template static std::string convert( T const& ) { return unprintableString; } #endif }; template<> struct StringMakerBase { template static std::string convert( T const& _value ) { std::ostringstream oss; oss << _value; return oss.str(); } }; std::string rawMemoryToString( const void *object, std::size_t size ); template inline std::string rawMemoryToString( const T& object ) { return rawMemoryToString( &object, sizeof(object) ); } } // end namespace Detail template struct StringMaker : Detail::StringMakerBase::value> {}; template struct StringMaker { template static std::string convert( U* p ) { if( !p ) return "NULL"; else return Detail::rawMemoryToString( p ); } }; template struct StringMaker { static std::string convert( R C::* p ) { if( !p ) return "NULL"; else return Detail::rawMemoryToString( p ); } }; namespace Detail { template std::string rangeToString( InputIterator first, InputIterator last ); } //template //struct StringMaker > { // static std::string convert( std::vector const& v ) { // return Detail::rangeToString( v.begin(), v.end() ); // } //}; template std::string toString( std::vector const& v ) { return Detail::rangeToString( v.begin(), v.end() ); } #ifdef CATCH_CONFIG_CPP11_TUPLE // toString for tuples namespace TupleDetail { template< typename Tuple, std::size_t N = 0, bool = (N < std::tuple_size::value) > struct ElementPrinter { static void print( const Tuple& tuple, std::ostream& os ) { os << ( N ? ", " : " " ) << Catch::toString(std::get(tuple)); ElementPrinter::print(tuple,os); } }; template< typename Tuple, std::size_t N > struct ElementPrinter { static void print( const Tuple&, std::ostream& ) {} }; } template struct StringMaker> { static std::string convert( const std::tuple& tuple ) { std::ostringstream os; os << '{'; TupleDetail::ElementPrinter>::print( tuple, os ); os << " }"; return os.str(); } }; #endif // CATCH_CONFIG_CPP11_TUPLE namespace Detail { template std::string makeString( T const& value ) { return StringMaker::convert( value ); } } // end namespace Detail /// \brief converts any type to a string /// /// The default template forwards on to ostringstream - except when an /// ostringstream overload does not exist - in which case it attempts to detect /// that and writes {?}. /// Overload (not specialise) this template for custom typs that you don't want /// to provide an ostream overload for. 
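/// For example (illustrative only; `Fraction` is a hypothetical user type):
///
///     struct Fraction { int num, den; };
///
///     namespace Catch {
///         inline std::string toString( Fraction const& f ) {
///             return Catch::toString( f.num ) + "/" + Catch::toString( f.den );
///         }
///     }
///
/// With that overload in scope, a failing CHECK( f1 == f2 ) reports values
/// such as "3/4" instead of the {?} placeholder.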
template std::string toString( T const& value ) { return StringMaker::convert( value ); } namespace Detail { template std::string rangeToString( InputIterator first, InputIterator last ) { std::ostringstream oss; oss << "{ "; if( first != last ) { oss << Catch::toString( *first ); for( ++first ; first != last ; ++first ) oss << ", " << Catch::toString( *first ); } oss << " }"; return oss.str(); } } } // end namespace Catch namespace Catch { template class BinaryExpression; template class MatchExpression; // Wraps the LHS of an expression and overloads comparison operators // for also capturing those and RHS (if any) template class ExpressionLhs : public DecomposedExpression { public: ExpressionLhs( ResultBuilder& rb, T lhs ) : m_rb( rb ), m_lhs( lhs ), m_truthy(false) {} ExpressionLhs& operator = ( const ExpressionLhs& ); template BinaryExpression operator == ( RhsT const& rhs ) { return captureExpression( rhs ); } template BinaryExpression operator != ( RhsT const& rhs ) { return captureExpression( rhs ); } template BinaryExpression operator < ( RhsT const& rhs ) { return captureExpression( rhs ); } template BinaryExpression operator > ( RhsT const& rhs ) { return captureExpression( rhs ); } template BinaryExpression operator <= ( RhsT const& rhs ) { return captureExpression( rhs ); } template BinaryExpression operator >= ( RhsT const& rhs ) { return captureExpression( rhs ); } BinaryExpression operator == ( bool rhs ) { return captureExpression( rhs ); } BinaryExpression operator != ( bool rhs ) { return captureExpression( rhs ); } void endExpression() { m_truthy = m_lhs ? true : false; m_rb .setResultType( m_truthy ) .endExpression( *this ); } virtual void reconstructExpression( std::string& dest ) const CATCH_OVERRIDE { dest = Catch::toString( m_lhs ); } private: template BinaryExpression captureExpression( RhsT& rhs ) const { return BinaryExpression( m_rb, m_lhs, rhs ); } template BinaryExpression captureExpression( bool rhs ) const { return BinaryExpression( m_rb, m_lhs, rhs ); } private: ResultBuilder& m_rb; T m_lhs; bool m_truthy; }; template class BinaryExpression : public DecomposedExpression { public: BinaryExpression( ResultBuilder& rb, LhsT lhs, RhsT rhs ) : m_rb( rb ), m_lhs( lhs ), m_rhs( rhs ) {} BinaryExpression& operator = ( BinaryExpression& ); void endExpression() const { m_rb .setResultType( Internal::compare( m_lhs, m_rhs ) ) .endExpression( *this ); } virtual bool isBinaryExpression() const CATCH_OVERRIDE { return true; } virtual void reconstructExpression( std::string& dest ) const CATCH_OVERRIDE { std::string lhs = Catch::toString( m_lhs ); std::string rhs = Catch::toString( m_rhs ); char delim = lhs.size() + rhs.size() < 40 && lhs.find('\n') == std::string::npos && rhs.find('\n') == std::string::npos ? 
' ' : '\n'; dest.reserve( 7 + lhs.size() + rhs.size() ); // 2 for spaces around operator // 2 for operator // 2 for parentheses (conditionally added later) // 1 for negation (conditionally added later) dest = lhs; dest += delim; dest += Internal::OperatorTraits::getName(); dest += delim; dest += rhs; } private: ResultBuilder& m_rb; LhsT m_lhs; RhsT m_rhs; }; template class MatchExpression : public DecomposedExpression { public: MatchExpression( ArgT arg, MatcherT matcher, char const* matcherString ) : m_arg( arg ), m_matcher( matcher ), m_matcherString( matcherString ) {} virtual bool isBinaryExpression() const CATCH_OVERRIDE { return true; } virtual void reconstructExpression( std::string& dest ) const CATCH_OVERRIDE { std::string matcherAsString = m_matcher.toString(); dest = Catch::toString( m_arg ); dest += ' '; if( matcherAsString == Detail::unprintableString ) dest += m_matcherString; else dest += matcherAsString; } private: ArgT m_arg; MatcherT m_matcher; char const* m_matcherString; }; } // end namespace Catch namespace Catch { template inline ExpressionLhs ResultBuilder::operator <= ( T const& operand ) { return ExpressionLhs( *this, operand ); } inline ExpressionLhs ResultBuilder::operator <= ( bool value ) { return ExpressionLhs( *this, value ); } template inline void ResultBuilder::captureMatch( ArgT const& arg, MatcherT const& matcher, char const* matcherString ) { MatchExpression expr( arg, matcher, matcherString ); setResultType( matcher.match( arg ) ); endExpression( expr ); } } // namespace Catch // #included from: catch_message.h #define TWOBLUECUBES_CATCH_MESSAGE_H_INCLUDED #include namespace Catch { struct MessageInfo { MessageInfo( std::string const& _macroName, SourceLineInfo const& _lineInfo, ResultWas::OfType _type ); std::string macroName; SourceLineInfo lineInfo; ResultWas::OfType type; std::string message; unsigned int sequence; bool operator == ( MessageInfo const& other ) const { return sequence == other.sequence; } bool operator < ( MessageInfo const& other ) const { return sequence < other.sequence; } private: static unsigned int globalCount; }; struct MessageBuilder { MessageBuilder( std::string const& macroName, SourceLineInfo const& lineInfo, ResultWas::OfType type ) : m_info( macroName, lineInfo, type ) {} template MessageBuilder& operator << ( T const& value ) { m_stream << value; return *this; } MessageInfo m_info; std::ostringstream m_stream; }; class ScopedMessage { public: ScopedMessage( MessageBuilder const& builder ); ScopedMessage( ScopedMessage const& other ); ~ScopedMessage(); MessageInfo m_info; }; } // end namespace Catch // #included from: catch_interfaces_capture.h #define TWOBLUECUBES_CATCH_INTERFACES_CAPTURE_H_INCLUDED #include namespace Catch { class TestCase; class AssertionResult; struct AssertionInfo; struct SectionInfo; struct SectionEndInfo; struct MessageInfo; class ScopedMessageBuilder; struct Counts; struct IResultCapture { virtual ~IResultCapture(); virtual void assertionEnded( AssertionResult const& result ) = 0; virtual bool sectionStarted( SectionInfo const& sectionInfo, Counts& assertions ) = 0; virtual void sectionEnded( SectionEndInfo const& endInfo ) = 0; virtual void sectionEndedEarly( SectionEndInfo const& endInfo ) = 0; virtual void pushScopedMessage( MessageInfo const& message ) = 0; virtual void popScopedMessage( MessageInfo const& message ) = 0; virtual std::string getCurrentTestName() const = 0; virtual const AssertionResult* getLastResult() const = 0; virtual void exceptionEarlyReported() = 0; virtual void 
handleFatalErrorCondition( std::string const& message ) = 0; }; IResultCapture& getResultCapture(); } // #included from: catch_debugger.h #define TWOBLUECUBES_CATCH_DEBUGGER_H_INCLUDED // #included from: catch_platform.h #define TWOBLUECUBES_CATCH_PLATFORM_H_INCLUDED #if defined(__MAC_OS_X_VERSION_MIN_REQUIRED) # define CATCH_PLATFORM_MAC #elif defined(__IPHONE_OS_VERSION_MIN_REQUIRED) # define CATCH_PLATFORM_IPHONE #elif defined(linux) || defined(__linux) || defined(__linux__) # define CATCH_PLATFORM_LINUX #elif defined(WIN32) || defined(__WIN32__) || defined(_WIN32) || defined(_MSC_VER) # define CATCH_PLATFORM_WINDOWS # if !defined(NOMINMAX) && !defined(CATCH_CONFIG_NO_NOMINMAX) # define CATCH_DEFINES_NOMINMAX # endif # if !defined(WIN32_LEAN_AND_MEAN) && !defined(CATCH_CONFIG_NO_WIN32_LEAN_AND_MEAN) # define CATCH_DEFINES_WIN32_LEAN_AND_MEAN # endif #endif #include namespace Catch{ bool isDebuggerActive(); void writeToDebugConsole( std::string const& text ); } #ifdef CATCH_PLATFORM_MAC // The following code snippet based on: // http://cocoawithlove.com/2008/03/break-into-debugger.html #if defined(__ppc64__) || defined(__ppc__) #define CATCH_TRAP() \ __asm__("li r0, 20\nsc\nnop\nli r0, 37\nli r4, 2\nsc\nnop\n" \ : : : "memory","r0","r3","r4" ) #else #define CATCH_TRAP() __asm__("int $3\n" : : ) #endif #elif defined(CATCH_PLATFORM_LINUX) // If we can use inline assembler, do it because this allows us to break // directly at the location of the failing check instead of breaking inside // raise() called from it, i.e. one stack frame below. #if defined(__GNUC__) && (defined(__i386) || defined(__x86_64)) #define CATCH_TRAP() asm volatile ("int $3") #else // Fall back to the generic way. #include #define CATCH_TRAP() raise(SIGTRAP) #endif #elif defined(_MSC_VER) #define CATCH_TRAP() __debugbreak() #elif defined(__MINGW32__) extern "C" __declspec(dllimport) void __stdcall DebugBreak(); #define CATCH_TRAP() DebugBreak() #endif #ifdef CATCH_TRAP #define CATCH_BREAK_INTO_DEBUGGER() if( Catch::isDebuggerActive() ) { CATCH_TRAP(); } #else #define CATCH_BREAK_INTO_DEBUGGER() Catch::alwaysTrue(); #endif // #included from: catch_interfaces_runner.h #define TWOBLUECUBES_CATCH_INTERFACES_RUNNER_H_INCLUDED namespace Catch { class TestCase; struct IRunner { virtual ~IRunner(); virtual bool aborting() const = 0; }; } #if defined(CATCH_CONFIG_FAST_COMPILE) /////////////////////////////////////////////////////////////////////////////// // We can speedup compilation significantly by breaking into debugger lower in // the callstack, because then we don't have to expand CATCH_BREAK_INTO_DEBUGGER // macro in each assertion #define INTERNAL_CATCH_REACT( resultBuilder ) \ resultBuilder.react(); /////////////////////////////////////////////////////////////////////////////// // Another way to speed-up compilation is to omit local try-catch for REQUIRE* // macros. // This can potentially cause false negative, if the test code catches // the exception before it propagates back up to the runner. 
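// Opting in is a compile-time switch: the macro has to be visible before this
// header is processed. A sketch (in a testthat-based package this would
// typically sit at the top of the .cpp file that includes <testthat.h>, which
// in turn pulls in this header):
//
//     #define CATCH_CONFIG_FAST_COMPILE
//     #include <testthat.h>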
#define INTERNAL_CATCH_TEST_NO_TRY( macroName, resultDisposition, expr ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr, resultDisposition ); \ __catchResult.setExceptionGuard(); \ CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS \ ( __catchResult <= expr ).endExpression(); \ CATCH_INTERNAL_UNSUPPRESS_PARENTHESES_WARNINGS \ __catchResult.unsetExceptionGuard(); \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::isTrue( false && static_cast( !!(expr) ) ) ) // expr here is never evaluated at runtime but it forces the compiler to give it a look // The double negation silences MSVC's C4800 warning, the static_cast forces short-circuit evaluation if the type has overloaded &&. #define INTERNAL_CHECK_THAT_NO_TRY( macroName, matcher, resultDisposition, arg ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #arg ", " #matcher, resultDisposition ); \ __catchResult.setExceptionGuard(); \ __catchResult.captureMatch( arg, matcher, #matcher ); \ __catchResult.unsetExceptionGuard(); \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::alwaysFalse() ) #else /////////////////////////////////////////////////////////////////////////////// // In the event of a failure works out if the debugger needs to be invoked // and/or an exception thrown and takes appropriate action. // This needs to be done as a macro so the debugger will stop in the user // source code rather than in Catch library code #define INTERNAL_CATCH_REACT( resultBuilder ) \ if( resultBuilder.shouldDebugBreak() ) CATCH_BREAK_INTO_DEBUGGER(); \ resultBuilder.react(); #endif /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_TEST( macroName, resultDisposition, expr ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr, resultDisposition ); \ try { \ CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS \ ( __catchResult <= expr ).endExpression(); \ CATCH_INTERNAL_UNSUPPRESS_PARENTHESES_WARNINGS \ } \ catch( ... ) { \ __catchResult.useActiveException( resultDisposition ); \ } \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::isTrue( false && static_cast( !!(expr) ) ) ) // expr here is never evaluated at runtime but it forces the compiler to give it a look // The double negation silences MSVC's C4800 warning, the static_cast forces short-circuit evaluation if the type has overloaded &&. /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_IF( macroName, resultDisposition, expr ) \ INTERNAL_CATCH_TEST( macroName, resultDisposition, expr ); \ if( Catch::getResultCapture().getLastResult()->succeeded() ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_ELSE( macroName, resultDisposition, expr ) \ INTERNAL_CATCH_TEST( macroName, resultDisposition, expr ); \ if( !Catch::getResultCapture().getLastResult()->succeeded() ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_NO_THROW( macroName, resultDisposition, expr ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr, resultDisposition ); \ try { \ static_cast(expr); \ __catchResult.captureResult( Catch::ResultWas::Ok ); \ } \ catch( ... 
) { \ __catchResult.useActiveException( resultDisposition ); \ } \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::alwaysFalse() ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_THROWS( macroName, resultDisposition, matcher, expr ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr, resultDisposition, #matcher ); \ if( __catchResult.allowThrows() ) \ try { \ static_cast(expr); \ __catchResult.captureResult( Catch::ResultWas::DidntThrowException ); \ } \ catch( ... ) { \ __catchResult.captureExpectedException( matcher ); \ } \ else \ __catchResult.captureResult( Catch::ResultWas::Ok ); \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::alwaysFalse() ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_THROWS_AS( macroName, exceptionType, resultDisposition, expr ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr ", " #exceptionType, resultDisposition ); \ if( __catchResult.allowThrows() ) \ try { \ static_cast(expr); \ __catchResult.captureResult( Catch::ResultWas::DidntThrowException ); \ } \ catch( const exceptionType& ) { \ __catchResult.captureResult( Catch::ResultWas::Ok ); \ } \ catch( ... ) { \ __catchResult.useActiveException( resultDisposition ); \ } \ else \ __catchResult.captureResult( Catch::ResultWas::Ok ); \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::alwaysFalse() ) /////////////////////////////////////////////////////////////////////////////// #ifdef CATCH_CONFIG_VARIADIC_MACROS #define INTERNAL_CATCH_MSG( macroName, messageType, resultDisposition, ... ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, "", resultDisposition ); \ __catchResult << __VA_ARGS__ + ::Catch::StreamEndStop(); \ __catchResult.captureResult( messageType ); \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::alwaysFalse() ) #else #define INTERNAL_CATCH_MSG( macroName, messageType, resultDisposition, log ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, "", resultDisposition ); \ __catchResult << log + ::Catch::StreamEndStop(); \ __catchResult.captureResult( messageType ); \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::alwaysFalse() ) #endif /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_INFO( macroName, log ) \ Catch::ScopedMessage INTERNAL_CATCH_UNIQUE_NAME( scopedMessage ) = Catch::MessageBuilder( macroName, CATCH_INTERNAL_LINEINFO, Catch::ResultWas::Info ) << log; /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CHECK_THAT( macroName, matcher, resultDisposition, arg ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #arg ", " #matcher, resultDisposition ); \ try { \ __catchResult.captureMatch( arg, matcher, #matcher ); \ } catch( ... 
) { \ __catchResult.useActiveException( resultDisposition | Catch::ResultDisposition::ContinueOnFailure ); \ } \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::alwaysFalse() ) // #included from: internal/catch_section.h #define TWOBLUECUBES_CATCH_SECTION_H_INCLUDED // #included from: catch_section_info.h #define TWOBLUECUBES_CATCH_SECTION_INFO_H_INCLUDED // #included from: catch_totals.hpp #define TWOBLUECUBES_CATCH_TOTALS_HPP_INCLUDED #include namespace Catch { struct Counts { Counts() : passed( 0 ), failed( 0 ), failedButOk( 0 ) {} Counts operator - ( Counts const& other ) const { Counts diff; diff.passed = passed - other.passed; diff.failed = failed - other.failed; diff.failedButOk = failedButOk - other.failedButOk; return diff; } Counts& operator += ( Counts const& other ) { passed += other.passed; failed += other.failed; failedButOk += other.failedButOk; return *this; } std::size_t total() const { return passed + failed + failedButOk; } bool allPassed() const { return failed == 0 && failedButOk == 0; } bool allOk() const { return failed == 0; } std::size_t passed; std::size_t failed; std::size_t failedButOk; }; struct Totals { Totals operator - ( Totals const& other ) const { Totals diff; diff.assertions = assertions - other.assertions; diff.testCases = testCases - other.testCases; return diff; } Totals delta( Totals const& prevTotals ) const { Totals diff = *this - prevTotals; if( diff.assertions.failed > 0 ) ++diff.testCases.failed; else if( diff.assertions.failedButOk > 0 ) ++diff.testCases.failedButOk; else ++diff.testCases.passed; return diff; } Totals& operator += ( Totals const& other ) { assertions += other.assertions; testCases += other.testCases; return *this; } Counts assertions; Counts testCases; }; } #include namespace Catch { struct SectionInfo { SectionInfo ( SourceLineInfo const& _lineInfo, std::string const& _name, std::string const& _description = std::string() ); std::string name; std::string description; SourceLineInfo lineInfo; }; struct SectionEndInfo { SectionEndInfo( SectionInfo const& _sectionInfo, Counts const& _prevAssertions, double _durationInSeconds ) : sectionInfo( _sectionInfo ), prevAssertions( _prevAssertions ), durationInSeconds( _durationInSeconds ) {} SectionInfo sectionInfo; Counts prevAssertions; double durationInSeconds; }; } // end namespace Catch // #included from: catch_timer.h #define TWOBLUECUBES_CATCH_TIMER_H_INCLUDED #ifdef _MSC_VER namespace Catch { typedef unsigned long long UInt64; } #else #include namespace Catch { typedef uint64_t UInt64; } #endif namespace Catch { class Timer { public: Timer() : m_ticks( 0 ) {} void start(); unsigned int getElapsedMicroseconds() const; unsigned int getElapsedMilliseconds() const; double getElapsedSeconds() const; private: UInt64 m_ticks; }; } // namespace Catch #include namespace Catch { class Section : NonCopyable { public: Section( SectionInfo const& info ); ~Section(); // This indicates whether the section should be executed or not operator bool() const; private: SectionInfo m_info; std::string m_name; Counts m_assertions; bool m_sectionIncluded; Timer m_timer; }; } // end namespace Catch #ifdef CATCH_CONFIG_VARIADIC_MACROS #define INTERNAL_CATCH_SECTION( ... 
) \ if( Catch::Section const& INTERNAL_CATCH_UNIQUE_NAME( catch_internal_Section ) = Catch::SectionInfo( CATCH_INTERNAL_LINEINFO, __VA_ARGS__ ) ) #else #define INTERNAL_CATCH_SECTION( name, desc ) \ if( Catch::Section const& INTERNAL_CATCH_UNIQUE_NAME( catch_internal_Section ) = Catch::SectionInfo( CATCH_INTERNAL_LINEINFO, name, desc ) ) #endif // #included from: internal/catch_generators.hpp #define TWOBLUECUBES_CATCH_GENERATORS_HPP_INCLUDED #include #include #include namespace Catch { template struct IGenerator { virtual ~IGenerator() {} virtual T getValue( std::size_t index ) const = 0; virtual std::size_t size () const = 0; }; template class BetweenGenerator : public IGenerator { public: BetweenGenerator( T from, T to ) : m_from( from ), m_to( to ){} virtual T getValue( std::size_t index ) const { return m_from+static_cast( index ); } virtual std::size_t size() const { return static_cast( 1+m_to-m_from ); } private: T m_from; T m_to; }; template class ValuesGenerator : public IGenerator { public: ValuesGenerator(){} void add( T value ) { m_values.push_back( value ); } virtual T getValue( std::size_t index ) const { return m_values[index]; } virtual std::size_t size() const { return m_values.size(); } private: std::vector m_values; }; template class CompositeGenerator { public: CompositeGenerator() : m_totalSize( 0 ) {} // *** Move semantics, similar to auto_ptr *** CompositeGenerator( CompositeGenerator& other ) : m_fileInfo( other.m_fileInfo ), m_totalSize( 0 ) { move( other ); } CompositeGenerator& setFileInfo( const char* fileInfo ) { m_fileInfo = fileInfo; return *this; } ~CompositeGenerator() { deleteAll( m_composed ); } operator T () const { size_t overallIndex = getCurrentContext().getGeneratorIndex( m_fileInfo, m_totalSize ); typename std::vector*>::const_iterator it = m_composed.begin(); typename std::vector*>::const_iterator itEnd = m_composed.end(); for( size_t index = 0; it != itEnd; ++it ) { const IGenerator* generator = *it; if( overallIndex >= index && overallIndex < index + generator->size() ) { return generator->getValue( overallIndex-index ); } index += generator->size(); } CATCH_INTERNAL_ERROR( "Indexed past end of generated range" ); return T(); // Suppress spurious "not all control paths return a value" warning in Visual Studio - if you know how to fix this please do so } void add( const IGenerator* generator ) { m_totalSize += generator->size(); m_composed.push_back( generator ); } CompositeGenerator& then( CompositeGenerator& other ) { move( other ); return *this; } CompositeGenerator& then( T value ) { ValuesGenerator* valuesGen = new ValuesGenerator(); valuesGen->add( value ); add( valuesGen ); return *this; } private: void move( CompositeGenerator& other ) { m_composed.insert( m_composed.end(), other.m_composed.begin(), other.m_composed.end() ); m_totalSize += other.m_totalSize; other.m_composed.clear(); } std::vector*> m_composed; std::string m_fileInfo; size_t m_totalSize; }; namespace Generators { template CompositeGenerator between( T from, T to ) { CompositeGenerator generators; generators.add( new BetweenGenerator( from, to ) ); return generators; } template CompositeGenerator values( T val1, T val2 ) { CompositeGenerator generators; ValuesGenerator* valuesGen = new ValuesGenerator(); valuesGen->add( val1 ); valuesGen->add( val2 ); generators.add( valuesGen ); return generators; } template CompositeGenerator values( T val1, T val2, T val3 ){ CompositeGenerator generators; ValuesGenerator* valuesGen = new ValuesGenerator(); valuesGen->add( val1 ); 
valuesGen->add( val2 ); valuesGen->add( val3 ); generators.add( valuesGen ); return generators; } template CompositeGenerator values( T val1, T val2, T val3, T val4 ) { CompositeGenerator generators; ValuesGenerator* valuesGen = new ValuesGenerator(); valuesGen->add( val1 ); valuesGen->add( val2 ); valuesGen->add( val3 ); valuesGen->add( val4 ); generators.add( valuesGen ); return generators; } } // end namespace Generators using namespace Generators; } // end namespace Catch #define INTERNAL_CATCH_LINESTR2( line ) #line #define INTERNAL_CATCH_LINESTR( line ) INTERNAL_CATCH_LINESTR2( line ) #define INTERNAL_CATCH_GENERATE( expr ) expr.setFileInfo( __FILE__ "(" INTERNAL_CATCH_LINESTR( __LINE__ ) ")" ) // #included from: internal/catch_interfaces_exception.h #define TWOBLUECUBES_CATCH_INTERFACES_EXCEPTION_H_INCLUDED #include #include // #included from: catch_interfaces_registry_hub.h #define TWOBLUECUBES_CATCH_INTERFACES_REGISTRY_HUB_H_INCLUDED #include namespace Catch { class TestCase; struct ITestCaseRegistry; struct IExceptionTranslatorRegistry; struct IExceptionTranslator; struct IReporterRegistry; struct IReporterFactory; struct ITagAliasRegistry; struct IRegistryHub { virtual ~IRegistryHub(); virtual IReporterRegistry const& getReporterRegistry() const = 0; virtual ITestCaseRegistry const& getTestCaseRegistry() const = 0; virtual ITagAliasRegistry const& getTagAliasRegistry() const = 0; virtual IExceptionTranslatorRegistry& getExceptionTranslatorRegistry() = 0; }; struct IMutableRegistryHub { virtual ~IMutableRegistryHub(); virtual void registerReporter( std::string const& name, Ptr const& factory ) = 0; virtual void registerListener( Ptr const& factory ) = 0; virtual void registerTest( TestCase const& testInfo ) = 0; virtual void registerTranslator( const IExceptionTranslator* translator ) = 0; virtual void registerTagAlias( std::string const& alias, std::string const& tag, SourceLineInfo const& lineInfo ) = 0; }; IRegistryHub& getRegistryHub(); IMutableRegistryHub& getMutableRegistryHub(); void cleanUp(); std::string translateActiveException(); } namespace Catch { typedef std::string(*exceptionTranslateFunction)(); struct IExceptionTranslator; typedef std::vector ExceptionTranslators; struct IExceptionTranslator { virtual ~IExceptionTranslator(); virtual std::string translate( ExceptionTranslators::const_iterator it, ExceptionTranslators::const_iterator itEnd ) const = 0; }; struct IExceptionTranslatorRegistry { virtual ~IExceptionTranslatorRegistry(); virtual std::string translateActiveException() const = 0; }; class ExceptionTranslatorRegistrar { template class ExceptionTranslator : public IExceptionTranslator { public: ExceptionTranslator( std::string(*translateFunction)( T& ) ) : m_translateFunction( translateFunction ) {} virtual std::string translate( ExceptionTranslators::const_iterator it, ExceptionTranslators::const_iterator itEnd ) const CATCH_OVERRIDE { try { if( it == itEnd ) throw; else return (*it)->translate( it+1, itEnd ); } catch( T& ex ) { return m_translateFunction( ex ); } } protected: std::string(*m_translateFunction)( T& ); }; public: template ExceptionTranslatorRegistrar( std::string(*translateFunction)( T& ) ) { getMutableRegistryHub().registerTranslator ( new ExceptionTranslator( translateFunction ) ); } }; } /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_TRANSLATE_EXCEPTION2( translatorName, signature ) \ static std::string translatorName( signature ); \ namespace{ Catch::ExceptionTranslatorRegistrar 
INTERNAL_CATCH_UNIQUE_NAME( catch_internal_ExceptionRegistrar )( &translatorName ); }\ static std::string translatorName( signature ) #define INTERNAL_CATCH_TRANSLATE_EXCEPTION( signature ) INTERNAL_CATCH_TRANSLATE_EXCEPTION2( INTERNAL_CATCH_UNIQUE_NAME( catch_internal_ExceptionTranslator ), signature ) // #included from: internal/catch_approx.hpp #define TWOBLUECUBES_CATCH_APPROX_HPP_INCLUDED #include #include #if defined(CATCH_CONFIG_CPP11_TYPE_TRAITS) #include #endif namespace Catch { namespace Detail { class Approx { public: explicit Approx ( double value ) : m_epsilon( std::numeric_limits::epsilon()*100 ), m_margin( 0.0 ), m_scale( 1.0 ), m_value( value ) {} Approx( Approx const& other ) : m_epsilon( other.m_epsilon ), m_margin( other.m_margin ), m_scale( other.m_scale ), m_value( other.m_value ) {} static Approx custom() { return Approx( 0 ); } #if defined(CATCH_CONFIG_CPP11_TYPE_TRAITS) template ::value>::type> Approx operator()( T value ) { Approx approx( static_cast(value) ); approx.epsilon( m_epsilon ); approx.margin( m_margin ); approx.scale( m_scale ); return approx; } template ::value>::type> explicit Approx( T value ): Approx(static_cast(value)) {} template ::value>::type> friend bool operator == ( const T& lhs, Approx const& rhs ) { // Thanks to Richard Harris for his help refining this formula auto lhs_v = double(lhs); bool relativeOK = std::fabs(lhs_v - rhs.m_value) < rhs.m_epsilon * (rhs.m_scale + (std::max)(std::fabs(lhs_v), std::fabs(rhs.m_value))); if (relativeOK) { return true; } return std::fabs(lhs_v - rhs.m_value) < rhs.m_margin; } template ::value>::type> friend bool operator == ( Approx const& lhs, const T& rhs ) { return operator==( rhs, lhs ); } template ::value>::type> friend bool operator != ( T lhs, Approx const& rhs ) { return !operator==( lhs, rhs ); } template ::value>::type> friend bool operator != ( Approx const& lhs, T rhs ) { return !operator==( rhs, lhs ); } template ::value>::type> friend bool operator <= ( T lhs, Approx const& rhs ) { return double(lhs) < rhs.m_value || lhs == rhs; } template ::value>::type> friend bool operator <= ( Approx const& lhs, T rhs ) { return lhs.m_value < double(rhs) || lhs == rhs; } template ::value>::type> friend bool operator >= ( T lhs, Approx const& rhs ) { return double(lhs) > rhs.m_value || lhs == rhs; } template ::value>::type> friend bool operator >= ( Approx const& lhs, T rhs ) { return lhs.m_value > double(rhs) || lhs == rhs; } template ::value>::type> Approx& epsilon( T newEpsilon ) { m_epsilon = double(newEpsilon); return *this; } template ::value>::type> Approx& margin( T newMargin ) { m_margin = double(newMargin); return *this; } template ::value>::type> Approx& scale( T newScale ) { m_scale = double(newScale); return *this; } #else Approx operator()( double value ) { Approx approx( value ); approx.epsilon( m_epsilon ); approx.margin( m_margin ); approx.scale( m_scale ); return approx; } friend bool operator == ( double lhs, Approx const& rhs ) { // Thanks to Richard Harris for his help refining this formula bool relativeOK = std::fabs( lhs - rhs.m_value ) < rhs.m_epsilon * (rhs.m_scale + (std::max)( std::fabs(lhs), std::fabs(rhs.m_value) ) ); if (relativeOK) { return true; } return std::fabs(lhs - rhs.m_value) < rhs.m_margin; } friend bool operator == ( Approx const& lhs, double rhs ) { return operator==( rhs, lhs ); } friend bool operator != ( double lhs, Approx const& rhs ) { return !operator==( lhs, rhs ); } friend bool operator != ( Approx const& lhs, double rhs ) { return !operator==( rhs, lhs ); } 
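// Worked example of the relative-tolerance formula above (illustrative;
// REQUIRE may appear as CATCH_REQUIRE depending on configuration):
//
//     REQUIRE( 101.0 == Approx( 100.0 ).epsilon( 0.02 ) );
//
// passes because |101.0 - 100.0| = 1.0 is below
// 0.02 * (scale 1.0 + max(101.0, 100.0)) = 2.04, whereas with .epsilon( 0.005 )
// the bound drops to 0.51 and the assertion fails unless a nonzero .margin()
// absorbs the difference.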
friend bool operator <= ( double lhs, Approx const& rhs ) { return lhs < rhs.m_value || lhs == rhs; } friend bool operator <= ( Approx const& lhs, double rhs ) { return lhs.m_value < rhs || lhs == rhs; } friend bool operator >= ( double lhs, Approx const& rhs ) { return lhs > rhs.m_value || lhs == rhs; } friend bool operator >= ( Approx const& lhs, double rhs ) { return lhs.m_value > rhs || lhs == rhs; } Approx& epsilon( double newEpsilon ) { m_epsilon = newEpsilon; return *this; } Approx& margin( double newMargin ) { m_margin = newMargin; return *this; } Approx& scale( double newScale ) { m_scale = newScale; return *this; } #endif std::string toString() const { std::ostringstream oss; oss << "Approx( " << Catch::toString( m_value ) << " )"; return oss.str(); } private: double m_epsilon; double m_margin; double m_scale; double m_value; }; } template<> inline std::string toString( Detail::Approx const& value ) { return value.toString(); } } // end namespace Catch // #included from: internal/catch_matchers_string.h #define TWOBLUECUBES_CATCH_MATCHERS_STRING_H_INCLUDED namespace Catch { namespace Matchers { namespace StdString { struct CasedString { CasedString( std::string const& str, CaseSensitive::Choice caseSensitivity ); std::string adjustString( std::string const& str ) const; std::string caseSensitivitySuffix() const; CaseSensitive::Choice m_caseSensitivity; std::string m_str; }; struct StringMatcherBase : MatcherBase { StringMatcherBase( std::string const& operation, CasedString const& comparator ); virtual std::string describe() const CATCH_OVERRIDE; CasedString m_comparator; std::string m_operation; }; struct EqualsMatcher : StringMatcherBase { EqualsMatcher( CasedString const& comparator ); virtual bool match( std::string const& source ) const CATCH_OVERRIDE; }; struct ContainsMatcher : StringMatcherBase { ContainsMatcher( CasedString const& comparator ); virtual bool match( std::string const& source ) const CATCH_OVERRIDE; }; struct StartsWithMatcher : StringMatcherBase { StartsWithMatcher( CasedString const& comparator ); virtual bool match( std::string const& source ) const CATCH_OVERRIDE; }; struct EndsWithMatcher : StringMatcherBase { EndsWithMatcher( CasedString const& comparator ); virtual bool match( std::string const& source ) const CATCH_OVERRIDE; }; } // namespace StdString // The following functions create the actual matcher objects. 
    // This allows the types to be inferred

    StdString::EqualsMatcher Equals( std::string const& str, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes );
    StdString::ContainsMatcher Contains( std::string const& str, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes );
    StdString::EndsWithMatcher EndsWith( std::string const& str, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes );
    StdString::StartsWithMatcher StartsWith( std::string const& str, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes );

} // namespace Matchers
} // namespace Catch

// #included from: internal/catch_matchers_vector.h
#define TWOBLUECUBES_CATCH_MATCHERS_VECTOR_H_INCLUDED

namespace Catch {
namespace Matchers {

    namespace Vector {

        template<typename T>
        struct ContainsElementMatcher : MatcherBase<std::vector<T>, T> {

            ContainsElementMatcher(T const &comparator) : m_comparator( comparator) {}

            bool match(std::vector<T> const &v) const CATCH_OVERRIDE {
                return std::find(v.begin(), v.end(), m_comparator) != v.end();
            }

            virtual std::string describe() const CATCH_OVERRIDE {
                return "Contains: " + Catch::toString( m_comparator );
            }

            T const& m_comparator;
        };

        template<typename T>
        struct ContainsMatcher : MatcherBase<std::vector<T>, std::vector<T> > {

            ContainsMatcher(std::vector<T> const &comparator) : m_comparator( comparator ) {}

            bool match(std::vector<T> const &v) const CATCH_OVERRIDE {
                // !TBD: see note in EqualsMatcher
                if (m_comparator.size() > v.size())
                    return false;
                for (size_t i = 0; i < m_comparator.size(); ++i)
                    if (std::find(v.begin(), v.end(), m_comparator[i]) == v.end())
                        return false;
                return true;
            }
            virtual std::string describe() const CATCH_OVERRIDE {
                return "Contains: " + Catch::toString( m_comparator );
            }

            std::vector<T> const& m_comparator;
        };

        template<typename T>
        struct EqualsMatcher : MatcherBase<std::vector<T>, std::vector<T> > {

            EqualsMatcher(std::vector<T> const &comparator) : m_comparator( comparator ) {}

            bool match(std::vector<T> const &v) const CATCH_OVERRIDE {
                // !TBD: This currently works if all elements can be compared using !=
                // - a more general approach would be via a compare template that defaults
                // to using !=. but could be specialised for, e.g. std::vector etc
                // - then just call that directly
                if (m_comparator.size() != v.size())
                    return false;
                for (size_t i = 0; i < v.size(); ++i)
                    if (m_comparator[i] != v[i])
                        return false;
                return true;
            }
            virtual std::string describe() const CATCH_OVERRIDE {
                return "Equals: " + Catch::toString( m_comparator );
            }
            std::vector<T> const& m_comparator;
        };

    } // namespace Vector

    // The following functions create the actual matcher objects.
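    // [Editorial note, not part of the upstream Catch header] Hedged usage sketch
    // for the vector matchers above (hypothetical vectors `v` and `firstTwo`),
    // kept as a comment only:
    //
    //     using namespace Catch::Matchers;
    //     std::vector<int> v, firstTwo;
    //     v.push_back( 1 ); v.push_back( 2 ); v.push_back( 3 );
    //     firstTwo.push_back( 1 ); firstTwo.push_back( 2 );
    //     CHECK_THAT( v, VectorContains( 2 ) );   // contains the single element 2
    //     CHECK_THAT( v, Contains( firstTwo ) );  // contains every element of firstTwo
    //     CHECK_THAT( v, Equals( v ) );           // identical contents, same order
    //
    // With a std::vector argument the template overloads below are selected rather
    // than the std::string matchers of the same name.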
// This allows the types to be inferred template Vector::ContainsMatcher Contains( std::vector const& comparator ) { return Vector::ContainsMatcher( comparator ); } template Vector::ContainsElementMatcher VectorContains( T const& comparator ) { return Vector::ContainsElementMatcher( comparator ); } template Vector::EqualsMatcher Equals( std::vector const& comparator ) { return Vector::EqualsMatcher( comparator ); } } // namespace Matchers } // namespace Catch // #included from: internal/catch_interfaces_tag_alias_registry.h #define TWOBLUECUBES_CATCH_INTERFACES_TAG_ALIAS_REGISTRY_H_INCLUDED // #included from: catch_tag_alias.h #define TWOBLUECUBES_CATCH_TAG_ALIAS_H_INCLUDED #include namespace Catch { struct TagAlias { TagAlias( std::string const& _tag, SourceLineInfo _lineInfo ) : tag( _tag ), lineInfo( _lineInfo ) {} std::string tag; SourceLineInfo lineInfo; }; struct RegistrarForTagAliases { RegistrarForTagAliases( char const* alias, char const* tag, SourceLineInfo const& lineInfo ); }; } // end namespace Catch #define CATCH_REGISTER_TAG_ALIAS( alias, spec ) namespace{ Catch::RegistrarForTagAliases INTERNAL_CATCH_UNIQUE_NAME( AutoRegisterTagAlias )( alias, spec, CATCH_INTERNAL_LINEINFO ); } // #included from: catch_option.hpp #define TWOBLUECUBES_CATCH_OPTION_HPP_INCLUDED namespace Catch { // An optional type template class Option { public: Option() : nullableValue( CATCH_NULL ) {} Option( T const& _value ) : nullableValue( new( storage ) T( _value ) ) {} Option( Option const& _other ) : nullableValue( _other ? new( storage ) T( *_other ) : CATCH_NULL ) {} ~Option() { reset(); } Option& operator= ( Option const& _other ) { if( &_other != this ) { reset(); if( _other ) nullableValue = new( storage ) T( *_other ); } return *this; } Option& operator = ( T const& _value ) { reset(); nullableValue = new( storage ) T( _value ); return *this; } void reset() { if( nullableValue ) nullableValue->~T(); nullableValue = CATCH_NULL; } T& operator*() { return *nullableValue; } T const& operator*() const { return *nullableValue; } T* operator->() { return nullableValue; } const T* operator->() const { return nullableValue; } T valueOr( T const& defaultValue ) const { return nullableValue ? 
*nullableValue : defaultValue; } bool some() const { return nullableValue != CATCH_NULL; } bool none() const { return nullableValue == CATCH_NULL; } bool operator !() const { return nullableValue == CATCH_NULL; } operator SafeBool::type() const { return SafeBool::makeSafe( some() ); } private: T *nullableValue; union { char storage[sizeof(T)]; // These are here to force alignment for the storage long double dummy1; void (*dummy2)(); long double dummy3; #ifdef CATCH_CONFIG_CPP11_LONG_LONG long long dummy4; #endif }; }; } // end namespace Catch namespace Catch { struct ITagAliasRegistry { virtual ~ITagAliasRegistry(); virtual Option find( std::string const& alias ) const = 0; virtual std::string expandAliases( std::string const& unexpandedTestSpec ) const = 0; static ITagAliasRegistry const& get(); }; } // end namespace Catch // These files are included here so the single_include script doesn't put them // in the conditionally compiled sections // #included from: internal/catch_test_case_info.h #define TWOBLUECUBES_CATCH_TEST_CASE_INFO_H_INCLUDED #include #include #ifdef __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wpadded" #endif namespace Catch { struct ITestCase; struct TestCaseInfo { enum SpecialProperties{ None = 0, IsHidden = 1 << 1, ShouldFail = 1 << 2, MayFail = 1 << 3, Throws = 1 << 4, NonPortable = 1 << 5 }; TestCaseInfo( std::string const& _name, std::string const& _className, std::string const& _description, std::set const& _tags, SourceLineInfo const& _lineInfo ); TestCaseInfo( TestCaseInfo const& other ); friend void setTags( TestCaseInfo& testCaseInfo, std::set const& tags ); bool isHidden() const; bool throws() const; bool okToFail() const; bool expectedToFail() const; std::string name; std::string className; std::string description; std::set tags; std::set lcaseTags; std::string tagsAsString; SourceLineInfo lineInfo; SpecialProperties properties; }; class TestCase : public TestCaseInfo { public: TestCase( ITestCase* testCase, TestCaseInfo const& info ); TestCase( TestCase const& other ); TestCase withName( std::string const& _newName ) const; void invoke() const; TestCaseInfo const& getTestCaseInfo() const; void swap( TestCase& other ); bool operator == ( TestCase const& other ) const; bool operator < ( TestCase const& other ) const; TestCase& operator = ( TestCase const& other ); private: Ptr test; }; TestCase makeTestCase( ITestCase* testCase, std::string const& className, std::string const& name, std::string const& description, SourceLineInfo const& lineInfo ); } #ifdef __clang__ # pragma clang diagnostic pop #endif #ifdef __OBJC__ // #included from: internal/catch_objc.hpp #define TWOBLUECUBES_CATCH_OBJC_HPP_INCLUDED #import #include // NB. Any general catch headers included here must be included // in catch.hpp first to make sure they are included by the single // header for non obj-usage /////////////////////////////////////////////////////////////////////////////// // This protocol is really only here for (self) documenting purposes, since // all its methods are optional. 
@protocol OcFixture @optional -(void) setUp; -(void) tearDown; @end namespace Catch { class OcMethod : public SharedImpl { public: OcMethod( Class cls, SEL sel ) : m_cls( cls ), m_sel( sel ) {} virtual void invoke() const { id obj = [[m_cls alloc] init]; performOptionalSelector( obj, @selector(setUp) ); performOptionalSelector( obj, m_sel ); performOptionalSelector( obj, @selector(tearDown) ); arcSafeRelease( obj ); } private: virtual ~OcMethod() {} Class m_cls; SEL m_sel; }; namespace Detail{ inline std::string getAnnotation( Class cls, std::string const& annotationName, std::string const& testCaseName ) { NSString* selStr = [[NSString alloc] initWithFormat:@"Catch_%s_%s", annotationName.c_str(), testCaseName.c_str()]; SEL sel = NSSelectorFromString( selStr ); arcSafeRelease( selStr ); id value = performOptionalSelector( cls, sel ); if( value ) return [(NSString*)value UTF8String]; return ""; } } inline size_t registerTestMethods() { size_t noTestMethods = 0; int noClasses = objc_getClassList( CATCH_NULL, 0 ); Class* classes = (CATCH_UNSAFE_UNRETAINED Class *)malloc( sizeof(Class) * noClasses); objc_getClassList( classes, noClasses ); for( int c = 0; c < noClasses; c++ ) { Class cls = classes[c]; { u_int count; Method* methods = class_copyMethodList( cls, &count ); for( u_int m = 0; m < count ; m++ ) { SEL selector = method_getName(methods[m]); std::string methodName = sel_getName(selector); if( startsWith( methodName, "Catch_TestCase_" ) ) { std::string testCaseName = methodName.substr( 15 ); std::string name = Detail::getAnnotation( cls, "Name", testCaseName ); std::string desc = Detail::getAnnotation( cls, "Description", testCaseName ); const char* className = class_getName( cls ); getMutableRegistryHub().registerTest( makeTestCase( new OcMethod( cls, selector ), className, name.c_str(), desc.c_str(), SourceLineInfo() ) ); noTestMethods++; } } free(methods); } } return noTestMethods; } namespace Matchers { namespace Impl { namespace NSStringMatchers { struct StringHolder : MatcherBase{ StringHolder( NSString* substr ) : m_substr( [substr copy] ){} StringHolder( StringHolder const& other ) : m_substr( [other.m_substr copy] ){} StringHolder() { arcSafeRelease( m_substr ); } virtual bool match( NSString* arg ) const CATCH_OVERRIDE { return false; } NSString* m_substr; }; struct Equals : StringHolder { Equals( NSString* substr ) : StringHolder( substr ){} virtual bool match( NSString* str ) const CATCH_OVERRIDE { return (str != nil || m_substr == nil ) && [str isEqualToString:m_substr]; } virtual std::string describe() const CATCH_OVERRIDE { return "equals string: " + Catch::toString( m_substr ); } }; struct Contains : StringHolder { Contains( NSString* substr ) : StringHolder( substr ){} virtual bool match( NSString* str ) const { return (str != nil || m_substr == nil ) && [str rangeOfString:m_substr].location != NSNotFound; } virtual std::string describe() const CATCH_OVERRIDE { return "contains string: " + Catch::toString( m_substr ); } }; struct StartsWith : StringHolder { StartsWith( NSString* substr ) : StringHolder( substr ){} virtual bool match( NSString* str ) const { return (str != nil || m_substr == nil ) && [str rangeOfString:m_substr].location == 0; } virtual std::string describe() const CATCH_OVERRIDE { return "starts with: " + Catch::toString( m_substr ); } }; struct EndsWith : StringHolder { EndsWith( NSString* substr ) : StringHolder( substr ){} virtual bool match( NSString* str ) const { return (str != nil || m_substr == nil ) && [str rangeOfString:m_substr].location == 
[str length] - [m_substr length]; } virtual std::string describe() const CATCH_OVERRIDE { return "ends with: " + Catch::toString( m_substr ); } }; } // namespace NSStringMatchers } // namespace Impl inline Impl::NSStringMatchers::Equals Equals( NSString* substr ){ return Impl::NSStringMatchers::Equals( substr ); } inline Impl::NSStringMatchers::Contains Contains( NSString* substr ){ return Impl::NSStringMatchers::Contains( substr ); } inline Impl::NSStringMatchers::StartsWith StartsWith( NSString* substr ){ return Impl::NSStringMatchers::StartsWith( substr ); } inline Impl::NSStringMatchers::EndsWith EndsWith( NSString* substr ){ return Impl::NSStringMatchers::EndsWith( substr ); } } // namespace Matchers using namespace Matchers; } // namespace Catch /////////////////////////////////////////////////////////////////////////////// #define OC_TEST_CASE( name, desc )\ +(NSString*) INTERNAL_CATCH_UNIQUE_NAME( Catch_Name_test ) \ {\ return @ name; \ }\ +(NSString*) INTERNAL_CATCH_UNIQUE_NAME( Catch_Description_test ) \ { \ return @ desc; \ } \ -(void) INTERNAL_CATCH_UNIQUE_NAME( Catch_TestCase_test ) #endif #ifdef CATCH_IMPL // !TBD: Move the leak detector code into a separate header #ifdef CATCH_CONFIG_WINDOWS_CRTDBG #include class LeakDetector { public: LeakDetector() { int flag = _CrtSetDbgFlag(_CRTDBG_REPORT_FLAG); flag |= _CRTDBG_LEAK_CHECK_DF; flag |= _CRTDBG_ALLOC_MEM_DF; _CrtSetDbgFlag(flag); _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE | _CRTDBG_MODE_DEBUG); _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR); // Change this to leaking allocation's number to break there _CrtSetBreakAlloc(-1); } }; #else class LeakDetector {}; #endif LeakDetector leakDetector; // #included from: internal/catch_impl.hpp #define TWOBLUECUBES_CATCH_IMPL_HPP_INCLUDED // Collect all the implementation files together here // These are the equivalent of what would usually be cpp files #ifdef __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wweak-vtables" #endif // #included from: ../catch_session.hpp #define TWOBLUECUBES_CATCH_RUNNER_HPP_INCLUDED // #included from: internal/catch_commandline.hpp #define TWOBLUECUBES_CATCH_COMMANDLINE_HPP_INCLUDED // #included from: catch_config.hpp #define TWOBLUECUBES_CATCH_CONFIG_HPP_INCLUDED // #included from: catch_test_spec_parser.hpp #define TWOBLUECUBES_CATCH_TEST_SPEC_PARSER_HPP_INCLUDED #ifdef __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wpadded" #endif // #included from: catch_test_spec.hpp #define TWOBLUECUBES_CATCH_TEST_SPEC_HPP_INCLUDED #ifdef __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wpadded" #endif // #included from: catch_wildcard_pattern.hpp #define TWOBLUECUBES_CATCH_WILDCARD_PATTERN_HPP_INCLUDED #include namespace Catch { class WildcardPattern { enum WildcardPosition { NoWildcard = 0, WildcardAtStart = 1, WildcardAtEnd = 2, WildcardAtBothEnds = WildcardAtStart | WildcardAtEnd }; public: WildcardPattern( std::string const& pattern, CaseSensitive::Choice caseSensitivity ) : m_caseSensitivity( caseSensitivity ), m_wildcard( NoWildcard ), m_pattern( adjustCase( pattern ) ) { if( startsWith( m_pattern, '*' ) ) { m_pattern = m_pattern.substr( 1 ); m_wildcard = WildcardAtStart; } if( endsWith( m_pattern, '*' ) ) { m_pattern = m_pattern.substr( 0, m_pattern.size()-1 ); m_wildcard = static_cast( m_wildcard | WildcardAtEnd ); } } virtual ~WildcardPattern(); virtual bool matches( std::string const& str ) const { switch( m_wildcard ) { case NoWildcard: return m_pattern == 
adjustCase( str ); case WildcardAtStart: return endsWith( adjustCase( str ), m_pattern ); case WildcardAtEnd: return startsWith( adjustCase( str ), m_pattern ); case WildcardAtBothEnds: return contains( adjustCase( str ), m_pattern ); } #ifdef __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wunreachable-code" #endif throw std::logic_error( "Unknown enum" ); #ifdef __clang__ # pragma clang diagnostic pop #endif } private: std::string adjustCase( std::string const& str ) const { return m_caseSensitivity == CaseSensitive::No ? toLower( str ) : str; } CaseSensitive::Choice m_caseSensitivity; WildcardPosition m_wildcard; std::string m_pattern; }; } #include #include namespace Catch { class TestSpec { struct Pattern : SharedImpl<> { virtual ~Pattern(); virtual bool matches( TestCaseInfo const& testCase ) const = 0; }; class NamePattern : public Pattern { public: NamePattern( std::string const& name ) : m_wildcardPattern( toLower( name ), CaseSensitive::No ) {} virtual ~NamePattern(); virtual bool matches( TestCaseInfo const& testCase ) const { return m_wildcardPattern.matches( toLower( testCase.name ) ); } private: WildcardPattern m_wildcardPattern; }; class TagPattern : public Pattern { public: TagPattern( std::string const& tag ) : m_tag( toLower( tag ) ) {} virtual ~TagPattern(); virtual bool matches( TestCaseInfo const& testCase ) const { return testCase.lcaseTags.find( m_tag ) != testCase.lcaseTags.end(); } private: std::string m_tag; }; class ExcludedPattern : public Pattern { public: ExcludedPattern( Ptr const& underlyingPattern ) : m_underlyingPattern( underlyingPattern ) {} virtual ~ExcludedPattern(); virtual bool matches( TestCaseInfo const& testCase ) const { return !m_underlyingPattern->matches( testCase ); } private: Ptr m_underlyingPattern; }; struct Filter { std::vector > m_patterns; bool matches( TestCaseInfo const& testCase ) const { // All patterns in a filter must match for the filter to be a match for( std::vector >::const_iterator it = m_patterns.begin(), itEnd = m_patterns.end(); it != itEnd; ++it ) { if( !(*it)->matches( testCase ) ) return false; } return true; } }; public: bool hasFilters() const { return !m_filters.empty(); } bool matches( TestCaseInfo const& testCase ) const { // A TestSpec matches if any filter matches for( std::vector::const_iterator it = m_filters.begin(), itEnd = m_filters.end(); it != itEnd; ++it ) if( it->matches( testCase ) ) return true; return false; } private: std::vector m_filters; friend class TestSpecParser; }; } #ifdef __clang__ # pragma clang diagnostic pop #endif namespace Catch { class TestSpecParser { enum Mode{ None, Name, QuotedName, Tag, EscapedName }; Mode m_mode; bool m_exclusion; std::size_t m_start, m_pos; std::string m_arg; std::vector m_escapeChars; TestSpec::Filter m_currentFilter; TestSpec m_testSpec; ITagAliasRegistry const* m_tagAliases; public: TestSpecParser( ITagAliasRegistry const& tagAliases ) : m_tagAliases( &tagAliases ) {} TestSpecParser& parse( std::string const& arg ) { m_mode = None; m_exclusion = false; m_start = std::string::npos; m_arg = m_tagAliases->expandAliases( arg ); m_escapeChars.clear(); for( m_pos = 0; m_pos < m_arg.size(); ++m_pos ) visitChar( m_arg[m_pos] ); if( m_mode == Name ) addPattern(); return *this; } TestSpec testSpec() { addFilter(); return m_testSpec; } private: void visitChar( char c ) { if( m_mode == None ) { switch( c ) { case ' ': return; case '~': m_exclusion = true; return; case '[': return startNewMode( Tag, ++m_pos ); case '"': return startNewMode( 
QuotedName, ++m_pos ); case '\\': return escape(); default: startNewMode( Name, m_pos ); break; } } if( m_mode == Name ) { if( c == ',' ) { addPattern(); addFilter(); } else if( c == '[' ) { if( subString() == "exclude:" ) m_exclusion = true; else addPattern(); startNewMode( Tag, ++m_pos ); } else if( c == '\\' ) escape(); } else if( m_mode == EscapedName ) m_mode = Name; else if( m_mode == QuotedName && c == '"' ) addPattern(); else if( m_mode == Tag && c == ']' ) addPattern(); } void startNewMode( Mode mode, std::size_t start ) { m_mode = mode; m_start = start; } void escape() { if( m_mode == None ) m_start = m_pos; m_mode = EscapedName; m_escapeChars.push_back( m_pos ); } std::string subString() const { return m_arg.substr( m_start, m_pos - m_start ); } template void addPattern() { std::string token = subString(); for( size_t i = 0; i < m_escapeChars.size(); ++i ) token = token.substr( 0, m_escapeChars[i]-m_start-i ) + token.substr( m_escapeChars[i]-m_start-i+1 ); m_escapeChars.clear(); if( startsWith( token, "exclude:" ) ) { m_exclusion = true; token = token.substr( 8 ); } if( !token.empty() ) { Ptr pattern = new T( token ); if( m_exclusion ) pattern = new TestSpec::ExcludedPattern( pattern ); m_currentFilter.m_patterns.push_back( pattern ); } m_exclusion = false; m_mode = None; } void addFilter() { if( !m_currentFilter.m_patterns.empty() ) { m_testSpec.m_filters.push_back( m_currentFilter ); m_currentFilter = TestSpec::Filter(); } } }; inline TestSpec parseTestSpec( std::string const& arg ) { return TestSpecParser( ITagAliasRegistry::get() ).parse( arg ).testSpec(); } } // namespace Catch #ifdef __clang__ # pragma clang diagnostic pop #endif // #included from: catch_interfaces_config.h #define TWOBLUECUBES_CATCH_INTERFACES_CONFIG_H_INCLUDED #include #include #include namespace Catch { struct Verbosity { enum Level { NoOutput = 0, Quiet, Normal }; }; struct WarnAbout { enum What { Nothing = 0x00, NoAssertions = 0x01 }; }; struct ShowDurations { enum OrNot { DefaultForReporter, Always, Never }; }; struct RunTests { enum InWhatOrder { InDeclarationOrder, InLexicographicalOrder, InRandomOrder }; }; struct UseColour { enum YesOrNo { Auto, Yes, No }; }; class TestSpec; struct IConfig : IShared { virtual ~IConfig(); virtual bool allowThrows() const = 0; virtual std::ostream& stream() const = 0; virtual std::string name() const = 0; virtual bool includeSuccessfulResults() const = 0; virtual bool shouldDebugBreak() const = 0; virtual bool warnAboutMissingAssertions() const = 0; virtual int abortAfter() const = 0; virtual bool showInvisibles() const = 0; virtual ShowDurations::OrNot showDurations() const = 0; virtual TestSpec const& testSpec() const = 0; virtual RunTests::InWhatOrder runOrder() const = 0; virtual unsigned int rngSeed() const = 0; virtual UseColour::YesOrNo useColour() const = 0; virtual std::vector const& getSectionsToRun() const = 0; }; } // #included from: catch_stream.h #define TWOBLUECUBES_CATCH_STREAM_H_INCLUDED // #included from: catch_streambuf.h #define TWOBLUECUBES_CATCH_STREAMBUF_H_INCLUDED #include namespace Catch { class StreamBufBase : public std::streambuf { public: virtual ~StreamBufBase() CATCH_NOEXCEPT; }; } #include #include #include #include namespace Catch { std::ostream& cout(); std::ostream& cerr(); struct IStream { virtual ~IStream() CATCH_NOEXCEPT; virtual std::ostream& stream() const = 0; }; class FileStream : public IStream { mutable std::ofstream m_ofs; public: FileStream( std::string const& filename ); virtual ~FileStream() CATCH_NOEXCEPT; public: // 
IStream virtual std::ostream& stream() const CATCH_OVERRIDE; }; class CoutStream : public IStream { mutable std::ostream m_os; public: CoutStream(); virtual ~CoutStream() CATCH_NOEXCEPT; public: // IStream virtual std::ostream& stream() const CATCH_OVERRIDE; }; class DebugOutStream : public IStream { CATCH_AUTO_PTR( StreamBufBase ) m_streamBuf; mutable std::ostream m_os; public: DebugOutStream(); virtual ~DebugOutStream() CATCH_NOEXCEPT; public: // IStream virtual std::ostream& stream() const CATCH_OVERRIDE; }; } #include #include #include #include #ifndef CATCH_CONFIG_CONSOLE_WIDTH #define CATCH_CONFIG_CONSOLE_WIDTH 80 #endif namespace Catch { struct ConfigData { ConfigData() : listTests( false ), listTags( false ), listReporters( false ), listTestNamesOnly( false ), listExtraInfo( false ), showSuccessfulTests( false ), shouldDebugBreak( false ), noThrow( false ), showHelp( false ), showInvisibles( false ), filenamesAsTags( false ), abortAfter( -1 ), rngSeed( 0 ), verbosity( Verbosity::Normal ), warnings( WarnAbout::Nothing ), showDurations( ShowDurations::DefaultForReporter ), runOrder( RunTests::InDeclarationOrder ), useColour( UseColour::Auto ) {} bool listTests; bool listTags; bool listReporters; bool listTestNamesOnly; bool listExtraInfo; bool showSuccessfulTests; bool shouldDebugBreak; bool noThrow; bool showHelp; bool showInvisibles; bool filenamesAsTags; int abortAfter; unsigned int rngSeed; Verbosity::Level verbosity; WarnAbout::What warnings; ShowDurations::OrNot showDurations; RunTests::InWhatOrder runOrder; UseColour::YesOrNo useColour; std::string outputFilename; std::string name; std::string processName; std::vector reporterNames; std::vector testsOrTags; std::vector sectionsToRun; }; class Config : public SharedImpl { private: Config( Config const& other ); Config& operator = ( Config const& other ); virtual void dummy(); public: Config() {} Config( ConfigData const& data ) : m_data( data ), m_stream( openStream() ) { if( !data.testsOrTags.empty() ) { TestSpecParser parser( ITagAliasRegistry::get() ); for( std::size_t i = 0; i < data.testsOrTags.size(); ++i ) parser.parse( data.testsOrTags[i] ); m_testSpec = parser.testSpec(); } } virtual ~Config() {} std::string const& getFilename() const { return m_data.outputFilename ; } bool listTests() const { return m_data.listTests; } bool listTestNamesOnly() const { return m_data.listTestNamesOnly; } bool listTags() const { return m_data.listTags; } bool listReporters() const { return m_data.listReporters; } bool listExtraInfo() const { return m_data.listExtraInfo; } std::string getProcessName() const { return m_data.processName; } std::vector const& getReporterNames() const { return m_data.reporterNames; } std::vector const& getSectionsToRun() const CATCH_OVERRIDE { return m_data.sectionsToRun; } virtual TestSpec const& testSpec() const CATCH_OVERRIDE { return m_testSpec; } bool showHelp() const { return m_data.showHelp; } // IConfig interface virtual bool allowThrows() const CATCH_OVERRIDE { return !m_data.noThrow; } virtual std::ostream& stream() const CATCH_OVERRIDE { return m_stream->stream(); } virtual std::string name() const CATCH_OVERRIDE { return m_data.name.empty() ? 
m_data.processName : m_data.name; } virtual bool includeSuccessfulResults() const CATCH_OVERRIDE { return m_data.showSuccessfulTests; } virtual bool warnAboutMissingAssertions() const CATCH_OVERRIDE { return m_data.warnings & WarnAbout::NoAssertions; } virtual ShowDurations::OrNot showDurations() const CATCH_OVERRIDE { return m_data.showDurations; } virtual RunTests::InWhatOrder runOrder() const CATCH_OVERRIDE { return m_data.runOrder; } virtual unsigned int rngSeed() const CATCH_OVERRIDE { return m_data.rngSeed; } virtual UseColour::YesOrNo useColour() const CATCH_OVERRIDE { return m_data.useColour; } virtual bool shouldDebugBreak() const CATCH_OVERRIDE { return m_data.shouldDebugBreak; } virtual int abortAfter() const CATCH_OVERRIDE { return m_data.abortAfter; } virtual bool showInvisibles() const CATCH_OVERRIDE { return m_data.showInvisibles; } private: IStream const* openStream() { if( m_data.outputFilename.empty() ) return new CoutStream(); else if( m_data.outputFilename[0] == '%' ) { if( m_data.outputFilename == "%debug" ) return new DebugOutStream(); else throw std::domain_error( "Unrecognised stream: " + m_data.outputFilename ); } else return new FileStream( m_data.outputFilename ); } ConfigData m_data; CATCH_AUTO_PTR( IStream const ) m_stream; TestSpec m_testSpec; }; } // end namespace Catch // #included from: catch_clara.h #define TWOBLUECUBES_CATCH_CLARA_H_INCLUDED // Use Catch's value for console width (store Clara's off to the side, if present) #ifdef CLARA_CONFIG_CONSOLE_WIDTH #define CATCH_TEMP_CLARA_CONFIG_CONSOLE_WIDTH CLARA_CONFIG_CONSOLE_WIDTH #undef CLARA_CONFIG_CONSOLE_WIDTH #endif #define CLARA_CONFIG_CONSOLE_WIDTH CATCH_CONFIG_CONSOLE_WIDTH // Declare Clara inside the Catch namespace #define STITCH_CLARA_OPEN_NAMESPACE namespace Catch { // #included from: ../external/clara.h // Version 0.0.2.4 // Only use header guard if we are not using an outer namespace #if !defined(TWOBLUECUBES_CLARA_H_INCLUDED) || defined(STITCH_CLARA_OPEN_NAMESPACE) #ifndef STITCH_CLARA_OPEN_NAMESPACE #define TWOBLUECUBES_CLARA_H_INCLUDED #define STITCH_CLARA_OPEN_NAMESPACE #define STITCH_CLARA_CLOSE_NAMESPACE #else #define STITCH_CLARA_CLOSE_NAMESPACE } #endif #define STITCH_TBC_TEXT_FORMAT_OPEN_NAMESPACE STITCH_CLARA_OPEN_NAMESPACE // ----------- #included from tbc_text_format.h ----------- // Only use header guard if we are not using an outer namespace #if !defined(TBC_TEXT_FORMAT_H_INCLUDED) || defined(STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE) #ifndef STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE #define TBC_TEXT_FORMAT_H_INCLUDED #endif #include #include #include #include #include // Use optional outer namespace #ifdef STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE namespace STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE { #endif namespace Tbc { #ifdef TBC_TEXT_FORMAT_CONSOLE_WIDTH const unsigned int consoleWidth = TBC_TEXT_FORMAT_CONSOLE_WIDTH; #else const unsigned int consoleWidth = 80; #endif struct TextAttributes { TextAttributes() : initialIndent( std::string::npos ), indent( 0 ), width( consoleWidth-1 ), tabChar( '\t' ) {} TextAttributes& setInitialIndent( std::size_t _value ) { initialIndent = _value; return *this; } TextAttributes& setIndent( std::size_t _value ) { indent = _value; return *this; } TextAttributes& setWidth( std::size_t _value ) { width = _value; return *this; } TextAttributes& setTabChar( char _value ) { tabChar = _value; return *this; } std::size_t initialIndent; // indent of first line, or npos std::size_t indent; // indent of subsequent lines, or all if initialIndent is npos std::size_t 
width; // maximum width of text, including indent. Longer text will wrap char tabChar; // If this char is seen the indent is changed to current pos }; class Text { public: Text( std::string const& _str, TextAttributes const& _attr = TextAttributes() ) : attr( _attr ) { std::string wrappableChars = " [({.,/|\\-"; std::size_t indent = _attr.initialIndent != std::string::npos ? _attr.initialIndent : _attr.indent; std::string remainder = _str; while( !remainder.empty() ) { if( lines.size() >= 1000 ) { lines.push_back( "... message truncated due to excessive size" ); return; } std::size_t tabPos = std::string::npos; std::size_t width = (std::min)( remainder.size(), _attr.width - indent ); std::size_t pos = remainder.find_first_of( '\n' ); if( pos <= width ) { width = pos; } pos = remainder.find_last_of( _attr.tabChar, width ); if( pos != std::string::npos ) { tabPos = pos; if( remainder[width] == '\n' ) width--; remainder = remainder.substr( 0, tabPos ) + remainder.substr( tabPos+1 ); } if( width == remainder.size() ) { spliceLine( indent, remainder, width ); } else if( remainder[width] == '\n' ) { spliceLine( indent, remainder, width ); if( width <= 1 || remainder.size() != 1 ) remainder = remainder.substr( 1 ); indent = _attr.indent; } else { pos = remainder.find_last_of( wrappableChars, width ); if( pos != std::string::npos && pos > 0 ) { spliceLine( indent, remainder, pos ); if( remainder[0] == ' ' ) remainder = remainder.substr( 1 ); } else { spliceLine( indent, remainder, width-1 ); lines.back() += "-"; } if( lines.size() == 1 ) indent = _attr.indent; if( tabPos != std::string::npos ) indent += tabPos; } } } void spliceLine( std::size_t _indent, std::string& _remainder, std::size_t _pos ) { lines.push_back( std::string( _indent, ' ' ) + _remainder.substr( 0, _pos ) ); _remainder = _remainder.substr( _pos ); } typedef std::vector::const_iterator const_iterator; const_iterator begin() const { return lines.begin(); } const_iterator end() const { return lines.end(); } std::string const& last() const { return lines.back(); } std::size_t size() const { return lines.size(); } std::string const& operator[]( std::size_t _index ) const { return lines[_index]; } std::string toString() const { std::ostringstream oss; oss << *this; return oss.str(); } inline friend std::ostream& operator << ( std::ostream& _stream, Text const& _text ) { for( Text::const_iterator it = _text.begin(), itEnd = _text.end(); it != itEnd; ++it ) { if( it != _text.begin() ) _stream << "\n"; _stream << *it; } return _stream; } private: std::string str; TextAttributes attr; std::vector lines; }; } // end namespace Tbc #ifdef STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE } // end outer namespace #endif #endif // TBC_TEXT_FORMAT_H_INCLUDED // ----------- end of #include from tbc_text_format.h ----------- // ........... back in clara.h #undef STITCH_TBC_TEXT_FORMAT_OPEN_NAMESPACE // ----------- #included from clara_compilers.h ----------- #ifndef TWOBLUECUBES_CLARA_COMPILERS_H_INCLUDED #define TWOBLUECUBES_CLARA_COMPILERS_H_INCLUDED // Detect a number of compiler features - mostly C++11/14 conformance - by compiler // The following features are defined: // // CLARA_CONFIG_CPP11_NULLPTR : is nullptr supported? // CLARA_CONFIG_CPP11_NOEXCEPT : is noexcept supported? // CLARA_CONFIG_CPP11_GENERATED_METHODS : The delete and default keywords for compiler generated methods // CLARA_CONFIG_CPP11_OVERRIDE : is override supported? 
// CLARA_CONFIG_CPP11_UNIQUE_PTR : is unique_ptr supported (otherwise use auto_ptr) // CLARA_CONFIG_CPP11_OR_GREATER : Is C++11 supported? // CLARA_CONFIG_VARIADIC_MACROS : are variadic macros supported? // In general each macro has a _NO_ form // (e.g. CLARA_CONFIG_CPP11_NO_NULLPTR) which disables the feature. // Many features, at point of detection, define an _INTERNAL_ macro, so they // can be combined, en-mass, with the _NO_ forms later. // All the C++11 features can be disabled with CLARA_CONFIG_NO_CPP11 #ifdef __clang__ #if __has_feature(cxx_nullptr) #define CLARA_INTERNAL_CONFIG_CPP11_NULLPTR #endif #if __has_feature(cxx_noexcept) #define CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT #endif #endif // __clang__ //////////////////////////////////////////////////////////////////////////////// // GCC #ifdef __GNUC__ #if __GNUC__ == 4 && __GNUC_MINOR__ >= 6 && defined(__GXX_EXPERIMENTAL_CXX0X__) #define CLARA_INTERNAL_CONFIG_CPP11_NULLPTR #endif // - otherwise more recent versions define __cplusplus >= 201103L // and will get picked up below #endif // __GNUC__ //////////////////////////////////////////////////////////////////////////////// // Visual C++ #ifdef _MSC_VER #if (_MSC_VER >= 1600) #define CLARA_INTERNAL_CONFIG_CPP11_NULLPTR #define CLARA_INTERNAL_CONFIG_CPP11_UNIQUE_PTR #endif #if (_MSC_VER >= 1900 ) // (VC++ 13 (VS2015)) #define CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT #define CLARA_INTERNAL_CONFIG_CPP11_GENERATED_METHODS #endif #endif // _MSC_VER //////////////////////////////////////////////////////////////////////////////// // C++ language feature support // catch all support for C++11 #if defined(__cplusplus) && __cplusplus >= 201103L #define CLARA_CPP11_OR_GREATER #if !defined(CLARA_INTERNAL_CONFIG_CPP11_NULLPTR) #define CLARA_INTERNAL_CONFIG_CPP11_NULLPTR #endif #ifndef CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT #define CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT #endif #ifndef CLARA_INTERNAL_CONFIG_CPP11_GENERATED_METHODS #define CLARA_INTERNAL_CONFIG_CPP11_GENERATED_METHODS #endif #if !defined(CLARA_INTERNAL_CONFIG_CPP11_OVERRIDE) #define CLARA_INTERNAL_CONFIG_CPP11_OVERRIDE #endif #if !defined(CLARA_INTERNAL_CONFIG_CPP11_UNIQUE_PTR) #define CLARA_INTERNAL_CONFIG_CPP11_UNIQUE_PTR #endif #endif // __cplusplus >= 201103L // Now set the actual defines based on the above + anything the user has configured #if defined(CLARA_INTERNAL_CONFIG_CPP11_NULLPTR) && !defined(CLARA_CONFIG_CPP11_NO_NULLPTR) && !defined(CLARA_CONFIG_CPP11_NULLPTR) && !defined(CLARA_CONFIG_NO_CPP11) #define CLARA_CONFIG_CPP11_NULLPTR #endif #if defined(CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT) && !defined(CLARA_CONFIG_CPP11_NO_NOEXCEPT) && !defined(CLARA_CONFIG_CPP11_NOEXCEPT) && !defined(CLARA_CONFIG_NO_CPP11) #define CLARA_CONFIG_CPP11_NOEXCEPT #endif #if defined(CLARA_INTERNAL_CONFIG_CPP11_GENERATED_METHODS) && !defined(CLARA_CONFIG_CPP11_NO_GENERATED_METHODS) && !defined(CLARA_CONFIG_CPP11_GENERATED_METHODS) && !defined(CLARA_CONFIG_NO_CPP11) #define CLARA_CONFIG_CPP11_GENERATED_METHODS #endif #if defined(CLARA_INTERNAL_CONFIG_CPP11_OVERRIDE) && !defined(CLARA_CONFIG_NO_OVERRIDE) && !defined(CLARA_CONFIG_CPP11_OVERRIDE) && !defined(CLARA_CONFIG_NO_CPP11) #define CLARA_CONFIG_CPP11_OVERRIDE #endif #if defined(CLARA_INTERNAL_CONFIG_CPP11_UNIQUE_PTR) && !defined(CLARA_CONFIG_NO_UNIQUE_PTR) && !defined(CLARA_CONFIG_CPP11_UNIQUE_PTR) && !defined(CLARA_CONFIG_NO_CPP11) #define CLARA_CONFIG_CPP11_UNIQUE_PTR #endif // noexcept support: #if defined(CLARA_CONFIG_CPP11_NOEXCEPT) && !defined(CLARA_NOEXCEPT) #define CLARA_NOEXCEPT noexcept 
# define CLARA_NOEXCEPT_IS(x) noexcept(x) #else #define CLARA_NOEXCEPT throw() # define CLARA_NOEXCEPT_IS(x) #endif // nullptr support #ifdef CLARA_CONFIG_CPP11_NULLPTR #define CLARA_NULL nullptr #else #define CLARA_NULL NULL #endif // override support #ifdef CLARA_CONFIG_CPP11_OVERRIDE #define CLARA_OVERRIDE override #else #define CLARA_OVERRIDE #endif // unique_ptr support #ifdef CLARA_CONFIG_CPP11_UNIQUE_PTR # define CLARA_AUTO_PTR( T ) std::unique_ptr #else # define CLARA_AUTO_PTR( T ) std::auto_ptr #endif #endif // TWOBLUECUBES_CLARA_COMPILERS_H_INCLUDED // ----------- end of #include from clara_compilers.h ----------- // ........... back in clara.h #include #include #include #if defined(WIN32) || defined(__WIN32__) || defined(_WIN32) || defined(_MSC_VER) #define CLARA_PLATFORM_WINDOWS #endif // Use optional outer namespace #ifdef STITCH_CLARA_OPEN_NAMESPACE STITCH_CLARA_OPEN_NAMESPACE #endif namespace Clara { struct UnpositionalTag {}; extern UnpositionalTag _; #ifdef CLARA_CONFIG_MAIN UnpositionalTag _; #endif namespace Detail { #ifdef CLARA_CONSOLE_WIDTH const unsigned int consoleWidth = CLARA_CONFIG_CONSOLE_WIDTH; #else const unsigned int consoleWidth = 80; #endif using namespace Tbc; inline bool startsWith( std::string const& str, std::string const& prefix ) { return str.size() >= prefix.size() && str.substr( 0, prefix.size() ) == prefix; } template struct RemoveConstRef{ typedef T type; }; template struct RemoveConstRef{ typedef T type; }; template struct RemoveConstRef{ typedef T type; }; template struct RemoveConstRef{ typedef T type; }; template struct IsBool { static const bool value = false; }; template<> struct IsBool { static const bool value = true; }; template void convertInto( std::string const& _source, T& _dest ) { std::stringstream ss; ss << _source; ss >> _dest; if( ss.fail() ) throw std::runtime_error( "Unable to convert " + _source + " to destination type" ); } inline void convertInto( std::string const& _source, std::string& _dest ) { _dest = _source; } char toLowerCh(char c) { return static_cast( std::tolower( c ) ); } inline void convertInto( std::string const& _source, bool& _dest ) { std::string sourceLC = _source; std::transform( sourceLC.begin(), sourceLC.end(), sourceLC.begin(), toLowerCh ); if( sourceLC == "y" || sourceLC == "1" || sourceLC == "true" || sourceLC == "yes" || sourceLC == "on" ) _dest = true; else if( sourceLC == "n" || sourceLC == "0" || sourceLC == "false" || sourceLC == "no" || sourceLC == "off" ) _dest = false; else throw std::runtime_error( "Expected a boolean value but did not recognise:\n '" + _source + "'" ); } template struct IArgFunction { virtual ~IArgFunction() {} #ifdef CLARA_CONFIG_CPP11_GENERATED_METHODS IArgFunction() = default; IArgFunction( IArgFunction const& ) = default; #endif virtual void set( ConfigT& config, std::string const& value ) const = 0; virtual bool takesArg() const = 0; virtual IArgFunction* clone() const = 0; }; template class BoundArgFunction { public: BoundArgFunction() : functionObj( CLARA_NULL ) {} BoundArgFunction( IArgFunction* _functionObj ) : functionObj( _functionObj ) {} BoundArgFunction( BoundArgFunction const& other ) : functionObj( other.functionObj ? other.functionObj->clone() : CLARA_NULL ) {} BoundArgFunction& operator = ( BoundArgFunction const& other ) { IArgFunction* newFunctionObj = other.functionObj ? 
other.functionObj->clone() : CLARA_NULL; delete functionObj; functionObj = newFunctionObj; return *this; } ~BoundArgFunction() { delete functionObj; } void set( ConfigT& config, std::string const& value ) const { functionObj->set( config, value ); } bool takesArg() const { return functionObj->takesArg(); } bool isSet() const { return functionObj != CLARA_NULL; } private: IArgFunction* functionObj; }; template struct NullBinder : IArgFunction{ virtual void set( C&, std::string const& ) const {} virtual bool takesArg() const { return true; } virtual IArgFunction* clone() const { return new NullBinder( *this ); } }; template struct BoundDataMember : IArgFunction{ BoundDataMember( M C::* _member ) : member( _member ) {} virtual void set( C& p, std::string const& stringValue ) const { convertInto( stringValue, p.*member ); } virtual bool takesArg() const { return !IsBool::value; } virtual IArgFunction* clone() const { return new BoundDataMember( *this ); } M C::* member; }; template struct BoundUnaryMethod : IArgFunction{ BoundUnaryMethod( void (C::*_member)( M ) ) : member( _member ) {} virtual void set( C& p, std::string const& stringValue ) const { typename RemoveConstRef::type value; convertInto( stringValue, value ); (p.*member)( value ); } virtual bool takesArg() const { return !IsBool::value; } virtual IArgFunction* clone() const { return new BoundUnaryMethod( *this ); } void (C::*member)( M ); }; template struct BoundNullaryMethod : IArgFunction{ BoundNullaryMethod( void (C::*_member)() ) : member( _member ) {} virtual void set( C& p, std::string const& stringValue ) const { bool value; convertInto( stringValue, value ); if( value ) (p.*member)(); } virtual bool takesArg() const { return false; } virtual IArgFunction* clone() const { return new BoundNullaryMethod( *this ); } void (C::*member)(); }; template struct BoundUnaryFunction : IArgFunction{ BoundUnaryFunction( void (*_function)( C& ) ) : function( _function ) {} virtual void set( C& obj, std::string const& stringValue ) const { bool value; convertInto( stringValue, value ); if( value ) function( obj ); } virtual bool takesArg() const { return false; } virtual IArgFunction* clone() const { return new BoundUnaryFunction( *this ); } void (*function)( C& ); }; template struct BoundBinaryFunction : IArgFunction{ BoundBinaryFunction( void (*_function)( C&, T ) ) : function( _function ) {} virtual void set( C& obj, std::string const& stringValue ) const { typename RemoveConstRef::type value; convertInto( stringValue, value ); function( obj, value ); } virtual bool takesArg() const { return !IsBool::value; } virtual IArgFunction* clone() const { return new BoundBinaryFunction( *this ); } void (*function)( C&, T ); }; } // namespace Detail inline std::vector argsToVector( int argc, char const* const* const argv ) { std::vector args( static_cast( argc ) ); for( std::size_t i = 0; i < static_cast( argc ); ++i ) args[i] = argv[i]; return args; } class Parser { enum Mode { None, MaybeShortOpt, SlashOpt, ShortOpt, LongOpt, Positional }; Mode mode; std::size_t from; bool inQuotes; public: struct Token { enum Type { Positional, ShortOpt, LongOpt }; Token( Type _type, std::string const& _data ) : type( _type ), data( _data ) {} Type type; std::string data; }; Parser() : mode( None ), from( 0 ), inQuotes( false ){} void parseIntoTokens( std::vector const& args, std::vector& tokens ) { const std::string doubleDash = "--"; for( std::size_t i = 1; i < args.size() && args[i] != doubleDash; ++i ) parseIntoTokens( args[i], tokens); } void 
parseIntoTokens( std::string const& arg, std::vector& tokens ) { for( std::size_t i = 0; i < arg.size(); ++i ) { char c = arg[i]; if( c == '"' ) inQuotes = !inQuotes; mode = handleMode( i, c, arg, tokens ); } mode = handleMode( arg.size(), '\0', arg, tokens ); } Mode handleMode( std::size_t i, char c, std::string const& arg, std::vector& tokens ) { switch( mode ) { case None: return handleNone( i, c ); case MaybeShortOpt: return handleMaybeShortOpt( i, c ); case ShortOpt: case LongOpt: case SlashOpt: return handleOpt( i, c, arg, tokens ); case Positional: return handlePositional( i, c, arg, tokens ); default: throw std::logic_error( "Unknown mode" ); } } Mode handleNone( std::size_t i, char c ) { if( inQuotes ) { from = i; return Positional; } switch( c ) { case '-': return MaybeShortOpt; #ifdef CLARA_PLATFORM_WINDOWS case '/': from = i+1; return SlashOpt; #endif default: from = i; return Positional; } } Mode handleMaybeShortOpt( std::size_t i, char c ) { switch( c ) { case '-': from = i+1; return LongOpt; default: from = i; return ShortOpt; } } Mode handleOpt( std::size_t i, char c, std::string const& arg, std::vector& tokens ) { if( std::string( ":=\0", 3 ).find( c ) == std::string::npos ) return mode; std::string optName = arg.substr( from, i-from ); if( mode == ShortOpt ) for( std::size_t j = 0; j < optName.size(); ++j ) tokens.push_back( Token( Token::ShortOpt, optName.substr( j, 1 ) ) ); else if( mode == SlashOpt && optName.size() == 1 ) tokens.push_back( Token( Token::ShortOpt, optName ) ); else tokens.push_back( Token( Token::LongOpt, optName ) ); return None; } Mode handlePositional( std::size_t i, char c, std::string const& arg, std::vector& tokens ) { if( inQuotes || std::string( "\0", 1 ).find( c ) == std::string::npos ) return mode; std::string data = arg.substr( from, i-from ); tokens.push_back( Token( Token::Positional, data ) ); return None; } }; template struct CommonArgProperties { CommonArgProperties() {} CommonArgProperties( Detail::BoundArgFunction const& _boundField ) : boundField( _boundField ) {} Detail::BoundArgFunction boundField; std::string description; std::string detail; std::string placeholder; // Only value if boundField takes an arg bool takesArg() const { return !placeholder.empty(); } void validate() const { if( !boundField.isSet() ) throw std::logic_error( "option not bound" ); } }; struct OptionArgProperties { std::vector shortNames; std::string longName; bool hasShortName( std::string const& shortName ) const { return std::find( shortNames.begin(), shortNames.end(), shortName ) != shortNames.end(); } bool hasLongName( std::string const& _longName ) const { return _longName == longName; } }; struct PositionalArgProperties { PositionalArgProperties() : position( -1 ) {} int position; // -1 means non-positional (floating) bool isFixedPositional() const { return position != -1; } }; template class CommandLine { struct Arg : CommonArgProperties, OptionArgProperties, PositionalArgProperties { Arg() {} Arg( Detail::BoundArgFunction const& _boundField ) : CommonArgProperties( _boundField ) {} using CommonArgProperties::placeholder; // !TBD std::string dbgName() const { if( !longName.empty() ) return "--" + longName; if( !shortNames.empty() ) return "-" + shortNames[0]; return "positional args"; } std::string commands() const { std::ostringstream oss; bool first = true; std::vector::const_iterator it = shortNames.begin(), itEnd = shortNames.end(); for(; it != itEnd; ++it ) { if( first ) first = false; else oss << ", "; oss << "-" << *it; } if( !longName.empty() 
) { if( !first ) oss << ", "; oss << "--" << longName; } if( !placeholder.empty() ) oss << " <" << placeholder << ">"; return oss.str(); } }; typedef CLARA_AUTO_PTR( Arg ) ArgAutoPtr; friend void addOptName( Arg& arg, std::string const& optName ) { if( optName.empty() ) return; if( Detail::startsWith( optName, "--" ) ) { if( !arg.longName.empty() ) throw std::logic_error( "Only one long opt may be specified. '" + arg.longName + "' already specified, now attempting to add '" + optName + "'" ); arg.longName = optName.substr( 2 ); } else if( Detail::startsWith( optName, "-" ) ) arg.shortNames.push_back( optName.substr( 1 ) ); else throw std::logic_error( "option must begin with - or --. Option was: '" + optName + "'" ); } friend void setPositionalArg( Arg& arg, int position ) { arg.position = position; } class ArgBuilder { public: ArgBuilder( Arg* arg ) : m_arg( arg ) {} // Bind a non-boolean data member (requires placeholder string) template void bind( M C::* field, std::string const& placeholder ) { m_arg->boundField = new Detail::BoundDataMember( field ); m_arg->placeholder = placeholder; } // Bind a boolean data member (no placeholder required) template void bind( bool C::* field ) { m_arg->boundField = new Detail::BoundDataMember( field ); } // Bind a method taking a single, non-boolean argument (requires a placeholder string) template void bind( void (C::* unaryMethod)( M ), std::string const& placeholder ) { m_arg->boundField = new Detail::BoundUnaryMethod( unaryMethod ); m_arg->placeholder = placeholder; } // Bind a method taking a single, boolean argument (no placeholder string required) template void bind( void (C::* unaryMethod)( bool ) ) { m_arg->boundField = new Detail::BoundUnaryMethod( unaryMethod ); } // Bind a method that takes no arguments (will be called if opt is present) template void bind( void (C::* nullaryMethod)() ) { m_arg->boundField = new Detail::BoundNullaryMethod( nullaryMethod ); } // Bind a free function taking a single argument - the object to operate on (no placeholder string required) template void bind( void (* unaryFunction)( C& ) ) { m_arg->boundField = new Detail::BoundUnaryFunction( unaryFunction ); } // Bind a free function taking a single argument - the object to operate on (requires a placeholder string) template void bind( void (* binaryFunction)( C&, T ), std::string const& placeholder ) { m_arg->boundField = new Detail::BoundBinaryFunction( binaryFunction ); m_arg->placeholder = placeholder; } ArgBuilder& describe( std::string const& description ) { m_arg->description = description; return *this; } ArgBuilder& detail( std::string const& detail ) { m_arg->detail = detail; return *this; } protected: Arg* m_arg; }; class OptBuilder : public ArgBuilder { public: OptBuilder( Arg* arg ) : ArgBuilder( arg ) {} OptBuilder( OptBuilder& other ) : ArgBuilder( other ) {} OptBuilder& operator[]( std::string const& optName ) { addOptName( *ArgBuilder::m_arg, optName ); return *this; } }; public: CommandLine() : m_boundProcessName( new Detail::NullBinder() ), m_highestSpecifiedArgPosition( 0 ), m_throwOnUnrecognisedTokens( false ) {} CommandLine( CommandLine const& other ) : m_boundProcessName( other.m_boundProcessName ), m_options ( other.m_options ), m_positionalArgs( other.m_positionalArgs ), m_highestSpecifiedArgPosition( other.m_highestSpecifiedArgPosition ), m_throwOnUnrecognisedTokens( other.m_throwOnUnrecognisedTokens ) { if( other.m_floatingArg.get() ) m_floatingArg.reset( new Arg( *other.m_floatingArg ) ); } CommandLine& setThrowOnUnrecognisedTokens( 
bool shouldThrow = true ) { m_throwOnUnrecognisedTokens = shouldThrow; return *this; } OptBuilder operator[]( std::string const& optName ) { m_options.push_back( Arg() ); addOptName( m_options.back(), optName ); OptBuilder builder( &m_options.back() ); return builder; } ArgBuilder operator[]( int position ) { m_positionalArgs.insert( std::make_pair( position, Arg() ) ); if( position > m_highestSpecifiedArgPosition ) m_highestSpecifiedArgPosition = position; setPositionalArg( m_positionalArgs[position], position ); ArgBuilder builder( &m_positionalArgs[position] ); return builder; } // Invoke this with the _ instance ArgBuilder operator[]( UnpositionalTag ) { if( m_floatingArg.get() ) throw std::logic_error( "Only one unpositional argument can be added" ); m_floatingArg.reset( new Arg() ); ArgBuilder builder( m_floatingArg.get() ); return builder; } template void bindProcessName( M C::* field ) { m_boundProcessName = new Detail::BoundDataMember( field ); } template void bindProcessName( void (C::*_unaryMethod)( M ) ) { m_boundProcessName = new Detail::BoundUnaryMethod( _unaryMethod ); } void optUsage( std::ostream& os, std::size_t indent = 0, std::size_t width = Detail::consoleWidth ) const { typename std::vector::const_iterator itBegin = m_options.begin(), itEnd = m_options.end(), it; std::size_t maxWidth = 0; for( it = itBegin; it != itEnd; ++it ) maxWidth = (std::max)( maxWidth, it->commands().size() ); for( it = itBegin; it != itEnd; ++it ) { Detail::Text usage( it->commands(), Detail::TextAttributes() .setWidth( maxWidth+indent ) .setIndent( indent ) ); Detail::Text desc( it->description, Detail::TextAttributes() .setWidth( width - maxWidth - 3 ) ); for( std::size_t i = 0; i < (std::max)( usage.size(), desc.size() ); ++i ) { std::string usageCol = i < usage.size() ? usage[i] : ""; os << usageCol; if( i < desc.size() && !desc[i].empty() ) os << std::string( indent + 2 + maxWidth - usageCol.size(), ' ' ) << desc[i]; os << "\n"; } } } std::string optUsage() const { std::ostringstream oss; optUsage( oss ); return oss.str(); } void argSynopsis( std::ostream& os ) const { for( int i = 1; i <= m_highestSpecifiedArgPosition; ++i ) { if( i > 1 ) os << " "; typename std::map::const_iterator it = m_positionalArgs.find( i ); if( it != m_positionalArgs.end() ) os << "<" << it->second.placeholder << ">"; else if( m_floatingArg.get() ) os << "<" << m_floatingArg->placeholder << ">"; else throw std::logic_error( "non consecutive positional arguments with no floating args" ); } // !TBD No indication of mandatory args if( m_floatingArg.get() ) { if( m_highestSpecifiedArgPosition > 1 ) os << " "; os << "[<" << m_floatingArg->placeholder << "> ...]"; } } std::string argSynopsis() const { std::ostringstream oss; argSynopsis( oss ); return oss.str(); } void usage( std::ostream& os, std::string const& procName ) const { validate(); os << "usage:\n " << procName << " "; argSynopsis( os ); if( !m_options.empty() ) { os << " [options]\n\nwhere options are: \n"; optUsage( os, 2 ); } os << "\n"; } std::string usage( std::string const& procName ) const { std::ostringstream oss; usage( oss, procName ); return oss.str(); } ConfigT parse( std::vector const& args ) const { ConfigT config; parseInto( args, config ); return config; } std::vector parseInto( std::vector const& args, ConfigT& config ) const { std::string processName = args.empty() ? 
std::string() : args[0]; std::size_t lastSlash = processName.find_last_of( "/\\" ); if( lastSlash != std::string::npos ) processName = processName.substr( lastSlash+1 ); m_boundProcessName.set( config, processName ); std::vector tokens; Parser parser; parser.parseIntoTokens( args, tokens ); return populate( tokens, config ); } std::vector populate( std::vector const& tokens, ConfigT& config ) const { validate(); std::vector unusedTokens = populateOptions( tokens, config ); unusedTokens = populateFixedArgs( unusedTokens, config ); unusedTokens = populateFloatingArgs( unusedTokens, config ); return unusedTokens; } std::vector populateOptions( std::vector const& tokens, ConfigT& config ) const { std::vector unusedTokens; std::vector errors; for( std::size_t i = 0; i < tokens.size(); ++i ) { Parser::Token const& token = tokens[i]; typename std::vector::const_iterator it = m_options.begin(), itEnd = m_options.end(); for(; it != itEnd; ++it ) { Arg const& arg = *it; try { if( ( token.type == Parser::Token::ShortOpt && arg.hasShortName( token.data ) ) || ( token.type == Parser::Token::LongOpt && arg.hasLongName( token.data ) ) ) { if( arg.takesArg() ) { if( i == tokens.size()-1 || tokens[i+1].type != Parser::Token::Positional ) errors.push_back( "Expected argument to option: " + token.data ); else arg.boundField.set( config, tokens[++i].data ); } else { arg.boundField.set( config, "true" ); } break; } } catch( std::exception& ex ) { errors.push_back( std::string( ex.what() ) + "\n- while parsing: (" + arg.commands() + ")" ); } } if( it == itEnd ) { if( token.type == Parser::Token::Positional || !m_throwOnUnrecognisedTokens ) unusedTokens.push_back( token ); else if( errors.empty() && m_throwOnUnrecognisedTokens ) errors.push_back( "unrecognised option: " + token.data ); } } if( !errors.empty() ) { std::ostringstream oss; for( std::vector::const_iterator it = errors.begin(), itEnd = errors.end(); it != itEnd; ++it ) { if( it != errors.begin() ) oss << "\n"; oss << *it; } throw std::runtime_error( oss.str() ); } return unusedTokens; } std::vector populateFixedArgs( std::vector const& tokens, ConfigT& config ) const { std::vector unusedTokens; int position = 1; for( std::size_t i = 0; i < tokens.size(); ++i ) { Parser::Token const& token = tokens[i]; typename std::map::const_iterator it = m_positionalArgs.find( position ); if( it != m_positionalArgs.end() ) it->second.boundField.set( config, token.data ); else unusedTokens.push_back( token ); if( token.type == Parser::Token::Positional ) position++; } return unusedTokens; } std::vector populateFloatingArgs( std::vector const& tokens, ConfigT& config ) const { if( !m_floatingArg.get() ) return tokens; std::vector unusedTokens; for( std::size_t i = 0; i < tokens.size(); ++i ) { Parser::Token const& token = tokens[i]; if( token.type == Parser::Token::Positional ) m_floatingArg->boundField.set( config, token.data ); else unusedTokens.push_back( token ); } return unusedTokens; } void validate() const { if( m_options.empty() && m_positionalArgs.empty() && !m_floatingArg.get() ) throw std::logic_error( "No options or arguments specified" ); for( typename std::vector::const_iterator it = m_options.begin(), itEnd = m_options.end(); it != itEnd; ++it ) it->validate(); } private: Detail::BoundArgFunction m_boundProcessName; std::vector m_options; std::map m_positionalArgs; ArgAutoPtr m_floatingArg; int m_highestSpecifiedArgPosition; bool m_throwOnUnrecognisedTokens; }; } // end namespace Clara STITCH_CLARA_CLOSE_NAMESPACE #undef STITCH_CLARA_OPEN_NAMESPACE 
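// [Editorial note, not part of the upstream header] A hedged sketch of how the
// Clara CommandLine builder defined above is used; Catch's own
// makeCommandLineParser() further below binds it to ConfigData in the same way.
// `MyConfig` here is purely hypothetical:
//
//     struct MyConfig { bool showHelp; std::string outputFilename; MyConfig() : showHelp( false ) {} };
//
//     Clara::CommandLine<MyConfig> cli;
//     cli["-?"]["--help"]
//         .describe( "display usage information" )
//         .bind( &MyConfig::showHelp );
//     cli["-o"]["--out"]
//         .describe( "output filename" )
//         .bind( &MyConfig::outputFilename, "filename" );
//
//     MyConfig config = cli.parse( Clara::argsToVector( argc, argv ) );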
#undef STITCH_CLARA_CLOSE_NAMESPACE #endif // TWOBLUECUBES_CLARA_H_INCLUDED #undef STITCH_CLARA_OPEN_NAMESPACE // Restore Clara's value for console width, if present #ifdef CATCH_TEMP_CLARA_CONFIG_CONSOLE_WIDTH #define CLARA_CONFIG_CONSOLE_WIDTH CATCH_TEMP_CLARA_CONFIG_CONSOLE_WIDTH #undef CATCH_TEMP_CLARA_CONFIG_CONSOLE_WIDTH #endif #include #include namespace Catch { inline void abortAfterFirst( ConfigData& config ) { config.abortAfter = 1; } inline void abortAfterX( ConfigData& config, int x ) { if( x < 1 ) throw std::runtime_error( "Value after -x or --abortAfter must be greater than zero" ); config.abortAfter = x; } inline void addTestOrTags( ConfigData& config, std::string const& _testSpec ) { config.testsOrTags.push_back( _testSpec ); } inline void addSectionToRun( ConfigData& config, std::string const& sectionName ) { config.sectionsToRun.push_back( sectionName ); } inline void addReporterName( ConfigData& config, std::string const& _reporterName ) { config.reporterNames.push_back( _reporterName ); } inline void addWarning( ConfigData& config, std::string const& _warning ) { if( _warning == "NoAssertions" ) config.warnings = static_cast( config.warnings | WarnAbout::NoAssertions ); else throw std::runtime_error( "Unrecognised warning: '" + _warning + '\'' ); } inline void setOrder( ConfigData& config, std::string const& order ) { if( startsWith( "declared", order ) ) config.runOrder = RunTests::InDeclarationOrder; else if( startsWith( "lexical", order ) ) config.runOrder = RunTests::InLexicographicalOrder; else if( startsWith( "random", order ) ) config.runOrder = RunTests::InRandomOrder; else throw std::runtime_error( "Unrecognised ordering: '" + order + '\'' ); } inline void setRngSeed( ConfigData& config, std::string const& seed ) { if( seed == "time" ) { config.rngSeed = static_cast( std::time(0) ); } else { std::stringstream ss; ss << seed; ss >> config.rngSeed; if( ss.fail() ) throw std::runtime_error( "Argument to --rng-seed should be the word 'time' or a number" ); } } inline void setVerbosity( ConfigData& config, int level ) { // !TBD: accept strings? config.verbosity = static_cast( level ); } inline void setShowDurations( ConfigData& config, bool _showDurations ) { config.showDurations = _showDurations ? 
ShowDurations::Always : ShowDurations::Never; } inline void setUseColour( ConfigData& config, std::string const& value ) { std::string mode = toLower( value ); if( mode == "yes" ) config.useColour = UseColour::Yes; else if( mode == "no" ) config.useColour = UseColour::No; else if( mode == "auto" ) config.useColour = UseColour::Auto; else throw std::runtime_error( "colour mode must be one of: auto, yes or no" ); } inline void forceColour( ConfigData& config ) { config.useColour = UseColour::Yes; } inline void loadTestNamesFromFile( ConfigData& config, std::string const& _filename ) { std::ifstream f( _filename.c_str() ); if( !f.is_open() ) throw std::domain_error( "Unable to load input file: " + _filename ); std::string line; while( std::getline( f, line ) ) { line = trim(line); if( !line.empty() && !startsWith( line, '#' ) ) { if( !startsWith( line, '"' ) ) line = '"' + line + '"'; addTestOrTags( config, line + ',' ); } } } inline Clara::CommandLine makeCommandLineParser() { using namespace Clara; CommandLine cli; cli.bindProcessName( &ConfigData::processName ); cli["-?"]["-h"]["--help"] .describe( "display usage information" ) .bind( &ConfigData::showHelp ); cli["-l"]["--list-tests"] .describe( "list all/matching test cases" ) .bind( &ConfigData::listTests ); cli["-t"]["--list-tags"] .describe( "list all/matching tags" ) .bind( &ConfigData::listTags ); cli["-s"]["--success"] .describe( "include successful tests in output" ) .bind( &ConfigData::showSuccessfulTests ); cli["-b"]["--break"] .describe( "break into debugger on failure" ) .bind( &ConfigData::shouldDebugBreak ); cli["-e"]["--nothrow"] .describe( "skip exception tests" ) .bind( &ConfigData::noThrow ); cli["-i"]["--invisibles"] .describe( "show invisibles (tabs, newlines)" ) .bind( &ConfigData::showInvisibles ); cli["-o"]["--out"] .describe( "output filename" ) .bind( &ConfigData::outputFilename, "filename" ); cli["-r"]["--reporter"] // .placeholder( "name[:filename]" ) .describe( "reporter to use (defaults to console)" ) .bind( &addReporterName, "name" ); cli["-n"]["--name"] .describe( "suite name" ) .bind( &ConfigData::name, "name" ); cli["-a"]["--abort"] .describe( "abort at first failure" ) .bind( &abortAfterFirst ); cli["-x"]["--abortx"] .describe( "abort after x failures" ) .bind( &abortAfterX, "no. 
failures" ); cli["-w"]["--warn"] .describe( "enable warnings" ) .bind( &addWarning, "warning name" ); // - needs updating if reinstated // cli.into( &setVerbosity ) // .describe( "level of verbosity (0=no output)" ) // .shortOpt( "v") // .longOpt( "verbosity" ) // .placeholder( "level" ); cli[_] .describe( "which test or tests to use" ) .bind( &addTestOrTags, "test name, pattern or tags" ); cli["-d"]["--durations"] .describe( "show test durations" ) .bind( &setShowDurations, "yes|no" ); cli["-f"]["--input-file"] .describe( "load test names to run from a file" ) .bind( &loadTestNamesFromFile, "filename" ); cli["-#"]["--filenames-as-tags"] .describe( "adds a tag for the filename" ) .bind( &ConfigData::filenamesAsTags ); cli["-c"]["--section"] .describe( "specify section to run" ) .bind( &addSectionToRun, "section name" ); // Less common commands which don't have a short form cli["--list-test-names-only"] .describe( "list all/matching test cases names only" ) .bind( &ConfigData::listTestNamesOnly ); cli["--list-extra-info"] .describe( "list all/matching test cases with more info" ) .bind( &ConfigData::listExtraInfo ); cli["--list-reporters"] .describe( "list all reporters" ) .bind( &ConfigData::listReporters ); cli["--order"] .describe( "test case order (defaults to decl)" ) .bind( &setOrder, "decl|lex|rand" ); cli["--rng-seed"] .describe( "set a specific seed for random numbers" ) .bind( &setRngSeed, "'time'|number" ); cli["--force-colour"] .describe( "force colourised output (deprecated)" ) .bind( &forceColour ); cli["--use-colour"] .describe( "should output be colourised" ) .bind( &setUseColour, "yes|no" ); return cli; } } // end namespace Catch // #included from: internal/catch_list.hpp #define TWOBLUECUBES_CATCH_LIST_HPP_INCLUDED // #included from: catch_text.h #define TWOBLUECUBES_CATCH_TEXT_H_INCLUDED #define TBC_TEXT_FORMAT_CONSOLE_WIDTH CATCH_CONFIG_CONSOLE_WIDTH #define CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE Catch // #included from: ../external/tbc_text_format.h // Only use header guard if we are not using an outer namespace #ifndef CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE # ifdef TWOBLUECUBES_TEXT_FORMAT_H_INCLUDED # ifndef TWOBLUECUBES_TEXT_FORMAT_H_ALREADY_INCLUDED # define TWOBLUECUBES_TEXT_FORMAT_H_ALREADY_INCLUDED # endif # else # define TWOBLUECUBES_TEXT_FORMAT_H_INCLUDED # endif #endif #ifndef TWOBLUECUBES_TEXT_FORMAT_H_ALREADY_INCLUDED #include #include #include // Use optional outer namespace #ifdef CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE namespace CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE { #endif namespace Tbc { #ifdef TBC_TEXT_FORMAT_CONSOLE_WIDTH const unsigned int consoleWidth = TBC_TEXT_FORMAT_CONSOLE_WIDTH; #else const unsigned int consoleWidth = 80; #endif struct TextAttributes { TextAttributes() : initialIndent( std::string::npos ), indent( 0 ), width( consoleWidth-1 ) {} TextAttributes& setInitialIndent( std::size_t _value ) { initialIndent = _value; return *this; } TextAttributes& setIndent( std::size_t _value ) { indent = _value; return *this; } TextAttributes& setWidth( std::size_t _value ) { width = _value; return *this; } std::size_t initialIndent; // indent of first line, or npos std::size_t indent; // indent of subsequent lines, or all if initialIndent is npos std::size_t width; // maximum width of text, including indent. 
Longer text will wrap }; class Text { public: Text( std::string const& _str, TextAttributes const& _attr = TextAttributes() ) : attr( _attr ) { const std::string wrappableBeforeChars = "[({<\t"; const std::string wrappableAfterChars = "])}>-,./|\\"; const std::string wrappableInsteadOfChars = " \n\r"; std::string indent = _attr.initialIndent != std::string::npos ? std::string( _attr.initialIndent, ' ' ) : std::string( _attr.indent, ' ' ); typedef std::string::const_iterator iterator; iterator it = _str.begin(); const iterator strEnd = _str.end(); while( it != strEnd ) { if( lines.size() >= 1000 ) { lines.push_back( "... message truncated due to excessive size" ); return; } std::string suffix; std::size_t width = (std::min)( static_cast( strEnd-it ), _attr.width-static_cast( indent.size() ) ); iterator itEnd = it+width; iterator itNext = _str.end(); iterator itNewLine = std::find( it, itEnd, '\n' ); if( itNewLine != itEnd ) itEnd = itNewLine; if( itEnd != strEnd ) { bool foundWrapPoint = false; iterator findIt = itEnd; do { if( wrappableAfterChars.find( *findIt ) != std::string::npos && findIt != itEnd ) { itEnd = findIt+1; itNext = findIt+1; foundWrapPoint = true; } else if( findIt > it && wrappableBeforeChars.find( *findIt ) != std::string::npos ) { itEnd = findIt; itNext = findIt; foundWrapPoint = true; } else if( wrappableInsteadOfChars.find( *findIt ) != std::string::npos ) { itNext = findIt+1; itEnd = findIt; foundWrapPoint = true; } if( findIt == it ) break; else --findIt; } while( !foundWrapPoint ); if( !foundWrapPoint ) { // No good wrap char, so we'll break mid word and add a hyphen --itEnd; itNext = itEnd; suffix = "-"; } else { while( itEnd > it && wrappableInsteadOfChars.find( *(itEnd-1) ) != std::string::npos ) --itEnd; } } lines.push_back( indent + std::string( it, itEnd ) + suffix ); if( indent.size() != _attr.indent ) indent = std::string( _attr.indent, ' ' ); it = itNext; } } typedef std::vector::const_iterator const_iterator; const_iterator begin() const { return lines.begin(); } const_iterator end() const { return lines.end(); } std::string const& last() const { return lines.back(); } std::size_t size() const { return lines.size(); } std::string const& operator[]( std::size_t _index ) const { return lines[_index]; } std::string toString() const { std::ostringstream oss; oss << *this; return oss.str(); } inline friend std::ostream& operator << ( std::ostream& _stream, Text const& _text ) { for( Text::const_iterator it = _text.begin(), itEnd = _text.end(); it != itEnd; ++it ) { if( it != _text.begin() ) _stream << "\n"; _stream << *it; } return _stream; } private: std::string str; TextAttributes attr; std::vector lines; }; } // end namespace Tbc #ifdef CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE } // end outer namespace #endif #endif // TWOBLUECUBES_TEXT_FORMAT_H_ALREADY_INCLUDED #undef CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE namespace Catch { using Tbc::Text; using Tbc::TextAttributes; } // #included from: catch_console_colour.hpp #define TWOBLUECUBES_CATCH_CONSOLE_COLOUR_HPP_INCLUDED namespace Catch { struct Colour { enum Code { None = 0, White, Red, Green, Blue, Cyan, Yellow, Grey, Bright = 0x10, BrightRed = Bright | Red, BrightGreen = Bright | Green, LightGrey = Bright | Grey, BrightWhite = Bright | White, // By intention FileName = LightGrey, Warning = Yellow, ResultError = BrightRed, ResultSuccess = BrightGreen, ResultExpectedFailure = Warning, Error = BrightRed, Success = Green, OriginalExpression = Cyan, ReconstructedExpression = Yellow, SecondaryText = LightGrey, 
            Headers = White
        };

        // Use constructed object for RAII guard
        Colour( Code _colourCode );
        Colour( Colour const& other );
        ~Colour();

        // Use static method for one-shot changes
        static void use( Code _colourCode );

    private:
        bool m_moved;
    };

    inline std::ostream& operator << ( std::ostream& os, Colour const& ) { return os; }

} // end namespace Catch

// #included from: catch_interfaces_reporter.h
#define TWOBLUECUBES_CATCH_INTERFACES_REPORTER_H_INCLUDED

#include <string>
#include <ostream>
#include <map>

namespace Catch {

    struct ReporterConfig {
        explicit ReporterConfig( Ptr<IConfig const> const& _fullConfig )
        :   m_stream( &_fullConfig->stream() ), m_fullConfig( _fullConfig ) {}

        ReporterConfig( Ptr<IConfig const> const& _fullConfig, std::ostream& _stream )
        :   m_stream( &_stream ), m_fullConfig( _fullConfig ) {}

        std::ostream& stream() const    { return *m_stream; }
        Ptr<IConfig const> fullConfig() const { return m_fullConfig; }

    private:
        std::ostream* m_stream;
        Ptr<IConfig const> m_fullConfig;
    };

    struct ReporterPreferences {
        ReporterPreferences()
        : shouldRedirectStdOut( false )
        {}

        bool shouldRedirectStdOut;
    };

    template<typename T>
    struct LazyStat : Option<T> {
        LazyStat() : used( false ) {}
        LazyStat& operator=( T const& _value ) {
            Option<T>::operator=( _value );
            used = false;
            return *this;
        }
        void reset() {
            Option<T>::reset();
            used = false;
        }
        bool used;
    };

    struct TestRunInfo {
        TestRunInfo( std::string const& _name ) : name( _name ) {}
        std::string name;
    };
    struct GroupInfo {
        GroupInfo(  std::string const& _name,
                    std::size_t _groupIndex,
                    std::size_t _groupsCount )
        :   name( _name ),
            groupIndex( _groupIndex ),
            groupsCounts( _groupsCount )
        {}

        std::string name;
        std::size_t groupIndex;
        std::size_t groupsCounts;
    };

    struct AssertionStats {
        AssertionStats( AssertionResult const& _assertionResult,
                        std::vector<MessageInfo> const& _infoMessages,
                        Totals const& _totals )
        :   assertionResult( _assertionResult ),
            infoMessages( _infoMessages ),
            totals( _totals )
        {
            if( assertionResult.hasMessage() ) {
                // Copy message into messages list.
// !TBD This should have been done earlier, somewhere MessageBuilder builder( assertionResult.getTestMacroName(), assertionResult.getSourceInfo(), assertionResult.getResultType() ); builder << assertionResult.getMessage(); builder.m_info.message = builder.m_stream.str(); infoMessages.push_back( builder.m_info ); } } virtual ~AssertionStats(); # ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS AssertionStats( AssertionStats const& ) = default; AssertionStats( AssertionStats && ) = default; AssertionStats& operator = ( AssertionStats const& ) = default; AssertionStats& operator = ( AssertionStats && ) = default; # endif AssertionResult assertionResult; std::vector infoMessages; Totals totals; }; struct SectionStats { SectionStats( SectionInfo const& _sectionInfo, Counts const& _assertions, double _durationInSeconds, bool _missingAssertions ) : sectionInfo( _sectionInfo ), assertions( _assertions ), durationInSeconds( _durationInSeconds ), missingAssertions( _missingAssertions ) {} virtual ~SectionStats(); # ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS SectionStats( SectionStats const& ) = default; SectionStats( SectionStats && ) = default; SectionStats& operator = ( SectionStats const& ) = default; SectionStats& operator = ( SectionStats && ) = default; # endif SectionInfo sectionInfo; Counts assertions; double durationInSeconds; bool missingAssertions; }; struct TestCaseStats { TestCaseStats( TestCaseInfo const& _testInfo, Totals const& _totals, std::string const& _stdOut, std::string const& _stdErr, bool _aborting ) : testInfo( _testInfo ), totals( _totals ), stdOut( _stdOut ), stdErr( _stdErr ), aborting( _aborting ) {} virtual ~TestCaseStats(); # ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS TestCaseStats( TestCaseStats const& ) = default; TestCaseStats( TestCaseStats && ) = default; TestCaseStats& operator = ( TestCaseStats const& ) = default; TestCaseStats& operator = ( TestCaseStats && ) = default; # endif TestCaseInfo testInfo; Totals totals; std::string stdOut; std::string stdErr; bool aborting; }; struct TestGroupStats { TestGroupStats( GroupInfo const& _groupInfo, Totals const& _totals, bool _aborting ) : groupInfo( _groupInfo ), totals( _totals ), aborting( _aborting ) {} TestGroupStats( GroupInfo const& _groupInfo ) : groupInfo( _groupInfo ), aborting( false ) {} virtual ~TestGroupStats(); # ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS TestGroupStats( TestGroupStats const& ) = default; TestGroupStats( TestGroupStats && ) = default; TestGroupStats& operator = ( TestGroupStats const& ) = default; TestGroupStats& operator = ( TestGroupStats && ) = default; # endif GroupInfo groupInfo; Totals totals; bool aborting; }; struct TestRunStats { TestRunStats( TestRunInfo const& _runInfo, Totals const& _totals, bool _aborting ) : runInfo( _runInfo ), totals( _totals ), aborting( _aborting ) {} virtual ~TestRunStats(); # ifndef CATCH_CONFIG_CPP11_GENERATED_METHODS TestRunStats( TestRunStats const& _other ) : runInfo( _other.runInfo ), totals( _other.totals ), aborting( _other.aborting ) {} # else TestRunStats( TestRunStats const& ) = default; TestRunStats( TestRunStats && ) = default; TestRunStats& operator = ( TestRunStats const& ) = default; TestRunStats& operator = ( TestRunStats && ) = default; # endif TestRunInfo runInfo; Totals totals; bool aborting; }; class MultipleReporters; struct IStreamingReporter : IShared { virtual ~IStreamingReporter(); // Implementing class must also provide the following static method: // static std::string getDescription(); virtual ReporterPreferences getPreferences() 
const = 0; virtual void noMatchingTestCases( std::string const& spec ) = 0; virtual void testRunStarting( TestRunInfo const& testRunInfo ) = 0; virtual void testGroupStarting( GroupInfo const& groupInfo ) = 0; virtual void testCaseStarting( TestCaseInfo const& testInfo ) = 0; virtual void sectionStarting( SectionInfo const& sectionInfo ) = 0; virtual void assertionStarting( AssertionInfo const& assertionInfo ) = 0; // The return value indicates if the messages buffer should be cleared: virtual bool assertionEnded( AssertionStats const& assertionStats ) = 0; virtual void sectionEnded( SectionStats const& sectionStats ) = 0; virtual void testCaseEnded( TestCaseStats const& testCaseStats ) = 0; virtual void testGroupEnded( TestGroupStats const& testGroupStats ) = 0; virtual void testRunEnded( TestRunStats const& testRunStats ) = 0; virtual void skipTest( TestCaseInfo const& testInfo ) = 0; virtual MultipleReporters* tryAsMulti() { return CATCH_NULL; } }; struct IReporterFactory : IShared { virtual ~IReporterFactory(); virtual IStreamingReporter* create( ReporterConfig const& config ) const = 0; virtual std::string getDescription() const = 0; }; struct IReporterRegistry { typedef std::map > FactoryMap; typedef std::vector > Listeners; virtual ~IReporterRegistry(); virtual IStreamingReporter* create( std::string const& name, Ptr const& config ) const = 0; virtual FactoryMap const& getFactories() const = 0; virtual Listeners const& getListeners() const = 0; }; Ptr addReporter( Ptr const& existingReporter, Ptr const& additionalReporter ); } #include #include namespace Catch { inline std::size_t listTests( Config const& config ) { TestSpec testSpec = config.testSpec(); if( config.testSpec().hasFilters() ) Catch::cout() << "Matching test cases:\n"; else { Catch::cout() << "All available test cases:\n"; testSpec = TestSpecParser( ITagAliasRegistry::get() ).parse( "*" ).testSpec(); } std::size_t matchedTests = 0; TextAttributes nameAttr, descAttr, tagsAttr; nameAttr.setInitialIndent( 2 ).setIndent( 4 ); descAttr.setIndent( 4 ); tagsAttr.setIndent( 6 ); std::vector matchedTestCases = filterTests( getAllTestCasesSorted( config ), testSpec, config ); for( std::vector::const_iterator it = matchedTestCases.begin(), itEnd = matchedTestCases.end(); it != itEnd; ++it ) { matchedTests++; TestCaseInfo const& testCaseInfo = it->getTestCaseInfo(); Colour::Code colour = testCaseInfo.isHidden() ? 
Colour::SecondaryText : Colour::None; Colour colourGuard( colour ); Catch::cout() << Text( testCaseInfo.name, nameAttr ) << std::endl; if( config.listExtraInfo() ) { Catch::cout() << " " << testCaseInfo.lineInfo << std::endl; std::string description = testCaseInfo.description; if( description.empty() ) description = "(NO DESCRIPTION)"; Catch::cout() << Text( description, descAttr ) << std::endl; } if( !testCaseInfo.tags.empty() ) Catch::cout() << Text( testCaseInfo.tagsAsString, tagsAttr ) << std::endl; } if( !config.testSpec().hasFilters() ) Catch::cout() << pluralise( matchedTests, "test case" ) << '\n' << std::endl; else Catch::cout() << pluralise( matchedTests, "matching test case" ) << '\n' << std::endl; return matchedTests; } inline std::size_t listTestsNamesOnly( Config const& config ) { TestSpec testSpec = config.testSpec(); if( !config.testSpec().hasFilters() ) testSpec = TestSpecParser( ITagAliasRegistry::get() ).parse( "*" ).testSpec(); std::size_t matchedTests = 0; std::vector matchedTestCases = filterTests( getAllTestCasesSorted( config ), testSpec, config ); for( std::vector::const_iterator it = matchedTestCases.begin(), itEnd = matchedTestCases.end(); it != itEnd; ++it ) { matchedTests++; TestCaseInfo const& testCaseInfo = it->getTestCaseInfo(); if( startsWith( testCaseInfo.name, '#' ) ) Catch::cout() << '"' << testCaseInfo.name << '"'; else Catch::cout() << testCaseInfo.name; if ( config.listExtraInfo() ) Catch::cout() << "\t@" << testCaseInfo.lineInfo; Catch::cout() << std::endl; } return matchedTests; } struct TagInfo { TagInfo() : count ( 0 ) {} void add( std::string const& spelling ) { ++count; spellings.insert( spelling ); } std::string all() const { std::string out; for( std::set::const_iterator it = spellings.begin(), itEnd = spellings.end(); it != itEnd; ++it ) out += "[" + *it + "]"; return out; } std::set spellings; std::size_t count; }; inline std::size_t listTags( Config const& config ) { TestSpec testSpec = config.testSpec(); if( config.testSpec().hasFilters() ) Catch::cout() << "Tags for matching test cases:\n"; else { Catch::cout() << "All available tags:\n"; testSpec = TestSpecParser( ITagAliasRegistry::get() ).parse( "*" ).testSpec(); } std::map tagCounts; std::vector matchedTestCases = filterTests( getAllTestCasesSorted( config ), testSpec, config ); for( std::vector::const_iterator it = matchedTestCases.begin(), itEnd = matchedTestCases.end(); it != itEnd; ++it ) { for( std::set::const_iterator tagIt = it->getTestCaseInfo().tags.begin(), tagItEnd = it->getTestCaseInfo().tags.end(); tagIt != tagItEnd; ++tagIt ) { std::string tagName = *tagIt; std::string lcaseTagName = toLower( tagName ); std::map::iterator countIt = tagCounts.find( lcaseTagName ); if( countIt == tagCounts.end() ) countIt = tagCounts.insert( std::make_pair( lcaseTagName, TagInfo() ) ).first; countIt->second.add( tagName ); } } for( std::map::const_iterator countIt = tagCounts.begin(), countItEnd = tagCounts.end(); countIt != countItEnd; ++countIt ) { std::ostringstream oss; oss << " " << std::setw(2) << countIt->second.count << " "; Text wrapper( countIt->second.all(), TextAttributes() .setInitialIndent( 0 ) .setIndent( oss.str().size() ) .setWidth( CATCH_CONFIG_CONSOLE_WIDTH-10 ) ); Catch::cout() << oss.str() << wrapper << '\n'; } Catch::cout() << pluralise( tagCounts.size(), "tag" ) << '\n' << std::endl; return tagCounts.size(); } inline std::size_t listReporters( Config const& /*config*/ ) { Catch::cout() << "Available reporters:\n"; IReporterRegistry::FactoryMap const& factories = 
getRegistryHub().getReporterRegistry().getFactories(); IReporterRegistry::FactoryMap::const_iterator itBegin = factories.begin(), itEnd = factories.end(), it; std::size_t maxNameLen = 0; for(it = itBegin; it != itEnd; ++it ) maxNameLen = (std::max)( maxNameLen, it->first.size() ); for(it = itBegin; it != itEnd; ++it ) { Text wrapper( it->second->getDescription(), TextAttributes() .setInitialIndent( 0 ) .setIndent( 7+maxNameLen ) .setWidth( CATCH_CONFIG_CONSOLE_WIDTH - maxNameLen-8 ) ); Catch::cout() << " " << it->first << ':' << std::string( maxNameLen - it->first.size() + 2, ' ' ) << wrapper << '\n'; } Catch::cout() << std::endl; return factories.size(); } inline Option list( Config const& config ) { Option listedCount; if( config.listTests() || ( config.listExtraInfo() && !config.listTestNamesOnly() ) ) listedCount = listedCount.valueOr(0) + listTests( config ); if( config.listTestNamesOnly() ) listedCount = listedCount.valueOr(0) + listTestsNamesOnly( config ); if( config.listTags() ) listedCount = listedCount.valueOr(0) + listTags( config ); if( config.listReporters() ) listedCount = listedCount.valueOr(0) + listReporters( config ); return listedCount; } } // end namespace Catch // #included from: internal/catch_run_context.hpp #define TWOBLUECUBES_CATCH_RUNNER_IMPL_HPP_INCLUDED // #included from: catch_test_case_tracker.hpp #define TWOBLUECUBES_CATCH_TEST_CASE_TRACKER_HPP_INCLUDED #include #include #include #include #include CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS namespace Catch { namespace TestCaseTracking { struct NameAndLocation { std::string name; SourceLineInfo location; NameAndLocation( std::string const& _name, SourceLineInfo const& _location ) : name( _name ), location( _location ) {} }; struct ITracker : SharedImpl<> { virtual ~ITracker(); // static queries virtual NameAndLocation const& nameAndLocation() const = 0; // dynamic queries virtual bool isComplete() const = 0; // Successfully completed or failed virtual bool isSuccessfullyCompleted() const = 0; virtual bool isOpen() const = 0; // Started but not complete virtual bool hasChildren() const = 0; virtual ITracker& parent() = 0; // actions virtual void close() = 0; // Successfully complete virtual void fail() = 0; virtual void markAsNeedingAnotherRun() = 0; virtual void addChild( Ptr const& child ) = 0; virtual ITracker* findChild( NameAndLocation const& nameAndLocation ) = 0; virtual void openChild() = 0; // Debug/ checking virtual bool isSectionTracker() const = 0; virtual bool isIndexTracker() const = 0; }; class TrackerContext { enum RunState { NotStarted, Executing, CompletedCycle }; Ptr m_rootTracker; ITracker* m_currentTracker; RunState m_runState; public: static TrackerContext& instance() { static TrackerContext s_instance; return s_instance; } TrackerContext() : m_currentTracker( CATCH_NULL ), m_runState( NotStarted ) {} ITracker& startRun(); void endRun() { m_rootTracker.reset(); m_currentTracker = CATCH_NULL; m_runState = NotStarted; } void startCycle() { m_currentTracker = m_rootTracker.get(); m_runState = Executing; } void completeCycle() { m_runState = CompletedCycle; } bool completedCycle() const { return m_runState == CompletedCycle; } ITracker& currentTracker() { return *m_currentTracker; } void setCurrentTracker( ITracker* tracker ) { m_currentTracker = tracker; } }; class TrackerBase : public ITracker { protected: enum CycleState { NotStarted, Executing, ExecutingChildren, NeedsAnotherRun, CompletedSuccessfully, Failed }; class TrackerHasName { NameAndLocation m_nameAndLocation; public: TrackerHasName( 
NameAndLocation const& nameAndLocation ) : m_nameAndLocation( nameAndLocation ) {} bool operator ()( Ptr const& tracker ) { return tracker->nameAndLocation().name == m_nameAndLocation.name && tracker->nameAndLocation().location == m_nameAndLocation.location; } }; typedef std::vector > Children; NameAndLocation m_nameAndLocation; TrackerContext& m_ctx; ITracker* m_parent; Children m_children; CycleState m_runState; public: TrackerBase( NameAndLocation const& nameAndLocation, TrackerContext& ctx, ITracker* parent ) : m_nameAndLocation( nameAndLocation ), m_ctx( ctx ), m_parent( parent ), m_runState( NotStarted ) {} virtual ~TrackerBase(); virtual NameAndLocation const& nameAndLocation() const CATCH_OVERRIDE { return m_nameAndLocation; } virtual bool isComplete() const CATCH_OVERRIDE { return m_runState == CompletedSuccessfully || m_runState == Failed; } virtual bool isSuccessfullyCompleted() const CATCH_OVERRIDE { return m_runState == CompletedSuccessfully; } virtual bool isOpen() const CATCH_OVERRIDE { return m_runState != NotStarted && !isComplete(); } virtual bool hasChildren() const CATCH_OVERRIDE { return !m_children.empty(); } virtual void addChild( Ptr const& child ) CATCH_OVERRIDE { m_children.push_back( child ); } virtual ITracker* findChild( NameAndLocation const& nameAndLocation ) CATCH_OVERRIDE { Children::const_iterator it = std::find_if( m_children.begin(), m_children.end(), TrackerHasName( nameAndLocation ) ); return( it != m_children.end() ) ? it->get() : CATCH_NULL; } virtual ITracker& parent() CATCH_OVERRIDE { assert( m_parent ); // Should always be non-null except for root return *m_parent; } virtual void openChild() CATCH_OVERRIDE { if( m_runState != ExecutingChildren ) { m_runState = ExecutingChildren; if( m_parent ) m_parent->openChild(); } } virtual bool isSectionTracker() const CATCH_OVERRIDE { return false; } virtual bool isIndexTracker() const CATCH_OVERRIDE { return false; } void open() { m_runState = Executing; moveToThis(); if( m_parent ) m_parent->openChild(); } virtual void close() CATCH_OVERRIDE { // Close any still open children (e.g. 
generators) while( &m_ctx.currentTracker() != this ) m_ctx.currentTracker().close(); switch( m_runState ) { case NotStarted: case CompletedSuccessfully: case Failed: throw std::logic_error( "Illogical state" ); case NeedsAnotherRun: break;; case Executing: m_runState = CompletedSuccessfully; break; case ExecutingChildren: if( m_children.empty() || m_children.back()->isComplete() ) m_runState = CompletedSuccessfully; break; default: throw std::logic_error( "Unexpected state" ); } moveToParent(); m_ctx.completeCycle(); } virtual void fail() CATCH_OVERRIDE { m_runState = Failed; if( m_parent ) m_parent->markAsNeedingAnotherRun(); moveToParent(); m_ctx.completeCycle(); } virtual void markAsNeedingAnotherRun() CATCH_OVERRIDE { m_runState = NeedsAnotherRun; } private: void moveToParent() { assert( m_parent ); m_ctx.setCurrentTracker( m_parent ); } void moveToThis() { m_ctx.setCurrentTracker( this ); } }; class SectionTracker : public TrackerBase { std::vector m_filters; public: SectionTracker( NameAndLocation const& nameAndLocation, TrackerContext& ctx, ITracker* parent ) : TrackerBase( nameAndLocation, ctx, parent ) { if( parent ) { while( !parent->isSectionTracker() ) parent = &parent->parent(); SectionTracker& parentSection = static_cast( *parent ); addNextFilters( parentSection.m_filters ); } } virtual ~SectionTracker(); virtual bool isSectionTracker() const CATCH_OVERRIDE { return true; } static SectionTracker& acquire( TrackerContext& ctx, NameAndLocation const& nameAndLocation ) { SectionTracker* section = CATCH_NULL; ITracker& currentTracker = ctx.currentTracker(); if( ITracker* childTracker = currentTracker.findChild( nameAndLocation ) ) { assert( childTracker ); assert( childTracker->isSectionTracker() ); section = static_cast( childTracker ); } else { section = new SectionTracker( nameAndLocation, ctx, ¤tTracker ); currentTracker.addChild( section ); } if( !ctx.completedCycle() ) section->tryOpen(); return *section; } void tryOpen() { if( !isComplete() && (m_filters.empty() || m_filters[0].empty() || m_filters[0] == m_nameAndLocation.name ) ) open(); } void addInitialFilters( std::vector const& filters ) { if( !filters.empty() ) { m_filters.push_back(""); // Root - should never be consulted m_filters.push_back(""); // Test Case - not a section filter m_filters.insert( m_filters.end(), filters.begin(), filters.end() ); } } void addNextFilters( std::vector const& filters ) { if( filters.size() > 1 ) m_filters.insert( m_filters.end(), ++filters.begin(), filters.end() ); } }; class IndexTracker : public TrackerBase { int m_size; int m_index; public: IndexTracker( NameAndLocation const& nameAndLocation, TrackerContext& ctx, ITracker* parent, int size ) : TrackerBase( nameAndLocation, ctx, parent ), m_size( size ), m_index( -1 ) {} virtual ~IndexTracker(); virtual bool isIndexTracker() const CATCH_OVERRIDE { return true; } static IndexTracker& acquire( TrackerContext& ctx, NameAndLocation const& nameAndLocation, int size ) { IndexTracker* tracker = CATCH_NULL; ITracker& currentTracker = ctx.currentTracker(); if( ITracker* childTracker = currentTracker.findChild( nameAndLocation ) ) { assert( childTracker ); assert( childTracker->isIndexTracker() ); tracker = static_cast( childTracker ); } else { tracker = new IndexTracker( nameAndLocation, ctx, ¤tTracker, size ); currentTracker.addChild( tracker ); } if( !ctx.completedCycle() && !tracker->isComplete() ) { if( tracker->m_runState != ExecutingChildren && tracker->m_runState != NeedsAnotherRun ) tracker->moveNext(); tracker->open(); } return 
*tracker; } int index() const { return m_index; } void moveNext() { m_index++; m_children.clear(); } virtual void close() CATCH_OVERRIDE { TrackerBase::close(); if( m_runState == CompletedSuccessfully && m_index < m_size-1 ) m_runState = Executing; } }; inline ITracker& TrackerContext::startRun() { m_rootTracker = new SectionTracker( NameAndLocation( "{root}", CATCH_INTERNAL_LINEINFO ), *this, CATCH_NULL ); m_currentTracker = CATCH_NULL; m_runState = Executing; return *m_rootTracker; } } // namespace TestCaseTracking using TestCaseTracking::ITracker; using TestCaseTracking::TrackerContext; using TestCaseTracking::SectionTracker; using TestCaseTracking::IndexTracker; } // namespace Catch CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS // #included from: catch_fatal_condition.hpp #define TWOBLUECUBES_CATCH_FATAL_CONDITION_H_INCLUDED namespace Catch { // Report the error condition inline void reportFatal( std::string const& message ) { IContext& context = Catch::getCurrentContext(); IResultCapture* resultCapture = context.getResultCapture(); resultCapture->handleFatalErrorCondition( message ); } } // namespace Catch #if defined ( CATCH_PLATFORM_WINDOWS ) ///////////////////////////////////////// // #included from: catch_windows_h_proxy.h #define TWOBLUECUBES_CATCH_WINDOWS_H_PROXY_H_INCLUDED #ifdef CATCH_DEFINES_NOMINMAX # define NOMINMAX #endif #ifdef CATCH_DEFINES_WIN32_LEAN_AND_MEAN # define WIN32_LEAN_AND_MEAN #endif #ifdef __AFXDLL #include #else #include #endif #ifdef CATCH_DEFINES_NOMINMAX # undef NOMINMAX #endif #ifdef CATCH_DEFINES_WIN32_LEAN_AND_MEAN # undef WIN32_LEAN_AND_MEAN #endif # if !defined ( CATCH_CONFIG_WINDOWS_SEH ) namespace Catch { struct FatalConditionHandler { void reset() {} }; } # else // CATCH_CONFIG_WINDOWS_SEH is defined namespace Catch { struct SignalDefs { DWORD id; const char* name; }; extern SignalDefs signalDefs[]; // There is no 1-1 mapping between signals and windows exceptions. // Windows can easily distinguish between SO and SigSegV, // but SigInt, SigTerm, etc are handled differently. SignalDefs signalDefs[] = { { EXCEPTION_ILLEGAL_INSTRUCTION, "SIGILL - Illegal instruction signal" }, { EXCEPTION_STACK_OVERFLOW, "SIGSEGV - Stack overflow" }, { EXCEPTION_ACCESS_VIOLATION, "SIGSEGV - Segmentation violation signal" }, { EXCEPTION_INT_DIVIDE_BY_ZERO, "Divide by zero error" }, }; struct FatalConditionHandler { static LONG CALLBACK handleVectoredException(PEXCEPTION_POINTERS ExceptionInfo) { for (int i = 0; i < sizeof(signalDefs) / sizeof(SignalDefs); ++i) { if (ExceptionInfo->ExceptionRecord->ExceptionCode == signalDefs[i].id) { reportFatal(signalDefs[i].name); } } // If its not an exception we care about, pass it along. // This stops us from eating debugger breaks etc. 
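            // Returning EXCEPTION_CONTINUE_SEARCH hands the exception to the next
            // handler in the chain. The handler is registered first-in-chain via
            // AddVectoredExceptionHandler( 1, ... ) in the constructor below, so only
            // the exception codes listed in signalDefs are reported as fatal.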
return EXCEPTION_CONTINUE_SEARCH; } FatalConditionHandler() { isSet = true; // 32k seems enough for Catch to handle stack overflow, // but the value was found experimentally, so there is no strong guarantee guaranteeSize = 32 * 1024; exceptionHandlerHandle = CATCH_NULL; // Register as first handler in current chain exceptionHandlerHandle = AddVectoredExceptionHandler(1, handleVectoredException); // Pass in guarantee size to be filled SetThreadStackGuarantee(&guaranteeSize); } static void reset() { if (isSet) { // Unregister handler and restore the old guarantee RemoveVectoredExceptionHandler(exceptionHandlerHandle); SetThreadStackGuarantee(&guaranteeSize); exceptionHandlerHandle = CATCH_NULL; isSet = false; } } ~FatalConditionHandler() { reset(); } private: static bool isSet; static ULONG guaranteeSize; static PVOID exceptionHandlerHandle; }; bool FatalConditionHandler::isSet = false; ULONG FatalConditionHandler::guaranteeSize = 0; PVOID FatalConditionHandler::exceptionHandlerHandle = CATCH_NULL; } // namespace Catch # endif // CATCH_CONFIG_WINDOWS_SEH #else // Not Windows - assumed to be POSIX compatible ////////////////////////// # if !defined(CATCH_CONFIG_POSIX_SIGNALS) namespace Catch { struct FatalConditionHandler { void reset() {} }; } # else // CATCH_CONFIG_POSIX_SIGNALS is defined #include namespace Catch { struct SignalDefs { int id; const char* name; }; extern SignalDefs signalDefs[]; SignalDefs signalDefs[] = { { SIGINT, "SIGINT - Terminal interrupt signal" }, { SIGILL, "SIGILL - Illegal instruction signal" }, { SIGFPE, "SIGFPE - Floating point error signal" }, { SIGSEGV, "SIGSEGV - Segmentation violation signal" }, { SIGTERM, "SIGTERM - Termination request signal" }, { SIGABRT, "SIGABRT - Abort (abnormal termination) signal" } }; struct FatalConditionHandler { static bool isSet; static struct sigaction oldSigActions [sizeof(signalDefs)/sizeof(SignalDefs)]; static stack_t oldSigStack; static char altStackMem[SIGSTKSZ]; static void handleSignal( int sig ) { std::string name = ""; for (std::size_t i = 0; i < sizeof(signalDefs) / sizeof(SignalDefs); ++i) { SignalDefs &def = signalDefs[i]; if (sig == def.id) { name = def.name; break; } } reset(); reportFatal(name); raise( sig ); } FatalConditionHandler() { isSet = true; stack_t sigStack; sigStack.ss_sp = altStackMem; sigStack.ss_size = SIGSTKSZ; sigStack.ss_flags = 0; sigaltstack(&sigStack, &oldSigStack); struct sigaction sa = { 0 }; sa.sa_handler = handleSignal; sa.sa_flags = SA_ONSTACK; for (std::size_t i = 0; i < sizeof(signalDefs)/sizeof(SignalDefs); ++i) { sigaction(signalDefs[i].id, &sa, &oldSigActions[i]); } } ~FatalConditionHandler() { reset(); } static void reset() { if( isSet ) { // Set signals back to previous values -- hopefully nobody overwrote them in the meantime for( std::size_t i = 0; i < sizeof(signalDefs)/sizeof(SignalDefs); ++i ) { sigaction(signalDefs[i].id, &oldSigActions[i], CATCH_NULL); } // Return the old stack sigaltstack(&oldSigStack, CATCH_NULL); isSet = false; } } }; bool FatalConditionHandler::isSet = false; struct sigaction FatalConditionHandler::oldSigActions[sizeof(signalDefs)/sizeof(SignalDefs)] = {}; stack_t FatalConditionHandler::oldSigStack = {}; char FatalConditionHandler::altStackMem[SIGSTKSZ] = {}; } // namespace Catch # endif // CATCH_CONFIG_POSIX_SIGNALS #endif // not Windows #include #include namespace Catch { class StreamRedirect { public: StreamRedirect( std::ostream& stream, std::string& targetString ) : m_stream( stream ), m_prevBuf( stream.rdbuf() ), m_targetString( targetString ) { 
stream.rdbuf( m_oss.rdbuf() ); } ~StreamRedirect() { m_targetString += m_oss.str(); m_stream.rdbuf( m_prevBuf ); } private: std::ostream& m_stream; std::streambuf* m_prevBuf; std::ostringstream m_oss; std::string& m_targetString; }; /////////////////////////////////////////////////////////////////////////// class RunContext : public IResultCapture, public IRunner { RunContext( RunContext const& ); void operator =( RunContext const& ); public: explicit RunContext( Ptr const& _config, Ptr const& reporter ) : m_runInfo( _config->name() ), m_context( getCurrentMutableContext() ), m_activeTestCase( CATCH_NULL ), m_config( _config ), m_reporter( reporter ), m_shouldReportUnexpected ( true ) { m_context.setRunner( this ); m_context.setConfig( m_config ); m_context.setResultCapture( this ); m_reporter->testRunStarting( m_runInfo ); } virtual ~RunContext() { m_reporter->testRunEnded( TestRunStats( m_runInfo, m_totals, aborting() ) ); } void testGroupStarting( std::string const& testSpec, std::size_t groupIndex, std::size_t groupsCount ) { m_reporter->testGroupStarting( GroupInfo( testSpec, groupIndex, groupsCount ) ); } void testGroupEnded( std::string const& testSpec, Totals const& totals, std::size_t groupIndex, std::size_t groupsCount ) { m_reporter->testGroupEnded( TestGroupStats( GroupInfo( testSpec, groupIndex, groupsCount ), totals, aborting() ) ); } Totals runTest( TestCase const& testCase ) { Totals prevTotals = m_totals; std::string redirectedCout; std::string redirectedCerr; TestCaseInfo testInfo = testCase.getTestCaseInfo(); m_reporter->testCaseStarting( testInfo ); m_activeTestCase = &testCase; do { ITracker& rootTracker = m_trackerContext.startRun(); assert( rootTracker.isSectionTracker() ); static_cast( rootTracker ).addInitialFilters( m_config->getSectionsToRun() ); do { m_trackerContext.startCycle(); m_testCaseTracker = &SectionTracker::acquire( m_trackerContext, TestCaseTracking::NameAndLocation( testInfo.name, testInfo.lineInfo ) ); runCurrentTest( redirectedCout, redirectedCerr ); } while( !m_testCaseTracker->isSuccessfullyCompleted() && !aborting() ); } // !TBD: deprecated - this will be replaced by indexed trackers while( getCurrentContext().advanceGeneratorsForCurrentTest() && !aborting() ); Totals deltaTotals = m_totals.delta( prevTotals ); if( testInfo.expectedToFail() && deltaTotals.testCases.passed > 0 ) { deltaTotals.assertions.failed++; deltaTotals.testCases.passed--; deltaTotals.testCases.failed++; } m_totals.testCases += deltaTotals.testCases; m_reporter->testCaseEnded( TestCaseStats( testInfo, deltaTotals, redirectedCout, redirectedCerr, aborting() ) ); m_activeTestCase = CATCH_NULL; m_testCaseTracker = CATCH_NULL; return deltaTotals; } Ptr config() const { return m_config; } private: // IResultCapture virtual void assertionEnded( AssertionResult const& result ) { if( result.getResultType() == ResultWas::Ok ) { m_totals.assertions.passed++; } else if( !result.isOk() ) { m_totals.assertions.failed++; } // We have no use for the return value (whether messages should be cleared), because messages were made scoped // and should be let to clear themselves out. 
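            // After reporting, the "last assertion" info is reset below so that an
            // unexpected exception thrown before the next assertion can still be
            // attributed to a plausible source line
            // ("{Unknown expression after the reported line}").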
static_cast(m_reporter->assertionEnded(AssertionStats(result, m_messages, m_totals))); // Reset working state m_lastAssertionInfo = AssertionInfo( "", m_lastAssertionInfo.lineInfo, "{Unknown expression after the reported line}" , m_lastAssertionInfo.resultDisposition ); m_lastResult = result; } virtual bool sectionStarted ( SectionInfo const& sectionInfo, Counts& assertions ) { ITracker& sectionTracker = SectionTracker::acquire( m_trackerContext, TestCaseTracking::NameAndLocation( sectionInfo.name, sectionInfo.lineInfo ) ); if( !sectionTracker.isOpen() ) return false; m_activeSections.push_back( §ionTracker ); m_lastAssertionInfo.lineInfo = sectionInfo.lineInfo; m_reporter->sectionStarting( sectionInfo ); assertions = m_totals.assertions; return true; } bool testForMissingAssertions( Counts& assertions ) { if( assertions.total() != 0 ) return false; if( !m_config->warnAboutMissingAssertions() ) return false; if( m_trackerContext.currentTracker().hasChildren() ) return false; m_totals.assertions.failed++; assertions.failed++; return true; } virtual void sectionEnded( SectionEndInfo const& endInfo ) { Counts assertions = m_totals.assertions - endInfo.prevAssertions; bool missingAssertions = testForMissingAssertions( assertions ); if( !m_activeSections.empty() ) { m_activeSections.back()->close(); m_activeSections.pop_back(); } m_reporter->sectionEnded( SectionStats( endInfo.sectionInfo, assertions, endInfo.durationInSeconds, missingAssertions ) ); m_messages.clear(); } virtual void sectionEndedEarly( SectionEndInfo const& endInfo ) { if( m_unfinishedSections.empty() ) m_activeSections.back()->fail(); else m_activeSections.back()->close(); m_activeSections.pop_back(); m_unfinishedSections.push_back( endInfo ); } virtual void pushScopedMessage( MessageInfo const& message ) { m_messages.push_back( message ); } virtual void popScopedMessage( MessageInfo const& message ) { m_messages.erase( std::remove( m_messages.begin(), m_messages.end(), message ), m_messages.end() ); } virtual std::string getCurrentTestName() const { return m_activeTestCase ? m_activeTestCase->getTestCaseInfo().name : std::string(); } virtual const AssertionResult* getLastResult() const { return &m_lastResult; } virtual void exceptionEarlyReported() { m_shouldReportUnexpected = false; } virtual void handleFatalErrorCondition( std::string const& message ) { // Don't rebuild the result -- the stringification itself can cause more fatal errors // Instead, fake a result data. 
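            // The recovery path below synthesises a failed assertion from the message,
            // closes any sections left open, then emits sectionEnded / testCaseEnded /
            // testGroupEnded / testRunEnded so that reporters still see a complete,
            // well-formed event sequence after a signal or SEH exception.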
AssertionResultData tempResult; tempResult.resultType = ResultWas::FatalErrorCondition; tempResult.message = message; AssertionResult result(m_lastAssertionInfo, tempResult); getResultCapture().assertionEnded(result); handleUnfinishedSections(); // Recreate section for test case (as we will lose the one that was in scope) TestCaseInfo const& testCaseInfo = m_activeTestCase->getTestCaseInfo(); SectionInfo testCaseSection( testCaseInfo.lineInfo, testCaseInfo.name, testCaseInfo.description ); Counts assertions; assertions.failed = 1; SectionStats testCaseSectionStats( testCaseSection, assertions, 0, false ); m_reporter->sectionEnded( testCaseSectionStats ); TestCaseInfo testInfo = m_activeTestCase->getTestCaseInfo(); Totals deltaTotals; deltaTotals.testCases.failed = 1; m_reporter->testCaseEnded( TestCaseStats( testInfo, deltaTotals, std::string(), std::string(), false ) ); m_totals.testCases.failed++; testGroupEnded( std::string(), m_totals, 1, 1 ); m_reporter->testRunEnded( TestRunStats( m_runInfo, m_totals, false ) ); } public: // !TBD We need to do this another way! bool aborting() const { return m_totals.assertions.failed == static_cast( m_config->abortAfter() ); } private: void runCurrentTest( std::string& redirectedCout, std::string& redirectedCerr ) { TestCaseInfo const& testCaseInfo = m_activeTestCase->getTestCaseInfo(); SectionInfo testCaseSection( testCaseInfo.lineInfo, testCaseInfo.name, testCaseInfo.description ); m_reporter->sectionStarting( testCaseSection ); Counts prevAssertions = m_totals.assertions; double duration = 0; m_shouldReportUnexpected = true; try { m_lastAssertionInfo = AssertionInfo( "TEST_CASE", testCaseInfo.lineInfo, "", ResultDisposition::Normal ); seedRng( *m_config ); Timer timer; timer.start(); if( m_reporter->getPreferences().shouldRedirectStdOut ) { StreamRedirect coutRedir( Catch::cout(), redirectedCout ); StreamRedirect cerrRedir( Catch::cerr(), redirectedCerr ); invokeActiveTestCase(); } else { invokeActiveTestCase(); } duration = timer.getElapsedSeconds(); } catch( TestFailureException& ) { // This just means the test was aborted due to failure } catch(...) { // Under CATCH_CONFIG_FAST_COMPILE, unexpected exceptions under REQUIRE assertions // are reported without translation at the point of origin. if (m_shouldReportUnexpected) { makeUnexpectedResultBuilder().useActiveException(); } } m_testCaseTracker->close(); handleUnfinishedSections(); m_messages.clear(); Counts assertions = m_totals.assertions - prevAssertions; bool missingAssertions = testForMissingAssertions( assertions ); if( testCaseInfo.okToFail() ) { std::swap( assertions.failedButOk, assertions.failed ); m_totals.assertions.failed -= assertions.failedButOk; m_totals.assertions.failedButOk += assertions.failedButOk; } SectionStats testCaseSectionStats( testCaseSection, assertions, duration, missingAssertions ); m_reporter->sectionEnded( testCaseSectionStats ); } void invokeActiveTestCase() { FatalConditionHandler fatalConditionHandler; // Handle signals m_activeTestCase->invoke(); fatalConditionHandler.reset(); } private: ResultBuilder makeUnexpectedResultBuilder() const { return ResultBuilder( m_lastAssertionInfo.macroName, m_lastAssertionInfo.lineInfo, m_lastAssertionInfo.capturedExpression, m_lastAssertionInfo.resultDisposition ); } void handleUnfinishedSections() { // If sections ended prematurely due to an exception we stored their // infos here so we can tear them down outside the unwind process. 
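            // Each stored SectionEndInfo is replayed through sectionEnded(), in reverse
            // order of recording, so the usual assertion-count and duration bookkeeping
            // still runs; the list is cleared afterwards.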
for( std::vector::const_reverse_iterator it = m_unfinishedSections.rbegin(), itEnd = m_unfinishedSections.rend(); it != itEnd; ++it ) sectionEnded( *it ); m_unfinishedSections.clear(); } TestRunInfo m_runInfo; IMutableContext& m_context; TestCase const* m_activeTestCase; ITracker* m_testCaseTracker; ITracker* m_currentSectionTracker; AssertionResult m_lastResult; Ptr m_config; Totals m_totals; Ptr m_reporter; std::vector m_messages; AssertionInfo m_lastAssertionInfo; std::vector m_unfinishedSections; std::vector m_activeSections; TrackerContext m_trackerContext; bool m_shouldReportUnexpected; }; IResultCapture& getResultCapture() { if( IResultCapture* capture = getCurrentContext().getResultCapture() ) return *capture; else throw std::logic_error( "No result capture instance" ); } } // end namespace Catch // #included from: internal/catch_version.h #define TWOBLUECUBES_CATCH_VERSION_H_INCLUDED namespace Catch { // Versioning information struct Version { Version( unsigned int _majorVersion, unsigned int _minorVersion, unsigned int _patchNumber, char const * const _branchName, unsigned int _buildNumber ); unsigned int const majorVersion; unsigned int const minorVersion; unsigned int const patchNumber; // buildNumber is only used if branchName is not null char const * const branchName; unsigned int const buildNumber; friend std::ostream& operator << ( std::ostream& os, Version const& version ); private: void operator=( Version const& ); }; inline Version libraryVersion(); } #include #include #include namespace Catch { Ptr createReporter( std::string const& reporterName, Ptr const& config ) { Ptr reporter = getRegistryHub().getReporterRegistry().create( reporterName, config.get() ); if( !reporter ) { std::ostringstream oss; oss << "No reporter registered with name: '" << reporterName << "'"; throw std::domain_error( oss.str() ); } return reporter; } Ptr makeReporter( Ptr const& config ) { std::vector reporters = config->getReporterNames(); if( reporters.empty() ) reporters.push_back( "console" ); Ptr reporter; for( std::vector::const_iterator it = reporters.begin(), itEnd = reporters.end(); it != itEnd; ++it ) reporter = addReporter( reporter, createReporter( *it, config ) ); return reporter; } Ptr addListeners( Ptr const& config, Ptr reporters ) { IReporterRegistry::Listeners listeners = getRegistryHub().getReporterRegistry().getListeners(); for( IReporterRegistry::Listeners::const_iterator it = listeners.begin(), itEnd = listeners.end(); it != itEnd; ++it ) reporters = addReporter(reporters, (*it)->create( ReporterConfig( config ) ) ); return reporters; } Totals runTests( Ptr const& config ) { Ptr iconfig = config.get(); Ptr reporter = makeReporter( config ); reporter = addListeners( iconfig, reporter ); RunContext context( iconfig, reporter ); Totals totals; context.testGroupStarting( config->name(), 1, 1 ); TestSpec testSpec = config->testSpec(); if( !testSpec.hasFilters() ) testSpec = TestSpecParser( ITagAliasRegistry::get() ).parse( "~[.]" ).testSpec(); // All not hidden tests std::vector const& allTestCases = getAllTestCasesSorted( *iconfig ); for( std::vector::const_iterator it = allTestCases.begin(), itEnd = allTestCases.end(); it != itEnd; ++it ) { if( !context.aborting() && matchTest( *it, testSpec, *iconfig ) ) totals += context.runTest( *it ); else reporter->skipTest( *it ); } context.testGroupEnded( iconfig->name(), totals, 1, 1 ); return totals; } void applyFilenamesAsTags( IConfig const& config ) { std::vector const& tests = getAllTestCasesSorted( config ); for(std::size_t i = 
0; i < tests.size(); ++i ) { TestCase& test = const_cast( tests[i] ); std::set tags = test.tags; std::string filename = test.lineInfo.file; std::string::size_type lastSlash = filename.find_last_of( "\\/" ); if( lastSlash != std::string::npos ) filename = filename.substr( lastSlash+1 ); std::string::size_type lastDot = filename.find_last_of( "." ); if( lastDot != std::string::npos ) filename = filename.substr( 0, lastDot ); tags.insert( "#" + filename ); setTags( test, tags ); } } class Session : NonCopyable { static bool alreadyInstantiated; public: struct OnUnusedOptions { enum DoWhat { Ignore, Fail }; }; Session() : m_cli( makeCommandLineParser() ) { if( alreadyInstantiated ) { std::string msg = "Only one instance of Catch::Session can ever be used"; Catch::cerr() << msg << std::endl; throw std::logic_error( msg ); } alreadyInstantiated = true; } ~Session() { Catch::cleanUp(); } void showHelp( std::string const& processName ) { Catch::cout() << "\nCatch v" << libraryVersion() << "\n"; m_cli.usage( Catch::cout(), processName ); Catch::cout() << "For more detail usage please see the project docs\n" << std::endl; } int applyCommandLine( int argc, char const* const* const argv, OnUnusedOptions::DoWhat unusedOptionBehaviour = OnUnusedOptions::Fail ) { try { m_cli.setThrowOnUnrecognisedTokens( unusedOptionBehaviour == OnUnusedOptions::Fail ); m_unusedTokens = m_cli.parseInto( Clara::argsToVector( argc, argv ), m_configData ); if( m_configData.showHelp ) showHelp( m_configData.processName ); m_config.reset(); } catch( std::exception& ex ) { { Colour colourGuard( Colour::Red ); Catch::cerr() << "\nError(s) in input:\n" << Text( ex.what(), TextAttributes().setIndent(2) ) << "\n\n"; } m_cli.usage( Catch::cout(), m_configData.processName ); return (std::numeric_limits::max)(); } return 0; } void useConfigData( ConfigData const& _configData ) { m_configData = _configData; m_config.reset(); } int run( int argc, char const* const* const argv ) { int returnCode = applyCommandLine( argc, argv ); if( returnCode == 0 ) returnCode = run(); return returnCode; } #if defined(WIN32) && defined(UNICODE) int run( int argc, wchar_t const* const* const argv ) { char **utf8Argv = new char *[ argc ]; for ( int i = 0; i < argc; ++i ) { int bufSize = WideCharToMultiByte( CP_UTF8, 0, argv[i], -1, NULL, 0, NULL, NULL ); utf8Argv[ i ] = new char[ bufSize ]; WideCharToMultiByte( CP_UTF8, 0, argv[i], -1, utf8Argv[i], bufSize, NULL, NULL ); } int returnCode = applyCommandLine( argc, utf8Argv ); if( returnCode == 0 ) returnCode = run(); for ( int i = 0; i < argc; ++i ) delete [] utf8Argv[ i ]; delete [] utf8Argv; return returnCode; } #endif int run() { if( m_configData.showHelp ) return 0; try { config(); // Force config to be constructed seedRng( *m_config ); if( m_configData.filenamesAsTags ) applyFilenamesAsTags( *m_config ); // Handle list request if( Option listed = list( config() ) ) return static_cast( *listed ); return static_cast( runTests( m_config ).assertions.failed ); } catch( std::exception& ex ) { Catch::cerr() << ex.what() << std::endl; return (std::numeric_limits::max)(); } } Clara::CommandLine const& cli() const { return m_cli; } std::vector const& unusedTokens() const { return m_unusedTokens; } ConfigData& configData() { return m_configData; } Config& config() { if( !m_config ) m_config = new Config( m_configData ); return *m_config; } private: Clara::CommandLine m_cli; std::vector m_unusedTokens; ConfigData m_configData; Ptr m_config; }; bool Session::alreadyInstantiated = false; } // end namespace Catch 
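///////////////////////////////////////////////////////////////////////////
// Illustration (editorial sketch, not part of Catch): embedders that supply
// their own main() typically drive the Session class above like this --
// construct it once (a second instance throws), optionally adjust
// configData(), then hand over argc/argv:
//
//     int main( int argc, char* argv[] ) {
//         Catch::Session session;
//         session.configData().showDurations = Catch::ShowDurations::Always;
//         int returnCode = session.applyCommandLine( argc, argv );
//         if( returnCode != 0 )   // non-zero indicates a command line error
//             return returnCode;
//         return session.run();
//     }
///////////////////////////////////////////////////////////////////////////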
// #included from: catch_registry_hub.hpp #define TWOBLUECUBES_CATCH_REGISTRY_HUB_HPP_INCLUDED // #included from: catch_test_case_registry_impl.hpp #define TWOBLUECUBES_CATCH_TEST_CASE_REGISTRY_IMPL_HPP_INCLUDED #include #include #include #include namespace Catch { struct RandomNumberGenerator { typedef std::ptrdiff_t result_type; result_type operator()( result_type n ) const { return rand() % n; } #ifdef CATCH_CONFIG_CPP11_SHUFFLE static constexpr result_type min() { return 0; } static constexpr result_type max() { return 1000000; } result_type operator()() const { return rand() % max(); } #endif template static void shuffle( V& vector ) { RandomNumberGenerator rng; #ifdef CATCH_CONFIG_CPP11_SHUFFLE std::shuffle( vector.begin(), vector.end(), rng ); #else random_shuffle( vector.begin(), vector.end(), rng ); #endif } }; inline std::vector sortTests( IConfig const& config, std::vector const& unsortedTestCases ) { std::vector sorted = unsortedTestCases; switch( config.runOrder() ) { case RunTests::InLexicographicalOrder: std::sort( sorted.begin(), sorted.end() ); break; case RunTests::InRandomOrder: { seedRng( config ); RandomNumberGenerator::shuffle( sorted ); } break; case RunTests::InDeclarationOrder: // already in declaration order break; } return sorted; } bool matchTest( TestCase const& testCase, TestSpec const& testSpec, IConfig const& config ) { return testSpec.matches( testCase ) && ( config.allowThrows() || !testCase.throws() ); } void enforceNoDuplicateTestCases( std::vector const& functions ) { std::set seenFunctions; for( std::vector::const_iterator it = functions.begin(), itEnd = functions.end(); it != itEnd; ++it ) { std::pair::const_iterator, bool> prev = seenFunctions.insert( *it ); if( !prev.second ) { std::ostringstream ss; ss << Colour( Colour::Red ) << "error: TEST_CASE( \"" << it->name << "\" ) already defined.\n" << "\tFirst seen at " << prev.first->getTestCaseInfo().lineInfo << '\n' << "\tRedefined at " << it->getTestCaseInfo().lineInfo << std::endl; throw std::runtime_error(ss.str()); } } } std::vector filterTests( std::vector const& testCases, TestSpec const& testSpec, IConfig const& config ) { std::vector filtered; filtered.reserve( testCases.size() ); for( std::vector::const_iterator it = testCases.begin(), itEnd = testCases.end(); it != itEnd; ++it ) if( matchTest( *it, testSpec, config ) ) filtered.push_back( *it ); return filtered; } std::vector const& getAllTestCasesSorted( IConfig const& config ) { return getRegistryHub().getTestCaseRegistry().getAllTestsSorted( config ); } class TestRegistry : public ITestCaseRegistry { public: TestRegistry() : m_currentSortOrder( RunTests::InDeclarationOrder ), m_unnamedCount( 0 ) {} virtual ~TestRegistry(); virtual void registerTest( TestCase const& testCase ) { std::string name = testCase.getTestCaseInfo().name; if( name.empty() ) { std::ostringstream oss; oss << "Anonymous test case " << ++m_unnamedCount; return registerTest( testCase.withName( oss.str() ) ); } m_functions.push_back( testCase ); } virtual std::vector const& getAllTests() const { return m_functions; } virtual std::vector const& getAllTestsSorted( IConfig const& config ) const { if( m_sortedFunctions.empty() ) enforceNoDuplicateTestCases( m_functions ); if( m_currentSortOrder != config.runOrder() || m_sortedFunctions.empty() ) { m_sortedFunctions = sortTests( config, m_functions ); m_currentSortOrder = config.runOrder(); } return m_sortedFunctions; } private: std::vector m_functions; mutable RunTests::InWhatOrder m_currentSortOrder; mutable std::vector 
m_sortedFunctions; size_t m_unnamedCount; std::ios_base::Init m_ostreamInit; // Forces cout/ cerr to be initialised }; /////////////////////////////////////////////////////////////////////////// class FreeFunctionTestCase : public SharedImpl { public: FreeFunctionTestCase( TestFunction fun ) : m_fun( fun ) {} virtual void invoke() const { m_fun(); } private: virtual ~FreeFunctionTestCase(); TestFunction m_fun; }; inline std::string extractClassName( std::string const& classOrQualifiedMethodName ) { std::string className = classOrQualifiedMethodName; if( startsWith( className, '&' ) ) { std::size_t lastColons = className.rfind( "::" ); std::size_t penultimateColons = className.rfind( "::", lastColons-1 ); if( penultimateColons == std::string::npos ) penultimateColons = 1; className = className.substr( penultimateColons, lastColons-penultimateColons ); } return className; } void registerTestCase ( ITestCase* testCase, char const* classOrQualifiedMethodName, NameAndDesc const& nameAndDesc, SourceLineInfo const& lineInfo ) { getMutableRegistryHub().registerTest ( makeTestCase ( testCase, extractClassName( classOrQualifiedMethodName ), nameAndDesc.name, nameAndDesc.description, lineInfo ) ); } void registerTestCaseFunction ( TestFunction function, SourceLineInfo const& lineInfo, NameAndDesc const& nameAndDesc ) { registerTestCase( new FreeFunctionTestCase( function ), "", nameAndDesc, lineInfo ); } /////////////////////////////////////////////////////////////////////////// AutoReg::AutoReg ( TestFunction function, SourceLineInfo const& lineInfo, NameAndDesc const& nameAndDesc ) { registerTestCaseFunction( function, lineInfo, nameAndDesc ); } AutoReg::~AutoReg() {} } // end namespace Catch // #included from: catch_reporter_registry.hpp #define TWOBLUECUBES_CATCH_REPORTER_REGISTRY_HPP_INCLUDED #include namespace Catch { class ReporterRegistry : public IReporterRegistry { public: virtual ~ReporterRegistry() CATCH_OVERRIDE {} virtual IStreamingReporter* create( std::string const& name, Ptr const& config ) const CATCH_OVERRIDE { FactoryMap::const_iterator it = m_factories.find( name ); if( it == m_factories.end() ) return CATCH_NULL; return it->second->create( ReporterConfig( config ) ); } void registerReporter( std::string const& name, Ptr const& factory ) { m_factories.insert( std::make_pair( name, factory ) ); } void registerListener( Ptr const& factory ) { m_listeners.push_back( factory ); } virtual FactoryMap const& getFactories() const CATCH_OVERRIDE { return m_factories; } virtual Listeners const& getListeners() const CATCH_OVERRIDE { return m_listeners; } private: FactoryMap m_factories; Listeners m_listeners; }; } // #included from: catch_exception_translator_registry.hpp #define TWOBLUECUBES_CATCH_EXCEPTION_TRANSLATOR_REGISTRY_HPP_INCLUDED #ifdef __OBJC__ #import "Foundation/Foundation.h" #endif namespace Catch { class ExceptionTranslatorRegistry : public IExceptionTranslatorRegistry { public: ~ExceptionTranslatorRegistry() { deleteAll( m_translators ); } virtual void registerTranslator( const IExceptionTranslator* translator ) { m_translators.push_back( translator ); } virtual std::string translateActiveException() const { try { #ifdef __OBJC__ // In Objective-C try objective-c exceptions first @try { return tryTranslators(); } @catch (NSException *exception) { return Catch::toString( [exception description] ); } #else return tryTranslators(); #endif } catch( TestFailureException& ) { throw; } catch( std::exception& ex ) { return ex.what(); } catch( std::string& msg ) { return msg; } 
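            // Translation falls back in order: registered translators (tryTranslators()),
            // then std::exception::what(), thrown std::string and const char* values,
            // and finally the generic "Unknown exception" handler.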
catch( const char* msg ) { return msg; } catch(...) { return "Unknown exception"; } } std::string tryTranslators() const { if( m_translators.empty() ) throw; else return m_translators[0]->translate( m_translators.begin()+1, m_translators.end() ); } private: std::vector m_translators; }; } // #included from: catch_tag_alias_registry.h #define TWOBLUECUBES_CATCH_TAG_ALIAS_REGISTRY_H_INCLUDED #include namespace Catch { class TagAliasRegistry : public ITagAliasRegistry { public: virtual ~TagAliasRegistry(); virtual Option find( std::string const& alias ) const; virtual std::string expandAliases( std::string const& unexpandedTestSpec ) const; void add( std::string const& alias, std::string const& tag, SourceLineInfo const& lineInfo ); private: std::map m_registry; }; } // end namespace Catch namespace Catch { namespace { class RegistryHub : public IRegistryHub, public IMutableRegistryHub { RegistryHub( RegistryHub const& ); void operator=( RegistryHub const& ); public: // IRegistryHub RegistryHub() { } virtual IReporterRegistry const& getReporterRegistry() const CATCH_OVERRIDE { return m_reporterRegistry; } virtual ITestCaseRegistry const& getTestCaseRegistry() const CATCH_OVERRIDE { return m_testCaseRegistry; } virtual IExceptionTranslatorRegistry& getExceptionTranslatorRegistry() CATCH_OVERRIDE { return m_exceptionTranslatorRegistry; } virtual ITagAliasRegistry const& getTagAliasRegistry() const CATCH_OVERRIDE { return m_tagAliasRegistry; } public: // IMutableRegistryHub virtual void registerReporter( std::string const& name, Ptr const& factory ) CATCH_OVERRIDE { m_reporterRegistry.registerReporter( name, factory ); } virtual void registerListener( Ptr const& factory ) CATCH_OVERRIDE { m_reporterRegistry.registerListener( factory ); } virtual void registerTest( TestCase const& testInfo ) CATCH_OVERRIDE { m_testCaseRegistry.registerTest( testInfo ); } virtual void registerTranslator( const IExceptionTranslator* translator ) CATCH_OVERRIDE { m_exceptionTranslatorRegistry.registerTranslator( translator ); } virtual void registerTagAlias( std::string const& alias, std::string const& tag, SourceLineInfo const& lineInfo ) CATCH_OVERRIDE { m_tagAliasRegistry.add( alias, tag, lineInfo ); } private: TestRegistry m_testCaseRegistry; ReporterRegistry m_reporterRegistry; ExceptionTranslatorRegistry m_exceptionTranslatorRegistry; TagAliasRegistry m_tagAliasRegistry; }; // Single, global, instance inline RegistryHub*& getTheRegistryHub() { static RegistryHub* theRegistryHub = CATCH_NULL; if( !theRegistryHub ) theRegistryHub = new RegistryHub(); return theRegistryHub; } } IRegistryHub& getRegistryHub() { return *getTheRegistryHub(); } IMutableRegistryHub& getMutableRegistryHub() { return *getTheRegistryHub(); } void cleanUp() { delete getTheRegistryHub(); getTheRegistryHub() = CATCH_NULL; cleanUpContext(); } std::string translateActiveException() { return getRegistryHub().getExceptionTranslatorRegistry().translateActiveException(); } } // end namespace Catch // #included from: catch_notimplemented_exception.hpp #define TWOBLUECUBES_CATCH_NOTIMPLEMENTED_EXCEPTION_HPP_INCLUDED #include namespace Catch { NotImplementedException::NotImplementedException( SourceLineInfo const& lineInfo ) : m_lineInfo( lineInfo ) { std::ostringstream oss; oss << lineInfo << ": function "; oss << "not implemented"; m_what = oss.str(); } const char* NotImplementedException::what() const CATCH_NOEXCEPT { return m_what.c_str(); } } // end namespace Catch // #included from: catch_context_impl.hpp #define 
TWOBLUECUBES_CATCH_CONTEXT_IMPL_HPP_INCLUDED // #included from: catch_stream.hpp #define TWOBLUECUBES_CATCH_STREAM_HPP_INCLUDED #include #include #include namespace Catch { template class StreamBufImpl : public StreamBufBase { char data[bufferSize]; WriterF m_writer; public: StreamBufImpl() { setp( data, data + sizeof(data) ); } ~StreamBufImpl() CATCH_NOEXCEPT { sync(); } private: int overflow( int c ) { sync(); if( c != EOF ) { if( pbase() == epptr() ) m_writer( std::string( 1, static_cast( c ) ) ); else sputc( static_cast( c ) ); } return 0; } int sync() { if( pbase() != pptr() ) { m_writer( std::string( pbase(), static_cast( pptr() - pbase() ) ) ); setp( pbase(), epptr() ); } return 0; } }; /////////////////////////////////////////////////////////////////////////// FileStream::FileStream( std::string const& filename ) { m_ofs.open( filename.c_str() ); if( m_ofs.fail() ) { std::ostringstream oss; oss << "Unable to open file: '" << filename << '\''; throw std::domain_error( oss.str() ); } } std::ostream& FileStream::stream() const { return m_ofs; } struct OutputDebugWriter { void operator()( std::string const&str ) { writeToDebugConsole( str ); } }; DebugOutStream::DebugOutStream() : m_streamBuf( new StreamBufImpl() ), m_os( m_streamBuf.get() ) {} std::ostream& DebugOutStream::stream() const { return m_os; } // Store the streambuf from cout up-front because // cout may get redirected when running tests CoutStream::CoutStream() : m_os( Catch::cout().rdbuf() ) {} std::ostream& CoutStream::stream() const { return m_os; } #ifndef CATCH_CONFIG_NOSTDOUT // If you #define this you must implement these functions std::ostream& cout() { return std::cout; } std::ostream& cerr() { return std::cerr; } #endif } namespace Catch { class Context : public IMutableContext { Context() : m_config( CATCH_NULL ), m_runner( CATCH_NULL ), m_resultCapture( CATCH_NULL ) {} Context( Context const& ); void operator=( Context const& ); public: virtual ~Context() { deleteAllValues( m_generatorsByTestName ); } public: // IContext virtual IResultCapture* getResultCapture() { return m_resultCapture; } virtual IRunner* getRunner() { return m_runner; } virtual size_t getGeneratorIndex( std::string const& fileInfo, size_t totalSize ) { return getGeneratorsForCurrentTest() .getGeneratorInfo( fileInfo, totalSize ) .getCurrentIndex(); } virtual bool advanceGeneratorsForCurrentTest() { IGeneratorsForTest* generators = findGeneratorsForCurrentTest(); return generators && generators->moveNext(); } virtual Ptr getConfig() const { return m_config; } public: // IMutableContext virtual void setResultCapture( IResultCapture* resultCapture ) { m_resultCapture = resultCapture; } virtual void setRunner( IRunner* runner ) { m_runner = runner; } virtual void setConfig( Ptr const& config ) { m_config = config; } friend IMutableContext& getCurrentMutableContext(); private: IGeneratorsForTest* findGeneratorsForCurrentTest() { std::string testName = getResultCapture()->getCurrentTestName(); std::map::const_iterator it = m_generatorsByTestName.find( testName ); return it != m_generatorsByTestName.end() ? 
it->second : CATCH_NULL; } IGeneratorsForTest& getGeneratorsForCurrentTest() { IGeneratorsForTest* generators = findGeneratorsForCurrentTest(); if( !generators ) { std::string testName = getResultCapture()->getCurrentTestName(); generators = createGeneratorsForTest(); m_generatorsByTestName.insert( std::make_pair( testName, generators ) ); } return *generators; } private: Ptr m_config; IRunner* m_runner; IResultCapture* m_resultCapture; std::map m_generatorsByTestName; }; namespace { Context* currentContext = CATCH_NULL; } IMutableContext& getCurrentMutableContext() { if( !currentContext ) currentContext = new Context(); return *currentContext; } IContext& getCurrentContext() { return getCurrentMutableContext(); } void cleanUpContext() { delete currentContext; currentContext = CATCH_NULL; } } // #included from: catch_console_colour_impl.hpp #define TWOBLUECUBES_CATCH_CONSOLE_COLOUR_IMPL_HPP_INCLUDED // #included from: catch_errno_guard.hpp #define TWOBLUECUBES_CATCH_ERRNO_GUARD_HPP_INCLUDED #include namespace Catch { class ErrnoGuard { public: ErrnoGuard():m_oldErrno(errno){} ~ErrnoGuard() { errno = m_oldErrno; } private: int m_oldErrno; }; } namespace Catch { namespace { struct IColourImpl { virtual ~IColourImpl() {} virtual void use( Colour::Code _colourCode ) = 0; }; struct NoColourImpl : IColourImpl { void use( Colour::Code ) {} static IColourImpl* instance() { static NoColourImpl s_instance; return &s_instance; } }; } // anon namespace } // namespace Catch #if !defined( CATCH_CONFIG_COLOUR_NONE ) && !defined( CATCH_CONFIG_COLOUR_WINDOWS ) && !defined( CATCH_CONFIG_COLOUR_ANSI ) # ifdef CATCH_PLATFORM_WINDOWS # define CATCH_CONFIG_COLOUR_WINDOWS # else # define CATCH_CONFIG_COLOUR_ANSI # endif #endif #if defined ( CATCH_CONFIG_COLOUR_WINDOWS ) ///////////////////////////////////////// namespace Catch { namespace { class Win32ColourImpl : public IColourImpl { public: Win32ColourImpl() : stdoutHandle( GetStdHandle(STD_OUTPUT_HANDLE) ) { CONSOLE_SCREEN_BUFFER_INFO csbiInfo; GetConsoleScreenBufferInfo( stdoutHandle, &csbiInfo ); originalForegroundAttributes = csbiInfo.wAttributes & ~( BACKGROUND_GREEN | BACKGROUND_RED | BACKGROUND_BLUE | BACKGROUND_INTENSITY ); originalBackgroundAttributes = csbiInfo.wAttributes & ~( FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_BLUE | FOREGROUND_INTENSITY ); } virtual void use( Colour::Code _colourCode ) { switch( _colourCode ) { case Colour::None: return setTextAttribute( originalForegroundAttributes ); case Colour::White: return setTextAttribute( FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_BLUE ); case Colour::Red: return setTextAttribute( FOREGROUND_RED ); case Colour::Green: return setTextAttribute( FOREGROUND_GREEN ); case Colour::Blue: return setTextAttribute( FOREGROUND_BLUE ); case Colour::Cyan: return setTextAttribute( FOREGROUND_BLUE | FOREGROUND_GREEN ); case Colour::Yellow: return setTextAttribute( FOREGROUND_RED | FOREGROUND_GREEN ); case Colour::Grey: return setTextAttribute( 0 ); case Colour::LightGrey: return setTextAttribute( FOREGROUND_INTENSITY ); case Colour::BrightRed: return setTextAttribute( FOREGROUND_INTENSITY | FOREGROUND_RED ); case Colour::BrightGreen: return setTextAttribute( FOREGROUND_INTENSITY | FOREGROUND_GREEN ); case Colour::BrightWhite: return setTextAttribute( FOREGROUND_INTENSITY | FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_BLUE ); case Colour::Bright: throw std::logic_error( "not a colour" ); } } private: void setTextAttribute( WORD _textAttribute ) { SetConsoleTextAttribute( stdoutHandle, _textAttribute | 
originalBackgroundAttributes ); } HANDLE stdoutHandle; WORD originalForegroundAttributes; WORD originalBackgroundAttributes; }; IColourImpl* platformColourInstance() { static Win32ColourImpl s_instance; Ptr config = getCurrentContext().getConfig(); UseColour::YesOrNo colourMode = config ? config->useColour() : UseColour::Auto; if( colourMode == UseColour::Auto ) colourMode = !isDebuggerActive() ? UseColour::Yes : UseColour::No; return colourMode == UseColour::Yes ? &s_instance : NoColourImpl::instance(); } } // end anon namespace } // end namespace Catch #elif defined( CATCH_CONFIG_COLOUR_ANSI ) ////////////////////////////////////// #include namespace Catch { namespace { // use POSIX/ ANSI console terminal codes // Thanks to Adam Strzelecki for original contribution // (http://github.com/nanoant) // https://github.com/philsquared/Catch/pull/131 class PosixColourImpl : public IColourImpl { public: virtual void use( Colour::Code _colourCode ) { switch( _colourCode ) { case Colour::None: case Colour::White: return setColour( "[0m" ); case Colour::Red: return setColour( "[0;31m" ); case Colour::Green: return setColour( "[0;32m" ); case Colour::Blue: return setColour( "[0;34m" ); case Colour::Cyan: return setColour( "[0;36m" ); case Colour::Yellow: return setColour( "[0;33m" ); case Colour::Grey: return setColour( "[1;30m" ); case Colour::LightGrey: return setColour( "[0;37m" ); case Colour::BrightRed: return setColour( "[1;31m" ); case Colour::BrightGreen: return setColour( "[1;32m" ); case Colour::BrightWhite: return setColour( "[1;37m" ); case Colour::Bright: throw std::logic_error( "not a colour" ); } } static IColourImpl* instance() { static PosixColourImpl s_instance; return &s_instance; } private: void setColour( const char* _escapeCode ) { Catch::cout() << '\033' << _escapeCode; } }; IColourImpl* platformColourInstance() { ErrnoGuard guard; Ptr config = getCurrentContext().getConfig(); UseColour::YesOrNo colourMode = config ? config->useColour() : UseColour::Auto; if( colourMode == UseColour::Auto ) colourMode = (!isDebuggerActive() && isatty(STDOUT_FILENO) ) ? UseColour::Yes : UseColour::No; return colourMode == UseColour::Yes ? 
PosixColourImpl::instance() : NoColourImpl::instance(); } } // end anon namespace } // end namespace Catch #else // not Windows or ANSI /////////////////////////////////////////////// namespace Catch { static IColourImpl* platformColourInstance() { return NoColourImpl::instance(); } } // end namespace Catch #endif // Windows/ ANSI/ None namespace Catch { Colour::Colour( Code _colourCode ) : m_moved( false ) { use( _colourCode ); } Colour::Colour( Colour const& _other ) : m_moved( false ) { const_cast( _other ).m_moved = true; } Colour::~Colour(){ if( !m_moved ) use( None ); } void Colour::use( Code _colourCode ) { static IColourImpl* impl = platformColourInstance(); impl->use( _colourCode ); } } // end namespace Catch // #included from: catch_generators_impl.hpp #define TWOBLUECUBES_CATCH_GENERATORS_IMPL_HPP_INCLUDED #include #include #include namespace Catch { struct GeneratorInfo : IGeneratorInfo { GeneratorInfo( std::size_t size ) : m_size( size ), m_currentIndex( 0 ) {} bool moveNext() { if( ++m_currentIndex == m_size ) { m_currentIndex = 0; return false; } return true; } std::size_t getCurrentIndex() const { return m_currentIndex; } std::size_t m_size; std::size_t m_currentIndex; }; /////////////////////////////////////////////////////////////////////////// class GeneratorsForTest : public IGeneratorsForTest { public: ~GeneratorsForTest() { deleteAll( m_generatorsInOrder ); } IGeneratorInfo& getGeneratorInfo( std::string const& fileInfo, std::size_t size ) { std::map::const_iterator it = m_generatorsByName.find( fileInfo ); if( it == m_generatorsByName.end() ) { IGeneratorInfo* info = new GeneratorInfo( size ); m_generatorsByName.insert( std::make_pair( fileInfo, info ) ); m_generatorsInOrder.push_back( info ); return *info; } return *it->second; } bool moveNext() { std::vector::const_iterator it = m_generatorsInOrder.begin(); std::vector::const_iterator itEnd = m_generatorsInOrder.end(); for(; it != itEnd; ++it ) { if( (*it)->moveNext() ) return true; } return false; } private: std::map m_generatorsByName; std::vector m_generatorsInOrder; }; IGeneratorsForTest* createGeneratorsForTest() { return new GeneratorsForTest(); } } // end namespace Catch // #included from: catch_assertionresult.hpp #define TWOBLUECUBES_CATCH_ASSERTIONRESULT_HPP_INCLUDED namespace Catch { AssertionInfo::AssertionInfo( char const * _macroName, SourceLineInfo const& _lineInfo, char const * _capturedExpression, ResultDisposition::Flags _resultDisposition, char const * _secondArg) : macroName( _macroName ), lineInfo( _lineInfo ), capturedExpression( _capturedExpression ), resultDisposition( _resultDisposition ), secondArg( _secondArg ) {} AssertionResult::AssertionResult() {} AssertionResult::AssertionResult( AssertionInfo const& info, AssertionResultData const& data ) : m_info( info ), m_resultData( data ) {} AssertionResult::~AssertionResult() {} // Result was a success bool AssertionResult::succeeded() const { return Catch::isOk( m_resultData.resultType ); } // Result was a success, or failure is suppressed bool AssertionResult::isOk() const { return Catch::isOk( m_resultData.resultType ) || shouldSuppressFailure( m_info.resultDisposition ); } ResultWas::OfType AssertionResult::getResultType() const { return m_resultData.resultType; } bool AssertionResult::hasExpression() const { return m_info.capturedExpression[0] != 0; } bool AssertionResult::hasMessage() const { return !m_resultData.message.empty(); } std::string capturedExpressionWithSecondArgument( char const * capturedExpression, char const * secondArg ) 
{ return (secondArg[0] == 0 || secondArg[0] == '"' && secondArg[1] == '"') ? capturedExpression : std::string(capturedExpression) + ", " + secondArg; } std::string AssertionResult::getExpression() const { if( isFalseTest( m_info.resultDisposition ) ) return '!' + capturedExpressionWithSecondArgument(m_info.capturedExpression, m_info.secondArg); else return capturedExpressionWithSecondArgument(m_info.capturedExpression, m_info.secondArg); } std::string AssertionResult::getExpressionInMacro() const { if( m_info.macroName[0] == 0 ) return capturedExpressionWithSecondArgument(m_info.capturedExpression, m_info.secondArg); else return std::string(m_info.macroName) + "( " + capturedExpressionWithSecondArgument(m_info.capturedExpression, m_info.secondArg) + " )"; } bool AssertionResult::hasExpandedExpression() const { return hasExpression() && getExpandedExpression() != getExpression(); } std::string AssertionResult::getExpandedExpression() const { return m_resultData.reconstructExpression(); } std::string AssertionResult::getMessage() const { return m_resultData.message; } SourceLineInfo AssertionResult::getSourceInfo() const { return m_info.lineInfo; } std::string AssertionResult::getTestMacroName() const { return m_info.macroName; } void AssertionResult::discardDecomposedExpression() const { m_resultData.decomposedExpression = CATCH_NULL; } void AssertionResult::expandDecomposedExpression() const { m_resultData.reconstructExpression(); } } // end namespace Catch // #included from: catch_test_case_info.hpp #define TWOBLUECUBES_CATCH_TEST_CASE_INFO_HPP_INCLUDED #include namespace Catch { inline TestCaseInfo::SpecialProperties parseSpecialTag( std::string const& tag ) { if( startsWith( tag, '.' ) || tag == "hide" || tag == "!hide" ) return TestCaseInfo::IsHidden; else if( tag == "!throws" ) return TestCaseInfo::Throws; else if( tag == "!shouldfail" ) return TestCaseInfo::ShouldFail; else if( tag == "!mayfail" ) return TestCaseInfo::MayFail; else if( tag == "!nonportable" ) return TestCaseInfo::NonPortable; else return TestCaseInfo::None; } inline bool isReservedTag( std::string const& tag ) { return parseSpecialTag( tag ) == TestCaseInfo::None && tag.size() > 0 && !std::isalnum( tag[0] ); } inline void enforceNotReservedTag( std::string const& tag, SourceLineInfo const& _lineInfo ) { if( isReservedTag( tag ) ) { std::ostringstream ss; ss << Colour(Colour::Red) << "Tag name [" << tag << "] not allowed.\n" << "Tag names starting with non alpha-numeric characters are reserved\n" << Colour(Colour::FileName) << _lineInfo << '\n'; throw std::runtime_error(ss.str()); } } TestCase makeTestCase( ITestCase* _testCase, std::string const& _className, std::string const& _name, std::string const& _descOrTags, SourceLineInfo const& _lineInfo ) { bool isHidden( startsWith( _name, "./" ) ); // Legacy support // Parse out tags std::set tags; std::string desc, tag; bool inTag = false; for( std::size_t i = 0; i < _descOrTags.size(); ++i ) { char c = _descOrTags[i]; if( !inTag ) { if( c == '[' ) inTag = true; else desc += c; } else { if( c == ']' ) { TestCaseInfo::SpecialProperties prop = parseSpecialTag( tag ); if( prop == TestCaseInfo::IsHidden ) isHidden = true; else if( prop == TestCaseInfo::None ) enforceNotReservedTag( tag, _lineInfo ); tags.insert( tag ); tag.clear(); inTag = false; } else tag += c; } } if( isHidden ) { tags.insert( "hide" ); tags.insert( "." 
); } TestCaseInfo info( _name, _className, desc, tags, _lineInfo ); return TestCase( _testCase, info ); } void setTags( TestCaseInfo& testCaseInfo, std::set const& tags ) { testCaseInfo.tags = tags; testCaseInfo.lcaseTags.clear(); std::ostringstream oss; for( std::set::const_iterator it = tags.begin(), itEnd = tags.end(); it != itEnd; ++it ) { oss << '[' << *it << ']'; std::string lcaseTag = toLower( *it ); testCaseInfo.properties = static_cast( testCaseInfo.properties | parseSpecialTag( lcaseTag ) ); testCaseInfo.lcaseTags.insert( lcaseTag ); } testCaseInfo.tagsAsString = oss.str(); } TestCaseInfo::TestCaseInfo( std::string const& _name, std::string const& _className, std::string const& _description, std::set const& _tags, SourceLineInfo const& _lineInfo ) : name( _name ), className( _className ), description( _description ), lineInfo( _lineInfo ), properties( None ) { setTags( *this, _tags ); } TestCaseInfo::TestCaseInfo( TestCaseInfo const& other ) : name( other.name ), className( other.className ), description( other.description ), tags( other.tags ), lcaseTags( other.lcaseTags ), tagsAsString( other.tagsAsString ), lineInfo( other.lineInfo ), properties( other.properties ) {} bool TestCaseInfo::isHidden() const { return ( properties & IsHidden ) != 0; } bool TestCaseInfo::throws() const { return ( properties & Throws ) != 0; } bool TestCaseInfo::okToFail() const { return ( properties & (ShouldFail | MayFail ) ) != 0; } bool TestCaseInfo::expectedToFail() const { return ( properties & (ShouldFail ) ) != 0; } TestCase::TestCase( ITestCase* testCase, TestCaseInfo const& info ) : TestCaseInfo( info ), test( testCase ) {} TestCase::TestCase( TestCase const& other ) : TestCaseInfo( other ), test( other.test ) {} TestCase TestCase::withName( std::string const& _newName ) const { TestCase other( *this ); other.name = _newName; return other; } void TestCase::swap( TestCase& other ) { test.swap( other.test ); name.swap( other.name ); className.swap( other.className ); description.swap( other.description ); tags.swap( other.tags ); lcaseTags.swap( other.lcaseTags ); tagsAsString.swap( other.tagsAsString ); std::swap( TestCaseInfo::properties, static_cast( other ).properties ); std::swap( lineInfo, other.lineInfo ); } void TestCase::invoke() const { test->invoke(); } bool TestCase::operator == ( TestCase const& other ) const { return test.get() == other.test.get() && name == other.name && className == other.className; } bool TestCase::operator < ( TestCase const& other ) const { return name < other.name; } TestCase& TestCase::operator = ( TestCase const& other ) { TestCase temp( other ); swap( temp ); return *this; } TestCaseInfo const& TestCase::getTestCaseInfo() const { return *this; } } // end namespace Catch // #included from: catch_version.hpp #define TWOBLUECUBES_CATCH_VERSION_HPP_INCLUDED namespace Catch { Version::Version ( unsigned int _majorVersion, unsigned int _minorVersion, unsigned int _patchNumber, char const * const _branchName, unsigned int _buildNumber ) : majorVersion( _majorVersion ), minorVersion( _minorVersion ), patchNumber( _patchNumber ), branchName( _branchName ), buildNumber( _buildNumber ) {} std::ostream& operator << ( std::ostream& os, Version const& version ) { os << version.majorVersion << '.' << version.minorVersion << '.' << version.patchNumber; // branchName is never null -> 0th char is \0 if it is empty if (version.branchName[0]) { os << '-' << version.branchName << '.' 
<< version.buildNumber; } return os; } inline Version libraryVersion() { static Version version( 1, 9, 6, "", 0 ); return version; } } // #included from: catch_message.hpp #define TWOBLUECUBES_CATCH_MESSAGE_HPP_INCLUDED namespace Catch { MessageInfo::MessageInfo( std::string const& _macroName, SourceLineInfo const& _lineInfo, ResultWas::OfType _type ) : macroName( _macroName ), lineInfo( _lineInfo ), type( _type ), sequence( ++globalCount ) {} // This may need protecting if threading support is added unsigned int MessageInfo::globalCount = 0; //////////////////////////////////////////////////////////////////////////// ScopedMessage::ScopedMessage( MessageBuilder const& builder ) : m_info( builder.m_info ) { m_info.message = builder.m_stream.str(); getResultCapture().pushScopedMessage( m_info ); } ScopedMessage::ScopedMessage( ScopedMessage const& other ) : m_info( other.m_info ) {} ScopedMessage::~ScopedMessage() { if ( !std::uncaught_exception() ){ getResultCapture().popScopedMessage(m_info); } } } // end namespace Catch // #included from: catch_legacy_reporter_adapter.hpp #define TWOBLUECUBES_CATCH_LEGACY_REPORTER_ADAPTER_HPP_INCLUDED // #included from: catch_legacy_reporter_adapter.h #define TWOBLUECUBES_CATCH_LEGACY_REPORTER_ADAPTER_H_INCLUDED namespace Catch { // Deprecated struct IReporter : IShared { virtual ~IReporter(); virtual bool shouldRedirectStdout() const = 0; virtual void StartTesting() = 0; virtual void EndTesting( Totals const& totals ) = 0; virtual void StartGroup( std::string const& groupName ) = 0; virtual void EndGroup( std::string const& groupName, Totals const& totals ) = 0; virtual void StartTestCase( TestCaseInfo const& testInfo ) = 0; virtual void EndTestCase( TestCaseInfo const& testInfo, Totals const& totals, std::string const& stdOut, std::string const& stdErr ) = 0; virtual void StartSection( std::string const& sectionName, std::string const& description ) = 0; virtual void EndSection( std::string const& sectionName, Counts const& assertions ) = 0; virtual void NoAssertionsInSection( std::string const& sectionName ) = 0; virtual void NoAssertionsInTestCase( std::string const& testName ) = 0; virtual void Aborted() = 0; virtual void Result( AssertionResult const& result ) = 0; }; class LegacyReporterAdapter : public SharedImpl { public: LegacyReporterAdapter( Ptr const& legacyReporter ); virtual ~LegacyReporterAdapter(); virtual ReporterPreferences getPreferences() const; virtual void noMatchingTestCases( std::string const& ); virtual void testRunStarting( TestRunInfo const& ); virtual void testGroupStarting( GroupInfo const& groupInfo ); virtual void testCaseStarting( TestCaseInfo const& testInfo ); virtual void sectionStarting( SectionInfo const& sectionInfo ); virtual void assertionStarting( AssertionInfo const& ); virtual bool assertionEnded( AssertionStats const& assertionStats ); virtual void sectionEnded( SectionStats const& sectionStats ); virtual void testCaseEnded( TestCaseStats const& testCaseStats ); virtual void testGroupEnded( TestGroupStats const& testGroupStats ); virtual void testRunEnded( TestRunStats const& testRunStats ); virtual void skipTest( TestCaseInfo const& ); private: Ptr m_legacyReporter; }; } namespace Catch { LegacyReporterAdapter::LegacyReporterAdapter( Ptr const& legacyReporter ) : m_legacyReporter( legacyReporter ) {} LegacyReporterAdapter::~LegacyReporterAdapter() {} ReporterPreferences LegacyReporterAdapter::getPreferences() const { ReporterPreferences prefs; prefs.shouldRedirectStdOut = 
m_legacyReporter->shouldRedirectStdout(); return prefs; } void LegacyReporterAdapter::noMatchingTestCases( std::string const& ) {} void LegacyReporterAdapter::testRunStarting( TestRunInfo const& ) { m_legacyReporter->StartTesting(); } void LegacyReporterAdapter::testGroupStarting( GroupInfo const& groupInfo ) { m_legacyReporter->StartGroup( groupInfo.name ); } void LegacyReporterAdapter::testCaseStarting( TestCaseInfo const& testInfo ) { m_legacyReporter->StartTestCase( testInfo ); } void LegacyReporterAdapter::sectionStarting( SectionInfo const& sectionInfo ) { m_legacyReporter->StartSection( sectionInfo.name, sectionInfo.description ); } void LegacyReporterAdapter::assertionStarting( AssertionInfo const& ) { // Not on legacy interface } bool LegacyReporterAdapter::assertionEnded( AssertionStats const& assertionStats ) { if( assertionStats.assertionResult.getResultType() != ResultWas::Ok ) { for( std::vector::const_iterator it = assertionStats.infoMessages.begin(), itEnd = assertionStats.infoMessages.end(); it != itEnd; ++it ) { if( it->type == ResultWas::Info ) { ResultBuilder rb( it->macroName.c_str(), it->lineInfo, "", ResultDisposition::Normal ); rb << it->message; rb.setResultType( ResultWas::Info ); AssertionResult result = rb.build(); m_legacyReporter->Result( result ); } } } m_legacyReporter->Result( assertionStats.assertionResult ); return true; } void LegacyReporterAdapter::sectionEnded( SectionStats const& sectionStats ) { if( sectionStats.missingAssertions ) m_legacyReporter->NoAssertionsInSection( sectionStats.sectionInfo.name ); m_legacyReporter->EndSection( sectionStats.sectionInfo.name, sectionStats.assertions ); } void LegacyReporterAdapter::testCaseEnded( TestCaseStats const& testCaseStats ) { m_legacyReporter->EndTestCase ( testCaseStats.testInfo, testCaseStats.totals, testCaseStats.stdOut, testCaseStats.stdErr ); } void LegacyReporterAdapter::testGroupEnded( TestGroupStats const& testGroupStats ) { if( testGroupStats.aborting ) m_legacyReporter->Aborted(); m_legacyReporter->EndGroup( testGroupStats.groupInfo.name, testGroupStats.totals ); } void LegacyReporterAdapter::testRunEnded( TestRunStats const& testRunStats ) { m_legacyReporter->EndTesting( testRunStats.totals ); } void LegacyReporterAdapter::skipTest( TestCaseInfo const& ) { } } // #included from: catch_timer.hpp #ifdef __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wc++11-long-long" #endif #ifdef CATCH_PLATFORM_WINDOWS #else #include #endif namespace Catch { namespace { #ifdef CATCH_PLATFORM_WINDOWS UInt64 getCurrentTicks() { static UInt64 hz=0, hzo=0; if (!hz) { QueryPerformanceFrequency( reinterpret_cast( &hz ) ); QueryPerformanceCounter( reinterpret_cast( &hzo ) ); } UInt64 t; QueryPerformanceCounter( reinterpret_cast( &t ) ); return ((t-hzo)*1000000)/hz; } #else UInt64 getCurrentTicks() { timeval t; gettimeofday(&t,CATCH_NULL); return static_cast( t.tv_sec ) * 1000000ull + static_cast( t.tv_usec ); } #endif } void Timer::start() { m_ticks = getCurrentTicks(); } unsigned int Timer::getElapsedMicroseconds() const { return static_cast(getCurrentTicks() - m_ticks); } unsigned int Timer::getElapsedMilliseconds() const { return static_cast(getElapsedMicroseconds()/1000); } double Timer::getElapsedSeconds() const { return getElapsedMicroseconds()/1000000.0; } } // namespace Catch #ifdef __clang__ # pragma clang diagnostic pop #endif // #included from: catch_common.hpp #define TWOBLUECUBES_CATCH_COMMON_HPP_INCLUDED #include #include namespace Catch { bool startsWith( std::string 
const& s, std::string const& prefix ) { return s.size() >= prefix.size() && std::equal(prefix.begin(), prefix.end(), s.begin()); } bool startsWith( std::string const& s, char prefix ) { return !s.empty() && s[0] == prefix; } bool endsWith( std::string const& s, std::string const& suffix ) { return s.size() >= suffix.size() && std::equal(suffix.rbegin(), suffix.rend(), s.rbegin()); } bool endsWith( std::string const& s, char suffix ) { return !s.empty() && s[s.size()-1] == suffix; } bool contains( std::string const& s, std::string const& infix ) { return s.find( infix ) != std::string::npos; } char toLowerCh(char c) { return static_cast( std::tolower( c ) ); } void toLowerInPlace( std::string& s ) { std::transform( s.begin(), s.end(), s.begin(), toLowerCh ); } std::string toLower( std::string const& s ) { std::string lc = s; toLowerInPlace( lc ); return lc; } std::string trim( std::string const& str ) { static char const* whitespaceChars = "\n\r\t "; std::string::size_type start = str.find_first_not_of( whitespaceChars ); std::string::size_type end = str.find_last_not_of( whitespaceChars ); return start != std::string::npos ? str.substr( start, 1+end-start ) : std::string(); } bool replaceInPlace( std::string& str, std::string const& replaceThis, std::string const& withThis ) { bool replaced = false; std::size_t i = str.find( replaceThis ); while( i != std::string::npos ) { replaced = true; str = str.substr( 0, i ) + withThis + str.substr( i+replaceThis.size() ); if( i < str.size()-withThis.size() ) i = str.find( replaceThis, i+withThis.size() ); else i = std::string::npos; } return replaced; } pluralise::pluralise( std::size_t count, std::string const& label ) : m_count( count ), m_label( label ) {} std::ostream& operator << ( std::ostream& os, pluralise const& pluraliser ) { os << pluraliser.m_count << ' ' << pluraliser.m_label; if( pluraliser.m_count != 1 ) os << 's'; return os; } SourceLineInfo::SourceLineInfo() : file(""), line( 0 ){} SourceLineInfo::SourceLineInfo( char const* _file, std::size_t _line ) : file( _file ), line( _line ) {} bool SourceLineInfo::empty() const { return file[0] == '\0'; } bool SourceLineInfo::operator == ( SourceLineInfo const& other ) const { return line == other.line && (file == other.file || std::strcmp(file, other.file) == 0); } bool SourceLineInfo::operator < ( SourceLineInfo const& other ) const { return line < other.line || ( line == other.line && (std::strcmp(file, other.file) < 0)); } void seedRng( IConfig const& config ) { if( config.rngSeed() != 0 ) srand( config.rngSeed() ); } unsigned int rngSeed() { return getCurrentContext().getConfig()->rngSeed(); } std::ostream& operator << ( std::ostream& os, SourceLineInfo const& info ) { #ifndef __GNUG__ os << info.file << '(' << info.line << ')'; #else os << info.file << ':' << info.line; #endif return os; } void throwLogicError( std::string const& message, SourceLineInfo const& locationInfo ) { std::ostringstream oss; oss << locationInfo << ": Internal Catch error: '" << message << '\''; if( alwaysTrue() ) throw std::logic_error( oss.str() ); } } // #included from: catch_section.hpp #define TWOBLUECUBES_CATCH_SECTION_HPP_INCLUDED namespace Catch { SectionInfo::SectionInfo ( SourceLineInfo const& _lineInfo, std::string const& _name, std::string const& _description ) : name( _name ), description( _description ), lineInfo( _lineInfo ) {} Section::Section( SectionInfo const& info ) : m_info( info ), m_sectionIncluded( getResultCapture().sectionStarted( m_info, m_assertions ) ) { m_timer.start(); } #if 
defined(_MSC_VER) # pragma warning(push) # pragma warning(disable:4996) // std::uncaught_exception is deprecated in C++17 #endif Section::~Section() { if( m_sectionIncluded ) { SectionEndInfo endInfo( m_info, m_assertions, m_timer.getElapsedSeconds() ); if( std::uncaught_exception() ) getResultCapture().sectionEndedEarly( endInfo ); else getResultCapture().sectionEnded( endInfo ); } } #if defined(_MSC_VER) # pragma warning(pop) #endif // This indicates whether the section should be executed or not Section::operator bool() const { return m_sectionIncluded; } } // end namespace Catch // #included from: catch_debugger.hpp #define TWOBLUECUBES_CATCH_DEBUGGER_HPP_INCLUDED #ifdef CATCH_PLATFORM_MAC #include #include #include #include #include namespace Catch{ // The following function is taken directly from the following technical note: // http://developer.apple.com/library/mac/#qa/qa2004/qa1361.html // Returns true if the current process is being debugged (either // running under the debugger or has a debugger attached post facto). bool isDebuggerActive(){ int mib[4]; struct kinfo_proc info; size_t size; // Initialize the flags so that, if sysctl fails for some bizarre // reason, we get a predictable result. info.kp_proc.p_flag = 0; // Initialize mib, which tells sysctl the info we want, in this case // we're looking for information about a specific process ID. mib[0] = CTL_KERN; mib[1] = KERN_PROC; mib[2] = KERN_PROC_PID; mib[3] = getpid(); // Call sysctl. size = sizeof(info); if( sysctl(mib, sizeof(mib) / sizeof(*mib), &info, &size, CATCH_NULL, 0) != 0 ) { Catch::cerr() << "\n** Call to sysctl failed - unable to determine if debugger is active **\n" << std::endl; return false; } // We're being debugged if the P_TRACED flag is set. return ( (info.kp_proc.p_flag & P_TRACED) != 0 ); } } // namespace Catch #elif defined(CATCH_PLATFORM_LINUX) #include #include namespace Catch{ // The standard POSIX way of detecting a debugger is to attempt to // ptrace() the process, but this needs to be done from a child and not // this process itself to still allow attaching to this process later // if wanted, so is rather heavy. Under Linux we have the PID of the // "debugger" (which doesn't need to be gdb, of course, it could also // be strace, for example) in /proc/$PID/status, so just get it from // there instead. bool isDebuggerActive(){ // Libstdc++ has a bug, where std::ifstream sets errno to 0 // This way our users can properly assert over errno values ErrnoGuard guard; std::ifstream in("/proc/self/status"); for( std::string line; std::getline(in, line); ) { static const int PREFIX_LEN = 11; if( line.compare(0, PREFIX_LEN, "TracerPid:\t") == 0 ) { // We're traced if the PID is not 0 and no other PID starts // with 0 digit, so it's enough to check for just a single // character. 
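// For example, a status file containing the line "TracerPid:\t0" means no
// tracer is attached, while "TracerPid:\t2113" means process 2113 (gdb,
// strace, ...) is tracing us.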
return line.length() > PREFIX_LEN && line[PREFIX_LEN] != '0'; } } return false; } } // namespace Catch #elif defined(_MSC_VER) extern "C" __declspec(dllimport) int __stdcall IsDebuggerPresent(); namespace Catch { bool isDebuggerActive() { return IsDebuggerPresent() != 0; } } #elif defined(__MINGW32__) extern "C" __declspec(dllimport) int __stdcall IsDebuggerPresent(); namespace Catch { bool isDebuggerActive() { return IsDebuggerPresent() != 0; } } #else namespace Catch { inline bool isDebuggerActive() { return false; } } #endif // Platform #ifdef CATCH_PLATFORM_WINDOWS namespace Catch { void writeToDebugConsole( std::string const& text ) { ::OutputDebugStringA( text.c_str() ); } } #else namespace Catch { void writeToDebugConsole( std::string const& text ) { // !TBD: Need a version for Mac/ XCode and other IDEs Catch::cout() << text; } } #endif // Platform // #included from: catch_tostring.hpp #define TWOBLUECUBES_CATCH_TOSTRING_HPP_INCLUDED namespace Catch { namespace Detail { const std::string unprintableString = "{?}"; namespace { const int hexThreshold = 255; struct Endianness { enum Arch { Big, Little }; static Arch which() { union _{ int asInt; char asChar[sizeof (int)]; } u; u.asInt = 1; return ( u.asChar[sizeof(int)-1] == 1 ) ? Big : Little; } }; } std::string rawMemoryToString( const void *object, std::size_t size ) { // Reverse order for little endian architectures int i = 0, end = static_cast( size ), inc = 1; if( Endianness::which() == Endianness::Little ) { i = end-1; end = inc = -1; } unsigned char const *bytes = static_cast(object); std::ostringstream os; os << "0x" << std::setfill('0') << std::hex; for( ; i != end; i += inc ) os << std::setw(2) << static_cast(bytes[i]); return os.str(); } } std::string toString( std::string const& value ) { std::string s = value; if( getCurrentContext().getConfig()->showInvisibles() ) { for(size_t i = 0; i < s.size(); ++i ) { std::string subs; switch( s[i] ) { case '\n': subs = "\\n"; break; case '\t': subs = "\\t"; break; default: break; } if( !subs.empty() ) { s = s.substr( 0, i ) + subs + s.substr( i+1 ); ++i; } } } return '"' + s + '"'; } std::string toString( std::wstring const& value ) { std::string s; s.reserve( value.size() ); for(size_t i = 0; i < value.size(); ++i ) s += value[i] <= 0xff ? static_cast( value[i] ) : '?'; return Catch::toString( s ); } std::string toString( const char* const value ) { return value ? Catch::toString( std::string( value ) ) : std::string( "{null string}" ); } std::string toString( char* const value ) { return Catch::toString( static_cast( value ) ); } std::string toString( const wchar_t* const value ) { return value ? 
Catch::toString( std::wstring(value) ) : std::string( "{null string}" ); } std::string toString( wchar_t* const value ) { return Catch::toString( static_cast( value ) ); } std::string toString( int value ) { std::ostringstream oss; oss << value; if( value > Detail::hexThreshold ) oss << " (0x" << std::hex << value << ')'; return oss.str(); } std::string toString( unsigned long value ) { std::ostringstream oss; oss << value; if( value > Detail::hexThreshold ) oss << " (0x" << std::hex << value << ')'; return oss.str(); } std::string toString( unsigned int value ) { return Catch::toString( static_cast( value ) ); } template std::string fpToString( T value, int precision ) { std::ostringstream oss; oss << std::setprecision( precision ) << std::fixed << value; std::string d = oss.str(); std::size_t i = d.find_last_not_of( '0' ); if( i != std::string::npos && i != d.size()-1 ) { if( d[i] == '.' ) i++; d = d.substr( 0, i+1 ); } return d; } std::string toString( const double value ) { return fpToString( value, 10 ); } std::string toString( const float value ) { return fpToString( value, 5 ) + 'f'; } std::string toString( bool value ) { return value ? "true" : "false"; } std::string toString( char value ) { if ( value == '\r' ) return "'\\r'"; if ( value == '\f' ) return "'\\f'"; if ( value == '\n' ) return "'\\n'"; if ( value == '\t' ) return "'\\t'"; if ( '\0' <= value && value < ' ' ) return toString( static_cast( value ) ); char chstr[] = "' '"; chstr[1] = value; return chstr; } std::string toString( signed char value ) { return toString( static_cast( value ) ); } std::string toString( unsigned char value ) { return toString( static_cast( value ) ); } #ifdef CATCH_CONFIG_CPP11_LONG_LONG std::string toString( long long value ) { std::ostringstream oss; oss << value; if( value > Detail::hexThreshold ) oss << " (0x" << std::hex << value << ')'; return oss.str(); } std::string toString( unsigned long long value ) { std::ostringstream oss; oss << value; if( value > Detail::hexThreshold ) oss << " (0x" << std::hex << value << ')'; return oss.str(); } #endif #ifdef CATCH_CONFIG_CPP11_NULLPTR std::string toString( std::nullptr_t ) { return "nullptr"; } #endif #ifdef __OBJC__ std::string toString( NSString const * const& nsstring ) { if( !nsstring ) return "nil"; return "@" + toString([nsstring UTF8String]); } std::string toString( NSString * CATCH_ARC_STRONG & nsstring ) { if( !nsstring ) return "nil"; return "@" + toString([nsstring UTF8String]); } std::string toString( NSObject* const& nsObject ) { return toString( [nsObject description] ); } #endif } // end namespace Catch // #included from: catch_result_builder.hpp #define TWOBLUECUBES_CATCH_RESULT_BUILDER_HPP_INCLUDED namespace Catch { ResultBuilder::ResultBuilder( char const* macroName, SourceLineInfo const& lineInfo, char const* capturedExpression, ResultDisposition::Flags resultDisposition, char const* secondArg ) : m_assertionInfo( macroName, lineInfo, capturedExpression, resultDisposition, secondArg ), m_shouldDebugBreak( false ), m_shouldThrow( false ), m_guardException( false ) { m_stream().oss.str(""); } ResultBuilder::~ResultBuilder() { #if defined(CATCH_CONFIG_FAST_COMPILE) if ( m_guardException ) { m_stream().oss << "Exception translation was disabled by CATCH_CONFIG_FAST_COMPILE"; captureResult( ResultWas::ThrewException ); getCurrentContext().getResultCapture()->exceptionEarlyReported(); } #endif } ResultBuilder& ResultBuilder::setResultType( ResultWas::OfType result ) { m_data.resultType = result; return *this; } ResultBuilder& 
ResultBuilder::setResultType( bool result ) { m_data.resultType = result ? ResultWas::Ok : ResultWas::ExpressionFailed; return *this; } void ResultBuilder::endExpression( DecomposedExpression const& expr ) { AssertionResult result = build( expr ); handleResult( result ); } void ResultBuilder::useActiveException( ResultDisposition::Flags resultDisposition ) { m_assertionInfo.resultDisposition = resultDisposition; m_stream().oss << Catch::translateActiveException(); captureResult( ResultWas::ThrewException ); } void ResultBuilder::captureResult( ResultWas::OfType resultType ) { setResultType( resultType ); captureExpression(); } void ResultBuilder::captureExpectedException( std::string const& expectedMessage ) { if( expectedMessage.empty() ) captureExpectedException( Matchers::Impl::MatchAllOf() ); else captureExpectedException( Matchers::Equals( expectedMessage ) ); } void ResultBuilder::captureExpectedException( Matchers::Impl::MatcherBase const& matcher ) { assert( !isFalseTest( m_assertionInfo.resultDisposition ) ); AssertionResultData data = m_data; data.resultType = ResultWas::Ok; data.reconstructedExpression = capturedExpressionWithSecondArgument(m_assertionInfo.capturedExpression, m_assertionInfo.secondArg); std::string actualMessage = Catch::translateActiveException(); if( !matcher.match( actualMessage ) ) { data.resultType = ResultWas::ExpressionFailed; data.reconstructedExpression = actualMessage; } AssertionResult result( m_assertionInfo, data ); handleResult( result ); } void ResultBuilder::captureExpression() { AssertionResult result = build(); handleResult( result ); } void ResultBuilder::handleResult( AssertionResult const& result ) { getResultCapture().assertionEnded( result ); if( !result.isOk() ) { if( getCurrentContext().getConfig()->shouldDebugBreak() ) m_shouldDebugBreak = true; if( getCurrentContext().getRunner()->aborting() || (m_assertionInfo.resultDisposition & ResultDisposition::Normal) ) m_shouldThrow = true; } } void ResultBuilder::react() { #if defined(CATCH_CONFIG_FAST_COMPILE) if (m_shouldDebugBreak) { /////////////////////////////////////////////////////////////////// // To inspect the state during test, you need to go one level up the callstack // To go back to the test and change execution, jump over the throw statement /////////////////////////////////////////////////////////////////// CATCH_BREAK_INTO_DEBUGGER(); } #endif if( m_shouldThrow ) throw Catch::TestFailureException(); } bool ResultBuilder::shouldDebugBreak() const { return m_shouldDebugBreak; } bool ResultBuilder::allowThrows() const { return getCurrentContext().getConfig()->allowThrows(); } AssertionResult ResultBuilder::build() const { return build( *this ); } // CAVEAT: The returned AssertionResult stores a pointer to the argument expr, // a temporary DecomposedExpression, which in turn holds references to // operands, possibly temporary as well. // It should immediately be passed to handleResult; if the expression // needs to be reported, its string expansion must be composed before // the temporaries are destroyed. 
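// Usage sketch (illustrative only -- the test name, tag and values are
// made-up examples, not part of this header): an assertion such as
//
//     TEST_CASE( "addition", "[arithmetic]" ) {
//         REQUIRE( 1 + 1 == 2 );
//     }
//
// expands into a ResultBuilder; the captured expression text "1 + 1 == 2" is
// stored, and its expanded form (with operand values filled in) is only
// reconstructed when the result actually needs to be reported.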
AssertionResult ResultBuilder::build( DecomposedExpression const& expr ) const { assert( m_data.resultType != ResultWas::Unknown ); AssertionResultData data = m_data; // Flip bool results if FalseTest flag is set if( isFalseTest( m_assertionInfo.resultDisposition ) ) { data.negate( expr.isBinaryExpression() ); } data.message = m_stream().oss.str(); data.decomposedExpression = &expr; // for lazy reconstruction return AssertionResult( m_assertionInfo, data ); } void ResultBuilder::reconstructExpression( std::string& dest ) const { dest = capturedExpressionWithSecondArgument(m_assertionInfo.capturedExpression, m_assertionInfo.secondArg); } void ResultBuilder::setExceptionGuard() { m_guardException = true; } void ResultBuilder::unsetExceptionGuard() { m_guardException = false; } } // end namespace Catch // #included from: catch_tag_alias_registry.hpp #define TWOBLUECUBES_CATCH_TAG_ALIAS_REGISTRY_HPP_INCLUDED namespace Catch { TagAliasRegistry::~TagAliasRegistry() {} Option TagAliasRegistry::find( std::string const& alias ) const { std::map::const_iterator it = m_registry.find( alias ); if( it != m_registry.end() ) return it->second; else return Option(); } std::string TagAliasRegistry::expandAliases( std::string const& unexpandedTestSpec ) const { std::string expandedTestSpec = unexpandedTestSpec; for( std::map::const_iterator it = m_registry.begin(), itEnd = m_registry.end(); it != itEnd; ++it ) { std::size_t pos = expandedTestSpec.find( it->first ); if( pos != std::string::npos ) { expandedTestSpec = expandedTestSpec.substr( 0, pos ) + it->second.tag + expandedTestSpec.substr( pos + it->first.size() ); } } return expandedTestSpec; } void TagAliasRegistry::add( std::string const& alias, std::string const& tag, SourceLineInfo const& lineInfo ) { if( !startsWith( alias, "[@" ) || !endsWith( alias, ']' ) ) { std::ostringstream oss; oss << Colour( Colour::Red ) << "error: tag alias, \"" << alias << "\" is not of the form [@alias name].\n" << Colour( Colour::FileName ) << lineInfo << '\n'; throw std::domain_error( oss.str().c_str() ); } if( !m_registry.insert( std::make_pair( alias, TagAlias( tag, lineInfo ) ) ).second ) { std::ostringstream oss; oss << Colour( Colour::Red ) << "error: tag alias, \"" << alias << "\" already registered.\n" << "\tFirst seen at " << Colour( Colour::Red ) << find(alias)->lineInfo << '\n' << Colour( Colour::Red ) << "\tRedefined at " << Colour( Colour::FileName) << lineInfo << '\n'; throw std::domain_error( oss.str().c_str() ); } } ITagAliasRegistry::~ITagAliasRegistry() {} ITagAliasRegistry const& ITagAliasRegistry::get() { return getRegistryHub().getTagAliasRegistry(); } RegistrarForTagAliases::RegistrarForTagAliases( char const* alias, char const* tag, SourceLineInfo const& lineInfo ) { getMutableRegistryHub().registerTagAlias( alias, tag, lineInfo ); } } // end namespace Catch // #included from: catch_matchers_string.hpp namespace Catch { namespace Matchers { namespace StdString { CasedString::CasedString( std::string const& str, CaseSensitive::Choice caseSensitivity ) : m_caseSensitivity( caseSensitivity ), m_str( adjustString( str ) ) {} std::string CasedString::adjustString( std::string const& str ) const { return m_caseSensitivity == CaseSensitive::No ? toLower( str ) : str; } std::string CasedString::caseSensitivitySuffix() const { return m_caseSensitivity == CaseSensitive::No ? 
" (case insensitive)" : std::string(); } StringMatcherBase::StringMatcherBase( std::string const& operation, CasedString const& comparator ) : m_comparator( comparator ), m_operation( operation ) { } std::string StringMatcherBase::describe() const { std::string description; description.reserve(5 + m_operation.size() + m_comparator.m_str.size() + m_comparator.caseSensitivitySuffix().size()); description += m_operation; description += ": \""; description += m_comparator.m_str; description += "\""; description += m_comparator.caseSensitivitySuffix(); return description; } EqualsMatcher::EqualsMatcher( CasedString const& comparator ) : StringMatcherBase( "equals", comparator ) {} bool EqualsMatcher::match( std::string const& source ) const { return m_comparator.adjustString( source ) == m_comparator.m_str; } ContainsMatcher::ContainsMatcher( CasedString const& comparator ) : StringMatcherBase( "contains", comparator ) {} bool ContainsMatcher::match( std::string const& source ) const { return contains( m_comparator.adjustString( source ), m_comparator.m_str ); } StartsWithMatcher::StartsWithMatcher( CasedString const& comparator ) : StringMatcherBase( "starts with", comparator ) {} bool StartsWithMatcher::match( std::string const& source ) const { return startsWith( m_comparator.adjustString( source ), m_comparator.m_str ); } EndsWithMatcher::EndsWithMatcher( CasedString const& comparator ) : StringMatcherBase( "ends with", comparator ) {} bool EndsWithMatcher::match( std::string const& source ) const { return endsWith( m_comparator.adjustString( source ), m_comparator.m_str ); } } // namespace StdString StdString::EqualsMatcher Equals( std::string const& str, CaseSensitive::Choice caseSensitivity ) { return StdString::EqualsMatcher( StdString::CasedString( str, caseSensitivity) ); } StdString::ContainsMatcher Contains( std::string const& str, CaseSensitive::Choice caseSensitivity ) { return StdString::ContainsMatcher( StdString::CasedString( str, caseSensitivity) ); } StdString::EndsWithMatcher EndsWith( std::string const& str, CaseSensitive::Choice caseSensitivity ) { return StdString::EndsWithMatcher( StdString::CasedString( str, caseSensitivity) ); } StdString::StartsWithMatcher StartsWith( std::string const& str, CaseSensitive::Choice caseSensitivity ) { return StdString::StartsWithMatcher( StdString::CasedString( str, caseSensitivity) ); } } // namespace Matchers } // namespace Catch // #included from: ../reporters/catch_reporter_multi.hpp #define TWOBLUECUBES_CATCH_REPORTER_MULTI_HPP_INCLUDED namespace Catch { class MultipleReporters : public SharedImpl { typedef std::vector > Reporters; Reporters m_reporters; public: void add( Ptr const& reporter ) { m_reporters.push_back( reporter ); } public: // IStreamingReporter virtual ReporterPreferences getPreferences() const CATCH_OVERRIDE { return m_reporters[0]->getPreferences(); } virtual void noMatchingTestCases( std::string const& spec ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->noMatchingTestCases( spec ); } virtual void testRunStarting( TestRunInfo const& testRunInfo ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->testRunStarting( testRunInfo ); } virtual void testGroupStarting( GroupInfo const& groupInfo ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->testGroupStarting( groupInfo ); } virtual 
void testCaseStarting( TestCaseInfo const& testInfo ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->testCaseStarting( testInfo ); } virtual void sectionStarting( SectionInfo const& sectionInfo ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->sectionStarting( sectionInfo ); } virtual void assertionStarting( AssertionInfo const& assertionInfo ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->assertionStarting( assertionInfo ); } // The return value indicates if the messages buffer should be cleared: virtual bool assertionEnded( AssertionStats const& assertionStats ) CATCH_OVERRIDE { bool clearBuffer = false; for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) clearBuffer |= (*it)->assertionEnded( assertionStats ); return clearBuffer; } virtual void sectionEnded( SectionStats const& sectionStats ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->sectionEnded( sectionStats ); } virtual void testCaseEnded( TestCaseStats const& testCaseStats ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->testCaseEnded( testCaseStats ); } virtual void testGroupEnded( TestGroupStats const& testGroupStats ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->testGroupEnded( testGroupStats ); } virtual void testRunEnded( TestRunStats const& testRunStats ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->testRunEnded( testRunStats ); } virtual void skipTest( TestCaseInfo const& testInfo ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->skipTest( testInfo ); } virtual MultipleReporters* tryAsMulti() CATCH_OVERRIDE { return this; } }; Ptr addReporter( Ptr const& existingReporter, Ptr const& additionalReporter ) { Ptr resultingReporter; if( existingReporter ) { MultipleReporters* multi = existingReporter->tryAsMulti(); if( !multi ) { multi = new MultipleReporters; resultingReporter = Ptr( multi ); if( existingReporter ) multi->add( existingReporter ); } else resultingReporter = existingReporter; multi->add( additionalReporter ); } else resultingReporter = additionalReporter; return resultingReporter; } } // end namespace Catch // #included from: ../reporters/catch_reporter_xml.hpp #define TWOBLUECUBES_CATCH_REPORTER_XML_HPP_INCLUDED // #included from: catch_reporter_bases.hpp #define TWOBLUECUBES_CATCH_REPORTER_BASES_HPP_INCLUDED #include #include #include #include namespace Catch { namespace { // Because formatting using c++ streams is stateful, drop down to C is required // Alternatively we could use stringstream, but its performance is... not good. 
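// Renders a duration in seconds with three decimal places, e.g. a value of
// 1.23456 comes back as "1.235".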
std::string getFormattedDuration( double duration ) { // Max exponent + 1 is required to represent the whole part // + 1 for decimal point // + 3 for the 3 decimal places // + 1 for null terminator const size_t maxDoubleSize = DBL_MAX_10_EXP + 1 + 1 + 3 + 1; char buffer[maxDoubleSize]; // Save previous errno, to prevent sprintf from overwriting it ErrnoGuard guard; #ifdef _MSC_VER sprintf_s(buffer, "%.3f", duration); #else sprintf(buffer, "%.3f", duration); #endif return std::string(buffer); } } struct StreamingReporterBase : SharedImpl { StreamingReporterBase( ReporterConfig const& _config ) : m_config( _config.fullConfig() ), stream( _config.stream() ) { m_reporterPrefs.shouldRedirectStdOut = false; } virtual ReporterPreferences getPreferences() const CATCH_OVERRIDE { return m_reporterPrefs; } virtual ~StreamingReporterBase() CATCH_OVERRIDE; virtual void noMatchingTestCases( std::string const& ) CATCH_OVERRIDE {} virtual void testRunStarting( TestRunInfo const& _testRunInfo ) CATCH_OVERRIDE { currentTestRunInfo = _testRunInfo; } virtual void testGroupStarting( GroupInfo const& _groupInfo ) CATCH_OVERRIDE { currentGroupInfo = _groupInfo; } virtual void testCaseStarting( TestCaseInfo const& _testInfo ) CATCH_OVERRIDE { currentTestCaseInfo = _testInfo; } virtual void sectionStarting( SectionInfo const& _sectionInfo ) CATCH_OVERRIDE { m_sectionStack.push_back( _sectionInfo ); } virtual void sectionEnded( SectionStats const& /* _sectionStats */ ) CATCH_OVERRIDE { m_sectionStack.pop_back(); } virtual void testCaseEnded( TestCaseStats const& /* _testCaseStats */ ) CATCH_OVERRIDE { currentTestCaseInfo.reset(); } virtual void testGroupEnded( TestGroupStats const& /* _testGroupStats */ ) CATCH_OVERRIDE { currentGroupInfo.reset(); } virtual void testRunEnded( TestRunStats const& /* _testRunStats */ ) CATCH_OVERRIDE { currentTestCaseInfo.reset(); currentGroupInfo.reset(); currentTestRunInfo.reset(); } virtual void skipTest( TestCaseInfo const& ) CATCH_OVERRIDE { // Don't do anything with this by default. // It can optionally be overridden in the derived class. 
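// A custom reporter that wants to record skipped test cases can override
// this and inspect the TestCaseInfo (name, tags, source location) it is
// given.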
} Ptr m_config; std::ostream& stream; LazyStat currentTestRunInfo; LazyStat currentGroupInfo; LazyStat currentTestCaseInfo; std::vector m_sectionStack; ReporterPreferences m_reporterPrefs; }; struct CumulativeReporterBase : SharedImpl { template struct Node : SharedImpl<> { explicit Node( T const& _value ) : value( _value ) {} virtual ~Node() {} typedef std::vector > ChildNodes; T value; ChildNodes children; }; struct SectionNode : SharedImpl<> { explicit SectionNode( SectionStats const& _stats ) : stats( _stats ) {} virtual ~SectionNode(); bool operator == ( SectionNode const& other ) const { return stats.sectionInfo.lineInfo == other.stats.sectionInfo.lineInfo; } bool operator == ( Ptr const& other ) const { return operator==( *other ); } SectionStats stats; typedef std::vector > ChildSections; typedef std::vector Assertions; ChildSections childSections; Assertions assertions; std::string stdOut; std::string stdErr; }; struct BySectionInfo { BySectionInfo( SectionInfo const& other ) : m_other( other ) {} BySectionInfo( BySectionInfo const& other ) : m_other( other.m_other ) {} bool operator() ( Ptr const& node ) const { return node->stats.sectionInfo.lineInfo == m_other.lineInfo; } private: void operator=( BySectionInfo const& ); SectionInfo const& m_other; }; typedef Node TestCaseNode; typedef Node TestGroupNode; typedef Node TestRunNode; CumulativeReporterBase( ReporterConfig const& _config ) : m_config( _config.fullConfig() ), stream( _config.stream() ) { m_reporterPrefs.shouldRedirectStdOut = false; } ~CumulativeReporterBase(); virtual ReporterPreferences getPreferences() const CATCH_OVERRIDE { return m_reporterPrefs; } virtual void testRunStarting( TestRunInfo const& ) CATCH_OVERRIDE {} virtual void testGroupStarting( GroupInfo const& ) CATCH_OVERRIDE {} virtual void testCaseStarting( TestCaseInfo const& ) CATCH_OVERRIDE {} virtual void sectionStarting( SectionInfo const& sectionInfo ) CATCH_OVERRIDE { SectionStats incompleteStats( sectionInfo, Counts(), 0, false ); Ptr node; if( m_sectionStack.empty() ) { if( !m_rootSection ) m_rootSection = new SectionNode( incompleteStats ); node = m_rootSection; } else { SectionNode& parentNode = *m_sectionStack.back(); SectionNode::ChildSections::const_iterator it = std::find_if( parentNode.childSections.begin(), parentNode.childSections.end(), BySectionInfo( sectionInfo ) ); if( it == parentNode.childSections.end() ) { node = new SectionNode( incompleteStats ); parentNode.childSections.push_back( node ); } else node = *it; } m_sectionStack.push_back( node ); m_deepestSection = node; } virtual void assertionStarting( AssertionInfo const& ) CATCH_OVERRIDE {} virtual bool assertionEnded( AssertionStats const& assertionStats ) CATCH_OVERRIDE { assert( !m_sectionStack.empty() ); SectionNode& sectionNode = *m_sectionStack.back(); sectionNode.assertions.push_back( assertionStats ); // AssertionResult holds a pointer to a temporary DecomposedExpression, // which getExpandedExpression() calls to build the expression string. // Our section stack copy of the assertionResult will likely outlive the // temporary, so it must be expanded or discarded now to avoid calling // a destroyed object later. 
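// prepareExpandedExpression(), invoked on the next line and defined further
// down in this class, does exactly that: it expands the expression for
// failing assertions and discards it for passing ones.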
prepareExpandedExpression( sectionNode.assertions.back().assertionResult ); return true; } virtual void sectionEnded( SectionStats const& sectionStats ) CATCH_OVERRIDE { assert( !m_sectionStack.empty() ); SectionNode& node = *m_sectionStack.back(); node.stats = sectionStats; m_sectionStack.pop_back(); } virtual void testCaseEnded( TestCaseStats const& testCaseStats ) CATCH_OVERRIDE { Ptr node = new TestCaseNode( testCaseStats ); assert( m_sectionStack.size() == 0 ); node->children.push_back( m_rootSection ); m_testCases.push_back( node ); m_rootSection.reset(); assert( m_deepestSection ); m_deepestSection->stdOut = testCaseStats.stdOut; m_deepestSection->stdErr = testCaseStats.stdErr; } virtual void testGroupEnded( TestGroupStats const& testGroupStats ) CATCH_OVERRIDE { Ptr node = new TestGroupNode( testGroupStats ); node->children.swap( m_testCases ); m_testGroups.push_back( node ); } virtual void testRunEnded( TestRunStats const& testRunStats ) CATCH_OVERRIDE { Ptr node = new TestRunNode( testRunStats ); node->children.swap( m_testGroups ); m_testRuns.push_back( node ); testRunEndedCumulative(); } virtual void testRunEndedCumulative() = 0; virtual void skipTest( TestCaseInfo const& ) CATCH_OVERRIDE {} virtual void prepareExpandedExpression( AssertionResult& result ) const { if( result.isOk() ) result.discardDecomposedExpression(); else result.expandDecomposedExpression(); } Ptr m_config; std::ostream& stream; std::vector m_assertions; std::vector > > m_sections; std::vector > m_testCases; std::vector > m_testGroups; std::vector > m_testRuns; Ptr m_rootSection; Ptr m_deepestSection; std::vector > m_sectionStack; ReporterPreferences m_reporterPrefs; }; template char const* getLineOfChars() { static char line[CATCH_CONFIG_CONSOLE_WIDTH] = {0}; if( !*line ) { std::memset( line, C, CATCH_CONFIG_CONSOLE_WIDTH-1 ); line[CATCH_CONFIG_CONSOLE_WIDTH-1] = 0; } return line; } struct TestEventListenerBase : StreamingReporterBase { TestEventListenerBase( ReporterConfig const& _config ) : StreamingReporterBase( _config ) {} virtual void assertionStarting( AssertionInfo const& ) CATCH_OVERRIDE {} virtual bool assertionEnded( AssertionStats const& ) CATCH_OVERRIDE { return false; } }; } // end namespace Catch // #included from: ../internal/catch_reporter_registrars.hpp #define TWOBLUECUBES_CATCH_REPORTER_REGISTRARS_HPP_INCLUDED namespace Catch { template class LegacyReporterRegistrar { class ReporterFactory : public IReporterFactory { virtual IStreamingReporter* create( ReporterConfig const& config ) const { return new LegacyReporterAdapter( new T( config ) ); } virtual std::string getDescription() const { return T::getDescription(); } }; public: LegacyReporterRegistrar( std::string const& name ) { getMutableRegistryHub().registerReporter( name, new ReporterFactory() ); } }; template class ReporterRegistrar { class ReporterFactory : public SharedImpl { // *** Please Note ***: // - If you end up here looking at a compiler error because it's trying to register // your custom reporter class be aware that the native reporter interface has changed // to IStreamingReporter. The "legacy" interface, IReporter, is still supported via // an adapter. Just use REGISTER_LEGACY_REPORTER to take advantage of the adapter. // However please consider updating to the new interface as the old one is now // deprecated and will probably be removed quite soon! // Please contact me via github if you have any questions at all about this. 
            // In fact, ideally, please contact me anyway to let me know you've hit this - as I have
            // no idea who is actually using custom reporters at all (possibly no-one!).
            // The new interface is designed to minimise exposure to interface changes in the future.
            virtual IStreamingReporter* create( ReporterConfig const& config ) const {
                return new T( config );
            }
            virtual std::string getDescription() const {
                return T::getDescription();
            }
        };

    public:
        ReporterRegistrar( std::string const& name ) {
            getMutableRegistryHub().registerReporter( name, new ReporterFactory() );
        }
    };

    template<typename T>
    class ListenerRegistrar {

        class ListenerFactory : public SharedImpl<IReporterFactory> {
            virtual IStreamingReporter* create( ReporterConfig const& config ) const {
                return new T( config );
            }
            virtual std::string getDescription() const {
                return std::string();
            }
        };

    public:
        ListenerRegistrar() {
            getMutableRegistryHub().registerListener( new ListenerFactory() );
        }
    };
}

#define INTERNAL_CATCH_REGISTER_LEGACY_REPORTER( name, reporterType ) \
    namespace{ Catch::LegacyReporterRegistrar<reporterType> catch_internal_RegistrarFor##reporterType( name ); }

#define INTERNAL_CATCH_REGISTER_REPORTER( name, reporterType ) \
    namespace{ Catch::ReporterRegistrar<reporterType> catch_internal_RegistrarFor##reporterType( name ); }

// Deprecated - use the form without INTERNAL_
#define INTERNAL_CATCH_REGISTER_LISTENER( listenerType ) \
    namespace{ Catch::ListenerRegistrar<listenerType> catch_internal_RegistrarFor##listenerType; }

#define CATCH_REGISTER_LISTENER( listenerType ) \
    namespace{ Catch::ListenerRegistrar<listenerType> catch_internal_RegistrarFor##listenerType; }

// #included from: ../internal/catch_xmlwriter.hpp
#define TWOBLUECUBES_CATCH_XMLWRITER_HPP_INCLUDED

#include <sstream>
#include <string>
#include <vector>
#include <iomanip>

namespace Catch {

    class XmlEncode {
    public:
        enum ForWhat { ForTextNodes, ForAttributes };

        XmlEncode( std::string const& str, ForWhat forWhat = ForTextNodes )
        :   m_str( str ),
            m_forWhat( forWhat )
        {}

        void encodeTo( std::ostream& os ) const {

            // Apostrophe escaping not necessary if we always use " to write attributes
            // (see: http://www.w3.org/TR/xml/#syntax)

            for( std::size_t i = 0; i < m_str.size(); ++ i ) {
                char c = m_str[i];
                switch( c ) {
                    case '<':   os << "&lt;"; break;
                    case '&':   os << "&amp;"; break;

                    case '>':
                        // See: http://www.w3.org/TR/xml/#syntax
                        if( i > 2 && m_str[i-1] == ']' && m_str[i-2] == ']' )
                            os << "&gt;";
                        else
                            os << c;
                        break;

                    case '\"':
                        if( m_forWhat == ForAttributes )
                            os << "&quot;";
                        else
                            os << c;
                        break;

                    default:
                        // Escape control chars - based on contribution by @espenalb in PR #465 and
                        // by @mrpi PR #588
                        if ( ( c >= 0 && c < '\x09' ) || ( c > '\x0D' && c < '\x20') || c=='\x7F' ) {
                            // see http://stackoverflow.com/questions/404107/why-are-control-characters-illegal-in-xml-1-0
                            os << "\\x" << std::uppercase << std::hex << std::setfill('0') << std::setw(2)
                               << static_cast<int>( c );
                        }
                        else
                            os << c;
                }
            }
        }

        friend std::ostream& operator << ( std::ostream& os, XmlEncode const& xmlEncode ) {
            xmlEncode.encodeTo( os );
            return os;
        }

    private:
        std::string m_str;
        ForWhat m_forWhat;
    };

    class XmlWriter {
    public:

        class ScopedElement {
        public:
            ScopedElement( XmlWriter* writer )
            :   m_writer( writer )
            {}

            ScopedElement( ScopedElement const& other )
            :   m_writer( other.m_writer ){
                other.m_writer = CATCH_NULL;
            }

            ~ScopedElement() {
                if( m_writer )
                    m_writer->endElement();
            }

            ScopedElement& writeText( std::string const& text, bool indent = true ) {
                m_writer->writeText( text, indent );
                return *this;
            }

            template<typename T>
            ScopedElement& writeAttribute( std::string const& name, T const& attribute ) {
                m_writer->writeAttribute( name, attribute );
                return *this;
            }

        private:
            mutable XmlWriter* m_writer;
        };

        XmlWriter()
        :   m_tagIsOpen( false ),
            m_needsNewline( false ),
            m_os( Catch::cout() )
        {
            writeDeclaration();
        }

        XmlWriter( std::ostream& os )
        :   m_tagIsOpen( false ),
            m_needsNewline( false ),
            m_os( os )
        {
            writeDeclaration();
        }

        ~XmlWriter() {
            while( !m_tags.empty() )
                endElement();
        }

        XmlWriter& startElement( std::string const& name ) {
            ensureTagClosed();
            newlineIfNecessary();
            m_os << m_indent << '<' << name;
            m_tags.push_back( name );
            m_indent += "  ";
            m_tagIsOpen = true;
            return *this;
        }

        ScopedElement scopedElement( std::string const& name ) {
            ScopedElement scoped( this );
            startElement( name );
            return scoped;
        }

        XmlWriter& endElement() {
            newlineIfNecessary();
            m_indent = m_indent.substr( 0, m_indent.size()-2 );
            if( m_tagIsOpen ) {
                m_os << "/>";
                m_tagIsOpen = false;
            }
            else {
                m_os << m_indent << "</" << m_tags.back() << ">";
            }
            m_os << std::endl;
            m_tags.pop_back();
            return *this;
        }

        XmlWriter& writeAttribute( std::string const& name, std::string const& attribute ) {
            if( !name.empty() && !attribute.empty() )
                m_os << ' ' << name << "=\"" << XmlEncode( attribute, XmlEncode::ForAttributes ) << '"';
            return *this;
        }

        XmlWriter& writeAttribute( std::string const& name, bool attribute ) {
            m_os << ' ' << name << "=\"" << ( attribute ? "true" : "false" ) << '"';
            return *this;
        }

        template<typename T>
        XmlWriter& writeAttribute( std::string const& name, T const& attribute ) {
            std::ostringstream oss;
            oss << attribute;
            return writeAttribute( name, oss.str() );
        }

        XmlWriter& writeText( std::string const& text, bool indent = true ) {
            if( !text.empty() ){
                bool tagWasOpen = m_tagIsOpen;
                ensureTagClosed();
                if( tagWasOpen && indent )
                    m_os << m_indent;
                m_os << XmlEncode( text );
                m_needsNewline = true;
            }
            return *this;
        }

        XmlWriter& writeComment( std::string const& text ) {
            ensureTagClosed();
            m_os << m_indent << "<!--" << text << "-->";
            m_needsNewline = true;
            return *this;
        }

        void writeStylesheetRef( std::string const& url ) {
            m_os << "<?xml-stylesheet type=\"text/xsl\" href=\"" << url << "\"?>\n";
        }

        XmlWriter& writeBlankLine() {
            ensureTagClosed();
            m_os << '\n';
            return *this;
        }

        void ensureTagClosed() {
            if( m_tagIsOpen ) {
                m_os << ">" << std::endl;
                m_tagIsOpen = false;
            }
        }

    private:
        XmlWriter( XmlWriter const& );
        void operator=( XmlWriter const& );

        void writeDeclaration() {
            m_os << "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n";
        }

        void newlineIfNecessary() {
            if( m_needsNewline ) {
                m_os << std::endl;
                m_needsNewline = false;
            }
        }

        bool m_tagIsOpen;
        bool m_needsNewline;

        std::vector<std::string> m_tags;
        std::string m_indent;
        std::ostream& m_os;
    };

}

namespace Catch {
    class XmlReporter : public StreamingReporterBase {
    public:
        XmlReporter( ReporterConfig const& _config )
        :   StreamingReporterBase( _config ),
            m_xml(_config.stream()),
            m_sectionDepth( 0 )
        {
            m_reporterPrefs.shouldRedirectStdOut = true;
        }

        virtual ~XmlReporter() CATCH_OVERRIDE;

        static std::string getDescription() {
            return "Reports test results as an XML document";
        }

        virtual std::string getStylesheetRef() const {
            return std::string();
        }

        void writeSourceInfo( SourceLineInfo const& sourceInfo ) {
            m_xml
                .writeAttribute( "filename", sourceInfo.file )
                .writeAttribute( "line", sourceInfo.line );
        }

    public: // StreamingReporterBase

        virtual void noMatchingTestCases( std::string const& s ) CATCH_OVERRIDE {
            StreamingReporterBase::noMatchingTestCases( s );
        }

        virtual void testRunStarting( TestRunInfo const& testInfo ) CATCH_OVERRIDE {
            StreamingReporterBase::testRunStarting( testInfo );
            std::string stylesheetRef = getStylesheetRef();
            if( !stylesheetRef.empty() )
                m_xml.writeStylesheetRef( stylesheetRef );
            m_xml.startElement( "Catch" );
            if( !m_config->name().empty() )
                m_xml.writeAttribute( "name", m_config->name() );
        }

        virtual void
testGroupStarting( GroupInfo const& groupInfo ) CATCH_OVERRIDE { StreamingReporterBase::testGroupStarting( groupInfo ); m_xml.startElement( "Group" ) .writeAttribute( "name", groupInfo.name ); } virtual void testCaseStarting( TestCaseInfo const& testInfo ) CATCH_OVERRIDE { StreamingReporterBase::testCaseStarting(testInfo); m_xml.startElement( "TestCase" ) .writeAttribute( "name", trim( testInfo.name ) ) .writeAttribute( "description", testInfo.description ) .writeAttribute( "tags", testInfo.tagsAsString ); writeSourceInfo( testInfo.lineInfo ); if ( m_config->showDurations() == ShowDurations::Always ) m_testCaseTimer.start(); m_xml.ensureTagClosed(); } virtual void sectionStarting( SectionInfo const& sectionInfo ) CATCH_OVERRIDE { StreamingReporterBase::sectionStarting( sectionInfo ); if( m_sectionDepth++ > 0 ) { m_xml.startElement( "Section" ) .writeAttribute( "name", trim( sectionInfo.name ) ) .writeAttribute( "description", sectionInfo.description ); writeSourceInfo( sectionInfo.lineInfo ); m_xml.ensureTagClosed(); } } virtual void assertionStarting( AssertionInfo const& ) CATCH_OVERRIDE { } virtual bool assertionEnded( AssertionStats const& assertionStats ) CATCH_OVERRIDE { AssertionResult const& result = assertionStats.assertionResult; bool includeResults = m_config->includeSuccessfulResults() || !result.isOk(); if( includeResults ) { // Print any info messages in tags. for( std::vector::const_iterator it = assertionStats.infoMessages.begin(), itEnd = assertionStats.infoMessages.end(); it != itEnd; ++it ) { if( it->type == ResultWas::Info ) { m_xml.scopedElement( "Info" ) .writeText( it->message ); } else if ( it->type == ResultWas::Warning ) { m_xml.scopedElement( "Warning" ) .writeText( it->message ); } } } // Drop out if result was successful but we're not printing them. if( !includeResults && result.getResultType() != ResultWas::Warning ) return true; // Print the expression if there is one. if( result.hasExpression() ) { m_xml.startElement( "Expression" ) .writeAttribute( "success", result.succeeded() ) .writeAttribute( "type", result.getTestMacroName() ); writeSourceInfo( result.getSourceInfo() ); m_xml.scopedElement( "Original" ) .writeText( result.getExpression() ); m_xml.scopedElement( "Expanded" ) .writeText( result.getExpandedExpression() ); } // And... Print a result applicable to each result type. 
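// Editor's note (illustrative only; the values are invented): for a failing
// CHECK( x == 2 ) with x == 1, the block above produces something like
//   <Expression success="false" type="CHECK" filename="tests.cpp" line="7">
//     <Original>x == 2</Original>
//     <Expanded>1 == 2</Expanded>
//   </Expression>
// and the switch below then adds a nested element for the result category
// (e.g. <Exception>, <Info> or <Failure>) before <Expression> is closed.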
switch( result.getResultType() ) { case ResultWas::ThrewException: m_xml.startElement( "Exception" ); writeSourceInfo( result.getSourceInfo() ); m_xml.writeText( result.getMessage() ); m_xml.endElement(); break; case ResultWas::FatalErrorCondition: m_xml.startElement( "FatalErrorCondition" ); writeSourceInfo( result.getSourceInfo() ); m_xml.writeText( result.getMessage() ); m_xml.endElement(); break; case ResultWas::Info: m_xml.scopedElement( "Info" ) .writeText( result.getMessage() ); break; case ResultWas::Warning: // Warning will already have been written break; case ResultWas::ExplicitFailure: m_xml.startElement( "Failure" ); writeSourceInfo( result.getSourceInfo() ); m_xml.writeText( result.getMessage() ); m_xml.endElement(); break; default: break; } if( result.hasExpression() ) m_xml.endElement(); return true; } virtual void sectionEnded( SectionStats const& sectionStats ) CATCH_OVERRIDE { StreamingReporterBase::sectionEnded( sectionStats ); if( --m_sectionDepth > 0 ) { XmlWriter::ScopedElement e = m_xml.scopedElement( "OverallResults" ); e.writeAttribute( "successes", sectionStats.assertions.passed ); e.writeAttribute( "failures", sectionStats.assertions.failed ); e.writeAttribute( "expectedFailures", sectionStats.assertions.failedButOk ); if ( m_config->showDurations() == ShowDurations::Always ) e.writeAttribute( "durationInSeconds", sectionStats.durationInSeconds ); m_xml.endElement(); } } virtual void testCaseEnded( TestCaseStats const& testCaseStats ) CATCH_OVERRIDE { StreamingReporterBase::testCaseEnded( testCaseStats ); XmlWriter::ScopedElement e = m_xml.scopedElement( "OverallResult" ); e.writeAttribute( "success", testCaseStats.totals.assertions.allOk() ); if ( m_config->showDurations() == ShowDurations::Always ) e.writeAttribute( "durationInSeconds", m_testCaseTimer.getElapsedSeconds() ); if( !testCaseStats.stdOut.empty() ) m_xml.scopedElement( "StdOut" ).writeText( trim( testCaseStats.stdOut ), false ); if( !testCaseStats.stdErr.empty() ) m_xml.scopedElement( "StdErr" ).writeText( trim( testCaseStats.stdErr ), false ); m_xml.endElement(); } virtual void testGroupEnded( TestGroupStats const& testGroupStats ) CATCH_OVERRIDE { StreamingReporterBase::testGroupEnded( testGroupStats ); // TODO: Check testGroupStats.aborting and act accordingly. 
m_xml.scopedElement( "OverallResults" ) .writeAttribute( "successes", testGroupStats.totals.assertions.passed ) .writeAttribute( "failures", testGroupStats.totals.assertions.failed ) .writeAttribute( "expectedFailures", testGroupStats.totals.assertions.failedButOk ); m_xml.endElement(); } virtual void testRunEnded( TestRunStats const& testRunStats ) CATCH_OVERRIDE { StreamingReporterBase::testRunEnded( testRunStats ); m_xml.scopedElement( "OverallResults" ) .writeAttribute( "successes", testRunStats.totals.assertions.passed ) .writeAttribute( "failures", testRunStats.totals.assertions.failed ) .writeAttribute( "expectedFailures", testRunStats.totals.assertions.failedButOk ); m_xml.endElement(); } private: Timer m_testCaseTimer; XmlWriter m_xml; int m_sectionDepth; }; INTERNAL_CATCH_REGISTER_REPORTER( "xml", XmlReporter ) } // end namespace Catch // #included from: ../reporters/catch_reporter_junit.hpp #define TWOBLUECUBES_CATCH_REPORTER_JUNIT_HPP_INCLUDED #include namespace Catch { namespace { std::string getCurrentTimestamp() { // Beware, this is not reentrant because of backward compatibility issues // Also, UTC only, again because of backward compatibility (%z is C++11) time_t rawtime; std::time(&rawtime); const size_t timeStampSize = sizeof("2017-01-16T17:06:45Z"); #ifdef _MSC_VER std::tm timeInfo = {}; gmtime_s(&timeInfo, &rawtime); #else std::tm* timeInfo; timeInfo = std::gmtime(&rawtime); #endif char timeStamp[timeStampSize]; const char * const fmt = "%Y-%m-%dT%H:%M:%SZ"; #ifdef _MSC_VER std::strftime(timeStamp, timeStampSize, fmt, &timeInfo); #else std::strftime(timeStamp, timeStampSize, fmt, timeInfo); #endif return std::string(timeStamp); } } class JunitReporter : public CumulativeReporterBase { public: JunitReporter( ReporterConfig const& _config ) : CumulativeReporterBase( _config ), xml( _config.stream() ), m_okToFail( false ) { m_reporterPrefs.shouldRedirectStdOut = true; } virtual ~JunitReporter() CATCH_OVERRIDE; static std::string getDescription() { return "Reports test results in an XML format that looks like Ant's junitreport target"; } virtual void noMatchingTestCases( std::string const& /*spec*/ ) CATCH_OVERRIDE {} virtual void testRunStarting( TestRunInfo const& runInfo ) CATCH_OVERRIDE { CumulativeReporterBase::testRunStarting( runInfo ); xml.startElement( "testsuites" ); } virtual void testGroupStarting( GroupInfo const& groupInfo ) CATCH_OVERRIDE { suiteTimer.start(); stdOutForSuite.str(""); stdErrForSuite.str(""); unexpectedExceptions = 0; CumulativeReporterBase::testGroupStarting( groupInfo ); } virtual void testCaseStarting( TestCaseInfo const& testCaseInfo ) CATCH_OVERRIDE { m_okToFail = testCaseInfo.okToFail(); } virtual bool assertionEnded( AssertionStats const& assertionStats ) CATCH_OVERRIDE { if( assertionStats.assertionResult.getResultType() == ResultWas::ThrewException && !m_okToFail ) unexpectedExceptions++; return CumulativeReporterBase::assertionEnded( assertionStats ); } virtual void testCaseEnded( TestCaseStats const& testCaseStats ) CATCH_OVERRIDE { stdOutForSuite << testCaseStats.stdOut; stdErrForSuite << testCaseStats.stdErr; CumulativeReporterBase::testCaseEnded( testCaseStats ); } virtual void testGroupEnded( TestGroupStats const& testGroupStats ) CATCH_OVERRIDE { double suiteTime = suiteTimer.getElapsedSeconds(); CumulativeReporterBase::testGroupEnded( testGroupStats ); writeGroup( *m_testGroups.back(), suiteTime ); } virtual void testRunEndedCumulative() CATCH_OVERRIDE { xml.endElement(); } void writeGroup( TestGroupNode const& groupNode, 
double suiteTime ) { XmlWriter::ScopedElement e = xml.scopedElement( "testsuite" ); TestGroupStats const& stats = groupNode.value; xml.writeAttribute( "name", stats.groupInfo.name ); xml.writeAttribute( "errors", unexpectedExceptions ); xml.writeAttribute( "failures", stats.totals.assertions.failed-unexpectedExceptions ); xml.writeAttribute( "tests", stats.totals.assertions.total() ); xml.writeAttribute( "hostname", "tbd" ); // !TBD if( m_config->showDurations() == ShowDurations::Never ) xml.writeAttribute( "time", "" ); else xml.writeAttribute( "time", suiteTime ); xml.writeAttribute( "timestamp", getCurrentTimestamp() ); // Write test cases for( TestGroupNode::ChildNodes::const_iterator it = groupNode.children.begin(), itEnd = groupNode.children.end(); it != itEnd; ++it ) writeTestCase( **it ); xml.scopedElement( "system-out" ).writeText( trim( stdOutForSuite.str() ), false ); xml.scopedElement( "system-err" ).writeText( trim( stdErrForSuite.str() ), false ); } void writeTestCase( TestCaseNode const& testCaseNode ) { TestCaseStats const& stats = testCaseNode.value; // All test cases have exactly one section - which represents the // test case itself. That section may have 0-n nested sections assert( testCaseNode.children.size() == 1 ); SectionNode const& rootSection = *testCaseNode.children.front(); std::string className = stats.testInfo.className; if( className.empty() ) { if( rootSection.childSections.empty() ) className = "global"; } writeSection( className, "", rootSection ); } void writeSection( std::string const& className, std::string const& rootName, SectionNode const& sectionNode ) { std::string name = trim( sectionNode.stats.sectionInfo.name ); if( !rootName.empty() ) name = rootName + '/' + name; if( !sectionNode.assertions.empty() || !sectionNode.stdOut.empty() || !sectionNode.stdErr.empty() ) { XmlWriter::ScopedElement e = xml.scopedElement( "testcase" ); if( className.empty() ) { xml.writeAttribute( "classname", name ); xml.writeAttribute( "name", "root" ); } else { xml.writeAttribute( "classname", className ); xml.writeAttribute( "name", name ); } xml.writeAttribute( "time", Catch::toString( sectionNode.stats.durationInSeconds ) ); writeAssertions( sectionNode ); if( !sectionNode.stdOut.empty() ) xml.scopedElement( "system-out" ).writeText( trim( sectionNode.stdOut ), false ); if( !sectionNode.stdErr.empty() ) xml.scopedElement( "system-err" ).writeText( trim( sectionNode.stdErr ), false ); } for( SectionNode::ChildSections::const_iterator it = sectionNode.childSections.begin(), itEnd = sectionNode.childSections.end(); it != itEnd; ++it ) if( className.empty() ) writeSection( name, "", **it ); else writeSection( className, name, **it ); } void writeAssertions( SectionNode const& sectionNode ) { for( SectionNode::Assertions::const_iterator it = sectionNode.assertions.begin(), itEnd = sectionNode.assertions.end(); it != itEnd; ++it ) writeAssertion( *it ); } void writeAssertion( AssertionStats const& stats ) { AssertionResult const& result = stats.assertionResult; if( !result.isOk() ) { std::string elementName; switch( result.getResultType() ) { case ResultWas::ThrewException: case ResultWas::FatalErrorCondition: elementName = "error"; break; case ResultWas::ExplicitFailure: elementName = "failure"; break; case ResultWas::ExpressionFailed: elementName = "failure"; break; case ResultWas::DidntThrowException: elementName = "failure"; break; // We should never see these here: case ResultWas::Info: case ResultWas::Warning: case ResultWas::Ok: case ResultWas::Unknown: case 
ResultWas::FailureBit: case ResultWas::Exception: elementName = "internalError"; break; } XmlWriter::ScopedElement e = xml.scopedElement( elementName ); xml.writeAttribute( "message", result.getExpandedExpression() ); xml.writeAttribute( "type", result.getTestMacroName() ); std::ostringstream oss; if( !result.getMessage().empty() ) oss << result.getMessage() << '\n'; for( std::vector::const_iterator it = stats.infoMessages.begin(), itEnd = stats.infoMessages.end(); it != itEnd; ++it ) if( it->type == ResultWas::Info ) oss << it->message << '\n'; oss << "at " << result.getSourceInfo(); xml.writeText( oss.str(), false ); } } XmlWriter xml; Timer suiteTimer; std::ostringstream stdOutForSuite; std::ostringstream stdErrForSuite; unsigned int unexpectedExceptions; bool m_okToFail; }; INTERNAL_CATCH_REGISTER_REPORTER( "junit", JunitReporter ) } // end namespace Catch // #included from: ../reporters/catch_reporter_console.hpp #define TWOBLUECUBES_CATCH_REPORTER_CONSOLE_HPP_INCLUDED #include #include namespace Catch { struct ConsoleReporter : StreamingReporterBase { ConsoleReporter( ReporterConfig const& _config ) : StreamingReporterBase( _config ), m_headerPrinted( false ) {} virtual ~ConsoleReporter() CATCH_OVERRIDE; static std::string getDescription() { return "Reports test results as plain lines of text"; } virtual void noMatchingTestCases( std::string const& spec ) CATCH_OVERRIDE { stream << "No test cases matched '" << spec << '\'' << std::endl; } virtual void assertionStarting( AssertionInfo const& ) CATCH_OVERRIDE { } virtual bool assertionEnded( AssertionStats const& _assertionStats ) CATCH_OVERRIDE { AssertionResult const& result = _assertionStats.assertionResult; bool includeResults = m_config->includeSuccessfulResults() || !result.isOk(); // Drop out if result was successful but we're not printing them. 
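// (Warnings are deliberately exempt in the check below: they are still
// printed even when successful results are being suppressed.)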
if( !includeResults && result.getResultType() != ResultWas::Warning ) return false; lazyPrint(); AssertionPrinter printer( stream, _assertionStats, includeResults ); printer.print(); stream << std::endl; return true; } virtual void sectionStarting( SectionInfo const& _sectionInfo ) CATCH_OVERRIDE { m_headerPrinted = false; StreamingReporterBase::sectionStarting( _sectionInfo ); } virtual void sectionEnded( SectionStats const& _sectionStats ) CATCH_OVERRIDE { if( _sectionStats.missingAssertions ) { lazyPrint(); Colour colour( Colour::ResultError ); if( m_sectionStack.size() > 1 ) stream << "\nNo assertions in section"; else stream << "\nNo assertions in test case"; stream << " '" << _sectionStats.sectionInfo.name << "'\n" << std::endl; } if( m_config->showDurations() == ShowDurations::Always ) { stream << getFormattedDuration(_sectionStats.durationInSeconds) << " s: " << _sectionStats.sectionInfo.name << std::endl; } if( m_headerPrinted ) { m_headerPrinted = false; } StreamingReporterBase::sectionEnded( _sectionStats ); } virtual void testCaseEnded( TestCaseStats const& _testCaseStats ) CATCH_OVERRIDE { StreamingReporterBase::testCaseEnded( _testCaseStats ); m_headerPrinted = false; } virtual void testGroupEnded( TestGroupStats const& _testGroupStats ) CATCH_OVERRIDE { if( currentGroupInfo.used ) { printSummaryDivider(); stream << "Summary for group '" << _testGroupStats.groupInfo.name << "':\n"; printTotals( _testGroupStats.totals ); stream << '\n' << std::endl; } StreamingReporterBase::testGroupEnded( _testGroupStats ); } virtual void testRunEnded( TestRunStats const& _testRunStats ) CATCH_OVERRIDE { printTotalsDivider( _testRunStats.totals ); printTotals( _testRunStats.totals ); stream << std::endl; StreamingReporterBase::testRunEnded( _testRunStats ); } private: class AssertionPrinter { void operator= ( AssertionPrinter const& ); public: AssertionPrinter( std::ostream& _stream, AssertionStats const& _stats, bool _printInfoMessages ) : stream( _stream ), stats( _stats ), result( _stats.assertionResult ), colour( Colour::None ), message( result.getMessage() ), messages( _stats.infoMessages ), printInfoMessages( _printInfoMessages ) { switch( result.getResultType() ) { case ResultWas::Ok: colour = Colour::Success; passOrFail = "PASSED"; //if( result.hasMessage() ) if( _stats.infoMessages.size() == 1 ) messageLabel = "with message"; if( _stats.infoMessages.size() > 1 ) messageLabel = "with messages"; break; case ResultWas::ExpressionFailed: if( result.isOk() ) { colour = Colour::Success; passOrFail = "FAILED - but was ok"; } else { colour = Colour::Error; passOrFail = "FAILED"; } if( _stats.infoMessages.size() == 1 ) messageLabel = "with message"; if( _stats.infoMessages.size() > 1 ) messageLabel = "with messages"; break; case ResultWas::ThrewException: colour = Colour::Error; passOrFail = "FAILED"; messageLabel = "due to unexpected exception with "; if (_stats.infoMessages.size() == 1) messageLabel += "message"; if (_stats.infoMessages.size() > 1) messageLabel += "messages"; break; case ResultWas::FatalErrorCondition: colour = Colour::Error; passOrFail = "FAILED"; messageLabel = "due to a fatal error condition"; break; case ResultWas::DidntThrowException: colour = Colour::Error; passOrFail = "FAILED"; messageLabel = "because no exception was thrown where one was expected"; break; case ResultWas::Info: messageLabel = "info"; break; case ResultWas::Warning: messageLabel = "warning"; break; case ResultWas::ExplicitFailure: passOrFail = "FAILED"; colour = Colour::Error; if( 
_stats.infoMessages.size() == 1 ) messageLabel = "explicitly with message"; if( _stats.infoMessages.size() > 1 ) messageLabel = "explicitly with messages"; break; // These cases are here to prevent compiler warnings case ResultWas::Unknown: case ResultWas::FailureBit: case ResultWas::Exception: passOrFail = "** internal error **"; colour = Colour::Error; break; } } void print() const { printSourceInfo(); if( stats.totals.assertions.total() > 0 ) { if( result.isOk() ) stream << '\n'; printResultType(); printOriginalExpression(); printReconstructedExpression(); } else { stream << '\n'; } printMessage(); } private: void printResultType() const { if( !passOrFail.empty() ) { Colour colourGuard( colour ); stream << passOrFail << ":\n"; } } void printOriginalExpression() const { if( result.hasExpression() ) { Colour colourGuard( Colour::OriginalExpression ); stream << " "; stream << result.getExpressionInMacro(); stream << '\n'; } } void printReconstructedExpression() const { if( result.hasExpandedExpression() ) { stream << "with expansion:\n"; Colour colourGuard( Colour::ReconstructedExpression ); stream << Text( result.getExpandedExpression(), TextAttributes().setIndent(2) ) << '\n'; } } void printMessage() const { if( !messageLabel.empty() ) stream << messageLabel << ':' << '\n'; for( std::vector::const_iterator it = messages.begin(), itEnd = messages.end(); it != itEnd; ++it ) { // If this assertion is a warning ignore any INFO messages if( printInfoMessages || it->type != ResultWas::Info ) stream << Text( it->message, TextAttributes().setIndent(2) ) << '\n'; } } void printSourceInfo() const { Colour colourGuard( Colour::FileName ); stream << result.getSourceInfo() << ": "; } std::ostream& stream; AssertionStats const& stats; AssertionResult const& result; Colour::Code colour; std::string passOrFail; std::string messageLabel; std::string message; std::vector messages; bool printInfoMessages; }; void lazyPrint() { if( !currentTestRunInfo.used ) lazyPrintRunInfo(); if( !currentGroupInfo.used ) lazyPrintGroupInfo(); if( !m_headerPrinted ) { printTestCaseAndSectionHeader(); m_headerPrinted = true; } } void lazyPrintRunInfo() { stream << '\n' << getLineOfChars<'~'>() << '\n'; Colour colour( Colour::SecondaryText ); stream << currentTestRunInfo->name << " is a Catch v" << libraryVersion() << " host application.\n" << "Run with -? 
for options\n\n"; if( m_config->rngSeed() != 0 ) stream << "Randomness seeded to: " << m_config->rngSeed() << "\n\n"; currentTestRunInfo.used = true; } void lazyPrintGroupInfo() { if( !currentGroupInfo->name.empty() && currentGroupInfo->groupsCounts > 1 ) { printClosedHeader( "Group: " + currentGroupInfo->name ); currentGroupInfo.used = true; } } void printTestCaseAndSectionHeader() { assert( !m_sectionStack.empty() ); printOpenHeader( currentTestCaseInfo->name ); if( m_sectionStack.size() > 1 ) { Colour colourGuard( Colour::Headers ); std::vector::const_iterator it = m_sectionStack.begin()+1, // Skip first section (test case) itEnd = m_sectionStack.end(); for( ; it != itEnd; ++it ) printHeaderString( it->name, 2 ); } SourceLineInfo lineInfo = m_sectionStack.back().lineInfo; if( !lineInfo.empty() ){ stream << getLineOfChars<'-'>() << '\n'; Colour colourGuard( Colour::FileName ); stream << lineInfo << '\n'; } stream << getLineOfChars<'.'>() << '\n' << std::endl; } void printClosedHeader( std::string const& _name ) { printOpenHeader( _name ); stream << getLineOfChars<'.'>() << '\n'; } void printOpenHeader( std::string const& _name ) { stream << getLineOfChars<'-'>() << '\n'; { Colour colourGuard( Colour::Headers ); printHeaderString( _name ); } } // if string has a : in first line will set indent to follow it on // subsequent lines void printHeaderString( std::string const& _string, std::size_t indent = 0 ) { std::size_t i = _string.find( ": " ); if( i != std::string::npos ) i+=2; else i = 0; stream << Text( _string, TextAttributes() .setIndent( indent+i) .setInitialIndent( indent ) ) << '\n'; } struct SummaryColumn { SummaryColumn( std::string const& _label, Colour::Code _colour ) : label( _label ), colour( _colour ) {} SummaryColumn addRow( std::size_t count ) { std::ostringstream oss; oss << count; std::string row = oss.str(); for( std::vector::iterator it = rows.begin(); it != rows.end(); ++it ) { while( it->size() < row.size() ) *it = ' ' + *it; while( it->size() > row.size() ) row = ' ' + row; } rows.push_back( row ); return *this; } std::string label; Colour::Code colour; std::vector rows; }; void printTotals( Totals const& totals ) { if( totals.testCases.total() == 0 ) { stream << Colour( Colour::Warning ) << "No tests ran\n"; } else if( totals.assertions.total() > 0 && totals.testCases.allPassed() ) { stream << Colour( Colour::ResultSuccess ) << "All tests passed"; stream << " (" << pluralise( totals.assertions.passed, "assertion" ) << " in " << pluralise( totals.testCases.passed, "test case" ) << ')' << '\n'; } else { std::vector columns; columns.push_back( SummaryColumn( "", Colour::None ) .addRow( totals.testCases.total() ) .addRow( totals.assertions.total() ) ); columns.push_back( SummaryColumn( "passed", Colour::Success ) .addRow( totals.testCases.passed ) .addRow( totals.assertions.passed ) ); columns.push_back( SummaryColumn( "failed", Colour::ResultError ) .addRow( totals.testCases.failed ) .addRow( totals.assertions.failed ) ); columns.push_back( SummaryColumn( "failed as expected", Colour::ResultExpectedFailure ) .addRow( totals.testCases.failedButOk ) .addRow( totals.assertions.failedButOk ) ); printSummaryRow( "test cases", columns, 0 ); printSummaryRow( "assertions", columns, 1 ); } } void printSummaryRow( std::string const& label, std::vector const& cols, std::size_t row ) { for( std::vector::const_iterator it = cols.begin(); it != cols.end(); ++it ) { std::string value = it->rows[row]; if( it->label.empty() ) { stream << label << ": "; if( value != "0" ) stream << 
value; else stream << Colour( Colour::Warning ) << "- none -"; } else if( value != "0" ) { stream << Colour( Colour::LightGrey ) << " | "; stream << Colour( it->colour ) << value << ' ' << it->label; } } stream << '\n'; } static std::size_t makeRatio( std::size_t number, std::size_t total ) { std::size_t ratio = total > 0 ? CATCH_CONFIG_CONSOLE_WIDTH * number/ total : 0; return ( ratio == 0 && number > 0 ) ? 1 : ratio; } static std::size_t& findMax( std::size_t& i, std::size_t& j, std::size_t& k ) { if( i > j && i > k ) return i; else if( j > k ) return j; else return k; } void printTotalsDivider( Totals const& totals ) { if( totals.testCases.total() > 0 ) { std::size_t failedRatio = makeRatio( totals.testCases.failed, totals.testCases.total() ); std::size_t failedButOkRatio = makeRatio( totals.testCases.failedButOk, totals.testCases.total() ); std::size_t passedRatio = makeRatio( totals.testCases.passed, totals.testCases.total() ); while( failedRatio + failedButOkRatio + passedRatio < CATCH_CONFIG_CONSOLE_WIDTH-1 ) findMax( failedRatio, failedButOkRatio, passedRatio )++; while( failedRatio + failedButOkRatio + passedRatio > CATCH_CONFIG_CONSOLE_WIDTH-1 ) findMax( failedRatio, failedButOkRatio, passedRatio )--; stream << Colour( Colour::Error ) << std::string( failedRatio, '=' ); stream << Colour( Colour::ResultExpectedFailure ) << std::string( failedButOkRatio, '=' ); if( totals.testCases.allPassed() ) stream << Colour( Colour::ResultSuccess ) << std::string( passedRatio, '=' ); else stream << Colour( Colour::Success ) << std::string( passedRatio, '=' ); } else { stream << Colour( Colour::Warning ) << std::string( CATCH_CONFIG_CONSOLE_WIDTH-1, '=' ); } stream << '\n'; } void printSummaryDivider() { stream << getLineOfChars<'-'>() << '\n'; } private: bool m_headerPrinted; }; INTERNAL_CATCH_REGISTER_REPORTER( "console", ConsoleReporter ) } // end namespace Catch // #included from: ../reporters/catch_reporter_compact.hpp #define TWOBLUECUBES_CATCH_REPORTER_COMPACT_HPP_INCLUDED namespace Catch { struct CompactReporter : StreamingReporterBase { CompactReporter( ReporterConfig const& _config ) : StreamingReporterBase( _config ) {} virtual ~CompactReporter(); static std::string getDescription() { return "Reports test results on a single line, suitable for IDEs"; } virtual ReporterPreferences getPreferences() const { ReporterPreferences prefs; prefs.shouldRedirectStdOut = false; return prefs; } virtual void noMatchingTestCases( std::string const& spec ) { stream << "No test cases matched '" << spec << '\'' << std::endl; } virtual void assertionStarting( AssertionInfo const& ) {} virtual bool assertionEnded( AssertionStats const& _assertionStats ) { AssertionResult const& result = _assertionStats.assertionResult; bool printInfoMessages = true; // Drop out if result was successful and we're not printing those if( !m_config->includeSuccessfulResults() && result.isOk() ) { if( result.getResultType() != ResultWas::Warning ) return false; printInfoMessages = false; } AssertionPrinter printer( stream, _assertionStats, printInfoMessages ); printer.print(); stream << std::endl; return true; } virtual void sectionEnded(SectionStats const& _sectionStats) CATCH_OVERRIDE { if (m_config->showDurations() == ShowDurations::Always) { stream << getFormattedDuration(_sectionStats.durationInSeconds) << " s: " << _sectionStats.sectionInfo.name << std::endl; } } virtual void testRunEnded( TestRunStats const& _testRunStats ) { printTotals( _testRunStats.totals ); stream << '\n' << std::endl; 
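// Delegate to the base class so its per-run bookkeeping (current run/group/
// test-case info) is reset.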
StreamingReporterBase::testRunEnded( _testRunStats ); } private: class AssertionPrinter { void operator= ( AssertionPrinter const& ); public: AssertionPrinter( std::ostream& _stream, AssertionStats const& _stats, bool _printInfoMessages ) : stream( _stream ) , stats( _stats ) , result( _stats.assertionResult ) , messages( _stats.infoMessages ) , itMessage( _stats.infoMessages.begin() ) , printInfoMessages( _printInfoMessages ) {} void print() { printSourceInfo(); itMessage = messages.begin(); switch( result.getResultType() ) { case ResultWas::Ok: printResultType( Colour::ResultSuccess, passedString() ); printOriginalExpression(); printReconstructedExpression(); if ( ! result.hasExpression() ) printRemainingMessages( Colour::None ); else printRemainingMessages(); break; case ResultWas::ExpressionFailed: if( result.isOk() ) printResultType( Colour::ResultSuccess, failedString() + std::string( " - but was ok" ) ); else printResultType( Colour::Error, failedString() ); printOriginalExpression(); printReconstructedExpression(); printRemainingMessages(); break; case ResultWas::ThrewException: printResultType( Colour::Error, failedString() ); printIssue( "unexpected exception with message:" ); printMessage(); printExpressionWas(); printRemainingMessages(); break; case ResultWas::FatalErrorCondition: printResultType( Colour::Error, failedString() ); printIssue( "fatal error condition with message:" ); printMessage(); printExpressionWas(); printRemainingMessages(); break; case ResultWas::DidntThrowException: printResultType( Colour::Error, failedString() ); printIssue( "expected exception, got none" ); printExpressionWas(); printRemainingMessages(); break; case ResultWas::Info: printResultType( Colour::None, "info" ); printMessage(); printRemainingMessages(); break; case ResultWas::Warning: printResultType( Colour::None, "warning" ); printMessage(); printRemainingMessages(); break; case ResultWas::ExplicitFailure: printResultType( Colour::Error, failedString() ); printIssue( "explicitly" ); printRemainingMessages( Colour::None ); break; // These cases are here to prevent compiler warnings case ResultWas::Unknown: case ResultWas::FailureBit: case ResultWas::Exception: printResultType( Colour::Error, "** internal error **" ); break; } } private: // Colour::LightGrey static Colour::Code dimColour() { return Colour::FileName; } #ifdef CATCH_PLATFORM_MAC static const char* failedString() { return "FAILED"; } static const char* passedString() { return "PASSED"; } #else static const char* failedString() { return "failed"; } static const char* passedString() { return "passed"; } #endif void printSourceInfo() const { Colour colourGuard( Colour::FileName ); stream << result.getSourceInfo() << ':'; } void printResultType( Colour::Code colour, std::string const& passOrFail ) const { if( !passOrFail.empty() ) { { Colour colourGuard( colour ); stream << ' ' << passOrFail; } stream << ':'; } } void printIssue( std::string const& issue ) const { stream << ' ' << issue; } void printExpressionWas() { if( result.hasExpression() ) { stream << ';'; { Colour colour( dimColour() ); stream << " expression was:"; } printOriginalExpression(); } } void printOriginalExpression() const { if( result.hasExpression() ) { stream << ' ' << result.getExpression(); } } void printReconstructedExpression() const { if( result.hasExpandedExpression() ) { { Colour colour( dimColour() ); stream << " for: "; } stream << result.getExpandedExpression(); } } void printMessage() { if ( itMessage != messages.end() ) { stream << " '" << 
itMessage->message << '\''; ++itMessage; } } void printRemainingMessages( Colour::Code colour = dimColour() ) { if ( itMessage == messages.end() ) return; // using messages.end() directly yields compilation error: std::vector::const_iterator itEnd = messages.end(); const std::size_t N = static_cast( std::distance( itMessage, itEnd ) ); { Colour colourGuard( colour ); stream << " with " << pluralise( N, "message" ) << ':'; } for(; itMessage != itEnd; ) { // If this assertion is a warning ignore any INFO messages if( printInfoMessages || itMessage->type != ResultWas::Info ) { stream << " '" << itMessage->message << '\''; if ( ++itMessage != itEnd ) { Colour colourGuard( dimColour() ); stream << " and"; } } } } private: std::ostream& stream; AssertionStats const& stats; AssertionResult const& result; std::vector messages; std::vector::const_iterator itMessage; bool printInfoMessages; }; // Colour, message variants: // - white: No tests ran. // - red: Failed [both/all] N test cases, failed [both/all] M assertions. // - white: Passed [both/all] N test cases (no assertions). // - red: Failed N tests cases, failed M assertions. // - green: Passed [both/all] N tests cases with M assertions. std::string bothOrAll( std::size_t count ) const { return count == 1 ? std::string() : count == 2 ? "both " : "all " ; } void printTotals( const Totals& totals ) const { if( totals.testCases.total() == 0 ) { stream << "No tests ran."; } else if( totals.testCases.failed == totals.testCases.total() ) { Colour colour( Colour::ResultError ); const std::string qualify_assertions_failed = totals.assertions.failed == totals.assertions.total() ? bothOrAll( totals.assertions.failed ) : std::string(); stream << "Failed " << bothOrAll( totals.testCases.failed ) << pluralise( totals.testCases.failed, "test case" ) << ", " "failed " << qualify_assertions_failed << pluralise( totals.assertions.failed, "assertion" ) << '.'; } else if( totals.assertions.total() == 0 ) { stream << "Passed " << bothOrAll( totals.testCases.total() ) << pluralise( totals.testCases.total(), "test case" ) << " (no assertions)."; } else if( totals.assertions.failed ) { Colour colour( Colour::ResultError ); stream << "Failed " << pluralise( totals.testCases.failed, "test case" ) << ", " "failed " << pluralise( totals.assertions.failed, "assertion" ) << '.'; } else { Colour colour( Colour::ResultSuccess ); stream << "Passed " << bothOrAll( totals.testCases.passed ) << pluralise( totals.testCases.passed, "test case" ) << " with " << pluralise( totals.assertions.passed, "assertion" ) << '.'; } } }; INTERNAL_CATCH_REGISTER_REPORTER( "compact", CompactReporter ) } // end namespace Catch namespace Catch { // These are all here to avoid warnings about not having any out of line // virtual methods NonCopyable::~NonCopyable() {} IShared::~IShared() {} IStream::~IStream() CATCH_NOEXCEPT {} FileStream::~FileStream() CATCH_NOEXCEPT {} CoutStream::~CoutStream() CATCH_NOEXCEPT {} DebugOutStream::~DebugOutStream() CATCH_NOEXCEPT {} StreamBufBase::~StreamBufBase() CATCH_NOEXCEPT {} IContext::~IContext() {} IResultCapture::~IResultCapture() {} ITestCase::~ITestCase() {} ITestCaseRegistry::~ITestCaseRegistry() {} IRegistryHub::~IRegistryHub() {} IMutableRegistryHub::~IMutableRegistryHub() {} IExceptionTranslator::~IExceptionTranslator() {} IExceptionTranslatorRegistry::~IExceptionTranslatorRegistry() {} IReporter::~IReporter() {} IReporterFactory::~IReporterFactory() {} IReporterRegistry::~IReporterRegistry() {} IStreamingReporter::~IStreamingReporter() {} 
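// -- Editor's note --------------------------------------------------------
// Illustrative sketch only (kept out of compilation): how client code, at
// global scope in its own translation unit, typically plugs into the
// registrar machinery defined earlier in this header. The listener name
// SkipLogger and its behaviour are invented; TestEventListenerBase,
// CATCH_REGISTER_LISTENER and TestCaseInfo::name come from this header.
#if 0
struct SkipLogger : Catch::TestEventListenerBase {
    SkipLogger( Catch::ReporterConfig const& config )
    :   Catch::TestEventListenerBase( config )
    {}

    // Record every skipped test case on the reporter's output stream.
    virtual void skipTest( Catch::TestCaseInfo const& testInfo ) CATCH_OVERRIDE {
        stream << "skipped: " << testInfo.name << '\n';
    }
};

CATCH_REGISTER_LISTENER( SkipLogger )
#endif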
AssertionStats::~AssertionStats() {} SectionStats::~SectionStats() {} TestCaseStats::~TestCaseStats() {} TestGroupStats::~TestGroupStats() {} TestRunStats::~TestRunStats() {} CumulativeReporterBase::SectionNode::~SectionNode() {} CumulativeReporterBase::~CumulativeReporterBase() {} StreamingReporterBase::~StreamingReporterBase() {} ConsoleReporter::~ConsoleReporter() {} CompactReporter::~CompactReporter() {} IRunner::~IRunner() {} IMutableContext::~IMutableContext() {} IConfig::~IConfig() {} XmlReporter::~XmlReporter() {} JunitReporter::~JunitReporter() {} TestRegistry::~TestRegistry() {} FreeFunctionTestCase::~FreeFunctionTestCase() {} IGeneratorInfo::~IGeneratorInfo() {} IGeneratorsForTest::~IGeneratorsForTest() {} WildcardPattern::~WildcardPattern() {} TestSpec::Pattern::~Pattern() {} TestSpec::NamePattern::~NamePattern() {} TestSpec::TagPattern::~TagPattern() {} TestSpec::ExcludedPattern::~ExcludedPattern() {} Matchers::Impl::MatcherUntypedBase::~MatcherUntypedBase() {} void Config::dummy() {} namespace TestCaseTracking { ITracker::~ITracker() {} TrackerBase::~TrackerBase() {} SectionTracker::~SectionTracker() {} IndexTracker::~IndexTracker() {} } } #ifdef __clang__ # pragma clang diagnostic pop #endif #endif #ifdef CATCH_CONFIG_MAIN // #included from: internal/catch_default_main.hpp #define TWOBLUECUBES_CATCH_DEFAULT_MAIN_HPP_INCLUDED #ifndef __OBJC__ #if defined(WIN32) && defined(_UNICODE) && !defined(DO_NOT_USE_WMAIN) // Standard C/C++ Win32 Unicode wmain entry point extern "C" int wmain (int argc, wchar_t * argv[], wchar_t * []) { #else // Standard C/C++ main entry point int main (int argc, char * argv[]) { #endif int result = Catch::Session().run( argc, argv ); return ( result < 0xff ? result : 0xff ); } #else // __OBJC__ // Objective-C entry point int main (int argc, char * const argv[]) { #if !CATCH_ARC_ENABLED NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; #endif Catch::registerTestMethods(); int result = Catch::Session().run( argc, (char* const*)argv ); #if !CATCH_ARC_ENABLED [pool drain]; #endif return ( result < 0xff ? 
result : 0xff ); } #endif // __OBJC__ #endif #ifdef CLARA_CONFIG_MAIN_NOT_DEFINED # undef CLARA_CONFIG_MAIN #endif ////// // If this config identifier is defined then all CATCH macros are prefixed with CATCH_ #ifdef CATCH_CONFIG_PREFIX_ALL #if defined(CATCH_CONFIG_FAST_COMPILE) #define CATCH_REQUIRE( expr ) INTERNAL_CATCH_TEST_NO_TRY( "CATCH_REQUIRE", Catch::ResultDisposition::Normal, expr ) #define CATCH_REQUIRE_FALSE( expr ) INTERNAL_CATCH_TEST_NO_TRY( "CATCH_REQUIRE_FALSE", Catch::ResultDisposition::Normal | Catch::ResultDisposition::FalseTest, expr ) #else #define CATCH_REQUIRE( expr ) INTERNAL_CATCH_TEST( "CATCH_REQUIRE", Catch::ResultDisposition::Normal, expr ) #define CATCH_REQUIRE_FALSE( expr ) INTERNAL_CATCH_TEST( "CATCH_REQUIRE_FALSE", Catch::ResultDisposition::Normal | Catch::ResultDisposition::FalseTest, expr ) #endif #define CATCH_REQUIRE_THROWS( expr ) INTERNAL_CATCH_THROWS( "CATCH_REQUIRE_THROWS", Catch::ResultDisposition::Normal, "", expr ) #define CATCH_REQUIRE_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( "CATCH_REQUIRE_THROWS_AS", exceptionType, Catch::ResultDisposition::Normal, expr ) #define CATCH_REQUIRE_THROWS_WITH( expr, matcher ) INTERNAL_CATCH_THROWS( "CATCH_REQUIRE_THROWS_WITH", Catch::ResultDisposition::Normal, matcher, expr ) #define CATCH_REQUIRE_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( "CATCH_REQUIRE_NOTHROW", Catch::ResultDisposition::Normal, expr ) #define CATCH_CHECK( expr ) INTERNAL_CATCH_TEST( "CATCH_CHECK", Catch::ResultDisposition::ContinueOnFailure, expr ) #define CATCH_CHECK_FALSE( expr ) INTERNAL_CATCH_TEST( "CATCH_CHECK_FALSE", Catch::ResultDisposition::ContinueOnFailure | Catch::ResultDisposition::FalseTest, expr ) #define CATCH_CHECKED_IF( expr ) INTERNAL_CATCH_IF( "CATCH_CHECKED_IF", Catch::ResultDisposition::ContinueOnFailure, expr ) #define CATCH_CHECKED_ELSE( expr ) INTERNAL_CATCH_ELSE( "CATCH_CHECKED_ELSE", Catch::ResultDisposition::ContinueOnFailure, expr ) #define CATCH_CHECK_NOFAIL( expr ) INTERNAL_CATCH_TEST( "CATCH_CHECK_NOFAIL", Catch::ResultDisposition::ContinueOnFailure | Catch::ResultDisposition::SuppressFail, expr ) #define CATCH_CHECK_THROWS( expr ) INTERNAL_CATCH_THROWS( "CATCH_CHECK_THROWS", Catch::ResultDisposition::ContinueOnFailure, "", expr ) #define CATCH_CHECK_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( "CATCH_CHECK_THROWS_AS", exceptionType, Catch::ResultDisposition::ContinueOnFailure, expr ) #define CATCH_CHECK_THROWS_WITH( expr, matcher ) INTERNAL_CATCH_THROWS( "CATCH_CHECK_THROWS_WITH", Catch::ResultDisposition::ContinueOnFailure, matcher, expr ) #define CATCH_CHECK_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( "CATCH_CHECK_NOTHROW", Catch::ResultDisposition::ContinueOnFailure, expr ) #define CATCH_CHECK_THAT( arg, matcher ) INTERNAL_CHECK_THAT( "CATCH_CHECK_THAT", matcher, Catch::ResultDisposition::ContinueOnFailure, arg ) #if defined(CATCH_CONFIG_FAST_COMPILE) #define CATCH_REQUIRE_THAT( arg, matcher ) INTERNAL_CHECK_THAT_NO_TRY( "CATCH_REQUIRE_THAT", matcher, Catch::ResultDisposition::Normal, arg ) #else #define CATCH_REQUIRE_THAT( arg, matcher ) INTERNAL_CHECK_THAT( "CATCH_REQUIRE_THAT", matcher, Catch::ResultDisposition::Normal, arg ) #endif #define CATCH_INFO( msg ) INTERNAL_CATCH_INFO( "CATCH_INFO", msg ) #define CATCH_WARN( msg ) INTERNAL_CATCH_MSG( "CATCH_WARN", Catch::ResultWas::Warning, Catch::ResultDisposition::ContinueOnFailure, msg ) #define CATCH_SCOPED_INFO( msg ) INTERNAL_CATCH_INFO( "CATCH_INFO", msg ) #define CATCH_CAPTURE( msg ) INTERNAL_CATCH_INFO( "CATCH_CAPTURE", #msg " := 
" << Catch::toString(msg) ) #define CATCH_SCOPED_CAPTURE( msg ) INTERNAL_CATCH_INFO( "CATCH_CAPTURE", #msg " := " << Catch::toString(msg) ) #ifdef CATCH_CONFIG_VARIADIC_MACROS #define CATCH_TEST_CASE( ... ) INTERNAL_CATCH_TESTCASE( __VA_ARGS__ ) #define CATCH_TEST_CASE_METHOD( className, ... ) INTERNAL_CATCH_TEST_CASE_METHOD( className, __VA_ARGS__ ) #define CATCH_METHOD_AS_TEST_CASE( method, ... ) INTERNAL_CATCH_METHOD_AS_TEST_CASE( method, __VA_ARGS__ ) #define CATCH_REGISTER_TEST_CASE( Function, ... ) INTERNAL_CATCH_REGISTER_TESTCASE( Function, __VA_ARGS__ ) #define CATCH_SECTION( ... ) INTERNAL_CATCH_SECTION( __VA_ARGS__ ) #define CATCH_FAIL( ... ) INTERNAL_CATCH_MSG( "CATCH_FAIL", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::Normal, __VA_ARGS__ ) #define CATCH_FAIL_CHECK( ... ) INTERNAL_CATCH_MSG( "CATCH_FAIL_CHECK", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::ContinueOnFailure, __VA_ARGS__ ) #define CATCH_SUCCEED( ... ) INTERNAL_CATCH_MSG( "CATCH_SUCCEED", Catch::ResultWas::Ok, Catch::ResultDisposition::ContinueOnFailure, __VA_ARGS__ ) #else #define CATCH_TEST_CASE( name, description ) INTERNAL_CATCH_TESTCASE( name, description ) #define CATCH_TEST_CASE_METHOD( className, name, description ) INTERNAL_CATCH_TEST_CASE_METHOD( className, name, description ) #define CATCH_METHOD_AS_TEST_CASE( method, name, description ) INTERNAL_CATCH_METHOD_AS_TEST_CASE( method, name, description ) #define CATCH_REGISTER_TEST_CASE( function, name, description ) INTERNAL_CATCH_REGISTER_TESTCASE( function, name, description ) #define CATCH_SECTION( name, description ) INTERNAL_CATCH_SECTION( name, description ) #define CATCH_FAIL( msg ) INTERNAL_CATCH_MSG( "CATCH_FAIL", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::Normal, msg ) #define CATCH_FAIL_CHECK( msg ) INTERNAL_CATCH_MSG( "CATCH_FAIL_CHECK", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::ContinueOnFailure, msg ) #define CATCH_SUCCEED( msg ) INTERNAL_CATCH_MSG( "CATCH_SUCCEED", Catch::ResultWas::Ok, Catch::ResultDisposition::ContinueOnFailure, msg ) #endif #define CATCH_ANON_TEST_CASE() INTERNAL_CATCH_TESTCASE( "", "" ) #define CATCH_REGISTER_REPORTER( name, reporterType ) INTERNAL_CATCH_REGISTER_REPORTER( name, reporterType ) #define CATCH_REGISTER_LEGACY_REPORTER( name, reporterType ) INTERNAL_CATCH_REGISTER_LEGACY_REPORTER( name, reporterType ) #define CATCH_GENERATE( expr) INTERNAL_CATCH_GENERATE( expr ) // "BDD-style" convenience wrappers #ifdef CATCH_CONFIG_VARIADIC_MACROS #define CATCH_SCENARIO( ... ) CATCH_TEST_CASE( "Scenario: " __VA_ARGS__ ) #define CATCH_SCENARIO_METHOD( className, ... 
) INTERNAL_CATCH_TEST_CASE_METHOD( className, "Scenario: " __VA_ARGS__ )
#else
#define CATCH_SCENARIO( name, tags ) CATCH_TEST_CASE( "Scenario: " name, tags )
#define CATCH_SCENARIO_METHOD( className, name, tags ) INTERNAL_CATCH_TEST_CASE_METHOD( className, "Scenario: " name, tags )
#endif
#define CATCH_GIVEN( desc ) CATCH_SECTION( std::string( "Given: ") + desc, "" )
#define CATCH_WHEN( desc ) CATCH_SECTION( std::string( " When: ") + desc, "" )
#define CATCH_AND_WHEN( desc ) CATCH_SECTION( std::string( " And: ") + desc, "" )
#define CATCH_THEN( desc ) CATCH_SECTION( std::string( " Then: ") + desc, "" )
#define CATCH_AND_THEN( desc ) CATCH_SECTION( std::string( " And: ") + desc, "" )
// If CATCH_CONFIG_PREFIX_ALL is not defined then the CATCH_ prefix is not required
#else
#if defined(CATCH_CONFIG_FAST_COMPILE)
#define REQUIRE( expr ) INTERNAL_CATCH_TEST_NO_TRY( "REQUIRE", Catch::ResultDisposition::Normal, expr )
#define REQUIRE_FALSE( expr ) INTERNAL_CATCH_TEST_NO_TRY( "REQUIRE_FALSE", Catch::ResultDisposition::Normal | Catch::ResultDisposition::FalseTest, expr )
#else
#define REQUIRE( expr ) INTERNAL_CATCH_TEST( "REQUIRE", Catch::ResultDisposition::Normal, expr )
#define REQUIRE_FALSE( expr ) INTERNAL_CATCH_TEST( "REQUIRE_FALSE", Catch::ResultDisposition::Normal | Catch::ResultDisposition::FalseTest, expr )
#endif
#define REQUIRE_THROWS( expr ) INTERNAL_CATCH_THROWS( "REQUIRE_THROWS", Catch::ResultDisposition::Normal, "", expr )
#define REQUIRE_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( "REQUIRE_THROWS_AS", exceptionType, Catch::ResultDisposition::Normal, expr )
#define REQUIRE_THROWS_WITH( expr, matcher ) INTERNAL_CATCH_THROWS( "REQUIRE_THROWS_WITH", Catch::ResultDisposition::Normal, matcher, expr )
#define REQUIRE_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( "REQUIRE_NOTHROW", Catch::ResultDisposition::Normal, expr )
#define CHECK( expr ) INTERNAL_CATCH_TEST( "CHECK", Catch::ResultDisposition::ContinueOnFailure, expr )
#define CHECK_FALSE( expr ) INTERNAL_CATCH_TEST( "CHECK_FALSE", Catch::ResultDisposition::ContinueOnFailure | Catch::ResultDisposition::FalseTest, expr )
#define CHECKED_IF( expr ) INTERNAL_CATCH_IF( "CHECKED_IF", Catch::ResultDisposition::ContinueOnFailure, expr )
#define CHECKED_ELSE( expr ) INTERNAL_CATCH_ELSE( "CHECKED_ELSE", Catch::ResultDisposition::ContinueOnFailure, expr )
#define CHECK_NOFAIL( expr ) INTERNAL_CATCH_TEST( "CHECK_NOFAIL", Catch::ResultDisposition::ContinueOnFailure | Catch::ResultDisposition::SuppressFail, expr )
#define CHECK_THROWS( expr ) INTERNAL_CATCH_THROWS( "CHECK_THROWS", Catch::ResultDisposition::ContinueOnFailure, "", expr )
#define CHECK_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( "CHECK_THROWS_AS", exceptionType, Catch::ResultDisposition::ContinueOnFailure, expr )
#define CHECK_THROWS_WITH( expr, matcher ) INTERNAL_CATCH_THROWS( "CHECK_THROWS_WITH", Catch::ResultDisposition::ContinueOnFailure, matcher, expr )
#define CHECK_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( "CHECK_NOTHROW", Catch::ResultDisposition::ContinueOnFailure, expr )
#define CHECK_THAT( arg, matcher ) INTERNAL_CHECK_THAT( "CHECK_THAT", matcher, Catch::ResultDisposition::ContinueOnFailure, arg )
#if defined(CATCH_CONFIG_FAST_COMPILE)
#define REQUIRE_THAT( arg, matcher ) INTERNAL_CHECK_THAT_NO_TRY( "REQUIRE_THAT", matcher, Catch::ResultDisposition::Normal, arg )
#else
#define REQUIRE_THAT( arg, matcher ) INTERNAL_CHECK_THAT( "REQUIRE_THAT", matcher, Catch::ResultDisposition::Normal, arg )
#endif
#define INFO( msg ) INTERNAL_CATCH_INFO( "INFO", msg )
#define WARN( msg ) INTERNAL_CATCH_MSG( "WARN", Catch::ResultWas::Warning, Catch::ResultDisposition::ContinueOnFailure, msg )
#define SCOPED_INFO( msg ) INTERNAL_CATCH_INFO( "INFO", msg )
#define CAPTURE( msg ) INTERNAL_CATCH_INFO( "CAPTURE", #msg " := " << Catch::toString(msg) )
#define SCOPED_CAPTURE( msg ) INTERNAL_CATCH_INFO( "CAPTURE", #msg " := " << Catch::toString(msg) )
#ifdef CATCH_CONFIG_VARIADIC_MACROS
#define TEST_CASE( ... ) INTERNAL_CATCH_TESTCASE( __VA_ARGS__ )
#define TEST_CASE_METHOD( className, ... ) INTERNAL_CATCH_TEST_CASE_METHOD( className, __VA_ARGS__ )
#define METHOD_AS_TEST_CASE( method, ... ) INTERNAL_CATCH_METHOD_AS_TEST_CASE( method, __VA_ARGS__ )
#define REGISTER_TEST_CASE( Function, ... ) INTERNAL_CATCH_REGISTER_TESTCASE( Function, __VA_ARGS__ )
#define SECTION( ... ) INTERNAL_CATCH_SECTION( __VA_ARGS__ )
#define FAIL( ... ) INTERNAL_CATCH_MSG( "FAIL", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::Normal, __VA_ARGS__ )
#define FAIL_CHECK( ... ) INTERNAL_CATCH_MSG( "FAIL_CHECK", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::ContinueOnFailure, __VA_ARGS__ )
#define SUCCEED( ... ) INTERNAL_CATCH_MSG( "SUCCEED", Catch::ResultWas::Ok, Catch::ResultDisposition::ContinueOnFailure, __VA_ARGS__ )
#else
#define TEST_CASE( name, description ) INTERNAL_CATCH_TESTCASE( name, description )
#define TEST_CASE_METHOD( className, name, description ) INTERNAL_CATCH_TEST_CASE_METHOD( className, name, description )
#define METHOD_AS_TEST_CASE( method, name, description ) INTERNAL_CATCH_METHOD_AS_TEST_CASE( method, name, description )
#define REGISTER_TEST_CASE( method, name, description ) INTERNAL_CATCH_REGISTER_TESTCASE( method, name, description )
#define SECTION( name, description ) INTERNAL_CATCH_SECTION( name, description )
#define FAIL( msg ) INTERNAL_CATCH_MSG( "FAIL", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::Normal, msg )
#define FAIL_CHECK( msg ) INTERNAL_CATCH_MSG( "FAIL_CHECK", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::ContinueOnFailure, msg )
#define SUCCEED( msg ) INTERNAL_CATCH_MSG( "SUCCEED", Catch::ResultWas::Ok, Catch::ResultDisposition::ContinueOnFailure, msg )
#endif
#define ANON_TEST_CASE() INTERNAL_CATCH_TESTCASE( "", "" )
#define REGISTER_REPORTER( name, reporterType ) INTERNAL_CATCH_REGISTER_REPORTER( name, reporterType )
#define REGISTER_LEGACY_REPORTER( name, reporterType ) INTERNAL_CATCH_REGISTER_LEGACY_REPORTER( name, reporterType )
#define GENERATE( expr) INTERNAL_CATCH_GENERATE( expr )
#endif
#define CATCH_TRANSLATE_EXCEPTION( signature ) INTERNAL_CATCH_TRANSLATE_EXCEPTION( signature )
// "BDD-style" convenience wrappers
#ifdef CATCH_CONFIG_VARIADIC_MACROS
#define SCENARIO( ... ) TEST_CASE( "Scenario: " __VA_ARGS__ )
#define SCENARIO_METHOD( className, ... ) INTERNAL_CATCH_TEST_CASE_METHOD( className, "Scenario: " __VA_ARGS__ )
#else
#define SCENARIO( name, tags ) TEST_CASE( "Scenario: " name, tags )
#define SCENARIO_METHOD( className, name, tags ) INTERNAL_CATCH_TEST_CASE_METHOD( className, "Scenario: " name, tags )
#endif
#define GIVEN( desc ) SECTION( std::string(" Given: ") + desc, "" )
#define WHEN( desc ) SECTION( std::string(" When: ") + desc, "" )
#define AND_WHEN( desc ) SECTION( std::string("And when: ") + desc, "" )
#define THEN( desc ) SECTION( std::string(" Then: ") + desc, "" )
#define AND_THEN( desc ) SECTION( std::string(" And: ") + desc, "" )
using Catch::Detail::Approx;
// #included from: internal/catch_reenable_warnings.h
#define TWOBLUECUBES_CATCH_REENABLE_WARNINGS_H_INCLUDED
#ifdef __clang__
# ifdef __ICC // icpc defines the __clang__ macro
#  pragma warning(pop)
# else
#  pragma clang diagnostic pop
# endif
#elif defined __GNUC__
# pragma GCC diagnostic pop
#endif
#endif // TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED
testthat/inst/CITATION0000644000176200001440000000116413216707011014224 0ustar liggesusers
citHeader("To cite the testthat package in publications, use:")

citEntry(
  entry = "Article",
  author = personList(as.person("Hadley Wickham")),
  title = "testthat: Get Started with Testing",
  journal = "The R Journal",
  year = 2011,
  volume = 3,
  pages = "5--10",
  url = "https://journal.r-project.org/archive/2011-1/RJournal_2011-1_Wickham.pdf",
  textVersion = paste(
    "Hadley Wickham. testthat: Get Started with Testing.",
    "The R Journal, vol. 3, no. 1, pp. 5--10, 2011"
  )
)

citFooter("As testthat is continually evolving, you may want to cite its version number. Find it with 'help(package=testthat)'.")
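
The macros defined in the bundled header above are the public surface of the vendored Catch framework: `REQUIRE`/`CHECK` and friends record assertions, `TEST_CASE`/`SECTION` group them, and `SCENARIO`/`GIVEN`/`WHEN`/`THEN` are BDD-style relabellings of the same machinery. The following is a minimal, self-contained sketch of how they fit together; the file name, the include path, and the `sum2()` helper are illustrative assumptions only, and a real testthat package would normally wire its C++ tests up through `use_catch()` and run them with `expect_cpp_tests_pass()` rather than building a standalone Catch binary.

``` cpp
// Hypothetical standalone sketch (assumed file name: test-sum2.cpp).
// It compiles against the single-include Catch header reproduced above;
// the include path and sum2() are assumptions, not part of this package.
#define CATCH_CONFIG_MAIN   // let Catch generate main() for this sketch
#include "catch.hpp"

static int sum2(int a, int b) { return a + b; }   // hypothetical code under test

TEST_CASE( "sum2 adds two integers", "[arithmetic]" ) {
    REQUIRE( sum2(1, 2) == 3 );   // REQUIRE aborts this test case on failure
    CHECK( sum2(-1, 1) == 0 );    // CHECK records a failure but keeps going

    SECTION( "zero is the identity element", "" ) {
        CAPTURE( sum2(5, 0) );    // logged with the failure message if an assertion below fails
        CHECK( sum2(5, 0) == 5 );
    }
}

// The BDD wrappers expand to TEST_CASE/SECTION with descriptive prefixes.
SCENARIO( "adding two integers", "[bdd]" ) {
    GIVEN( "two positive integers" ) {
        int a = 2, b = 3;
        WHEN( "they are summed" ) {
            int result = sum2(a, b);
            THEN( "the result is their arithmetic sum" ) {
                REQUIRE( result == 5 );
            }
        }
    }
}
```

As in any Catch-based suite, each leaf `SECTION` (and therefore each `WHEN`/`THEN` branch) causes the enclosing test case body to be re-run from the top, so the `GIVEN` setup is repeated for every branch rather than shared between them.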