testthat/0000755000176200001440000000000014172362302012112 5ustar liggesuserstestthat/NAMESPACE0000644000176200001440000001202114172347334013335 0ustar liggesusers# Generated by roxygen2: do not edit by hand S3method(as.data.frame,testthat_results) S3method(as.expectation,default) S3method(as.expectation,error) S3method(as.expectation,expectation) S3method(as.expectation,skip) S3method(as.expectation,warning) S3method(compare,POSIXt) S3method(compare,character) S3method(compare,default) S3method(compare,numeric) S3method(format,expectation) S3method(format,expectation_success) S3method(format,mismatch_character) S3method(format,mismatch_numeric) S3method(is_informative_error,default) S3method(output_replay,character) S3method(output_replay,error) S3method(output_replay,message) S3method(output_replay,recordedplot) S3method(output_replay,source) S3method(output_replay,warning) S3method(print,comparison) S3method(print,expectation) S3method(print,mismatch_character) S3method(print,mismatch_numeric) S3method(print,testthat_results) S3method(snapshot_replay,character) S3method(snapshot_replay,condition) S3method(snapshot_replay,source) S3method(testthat_print,default) export("%>%") export(CheckReporter) export(CompactProgressReporter) export(DebugReporter) export(FailReporter) export(JunitReporter) export(ListReporter) export(LocationReporter) export(MinimalReporter) export(MultiReporter) export(ParallelProgressReporter) export(ProgressReporter) export(RStudioReporter) export(Reporter) export(SilentReporter) export(StopReporter) export(SummaryReporter) export(TapReporter) export(TeamcityReporter) export(announce_snapshot_file) export(auto_test) export(auto_test_package) export(capture_condition) export(capture_error) export(capture_expectation) export(capture_message) export(capture_messages) export(capture_output) export(capture_output_lines) export(capture_warning) export(capture_warnings) export(check_reporter) export(compare) export(compare_file_binary) export(compare_file_text) export(context) export(context_start_file) export(default_compact_reporter) export(default_parallel_reporter) export(default_reporter) export(describe) export(edition_get) export(equals) export(equals_reference) export(evaluate_promise) export(exp_signal) export(expect) export(expect_condition) export(expect_cpp_tests_pass) export(expect_equal) export(expect_equal_to_reference) export(expect_equivalent) export(expect_error) export(expect_failure) export(expect_false) export(expect_gt) export(expect_gte) export(expect_identical) export(expect_invisible) export(expect_is) export(expect_known_hash) export(expect_known_output) export(expect_known_value) export(expect_length) export(expect_less_than) export(expect_lt) export(expect_lte) export(expect_mapequal) export(expect_match) export(expect_message) export(expect_more_than) export(expect_named) export(expect_no_match) export(expect_null) export(expect_output) export(expect_output_file) export(expect_reference) export(expect_s3_class) export(expect_s4_class) export(expect_setequal) export(expect_silent) export(expect_snapshot) export(expect_snapshot_error) export(expect_snapshot_file) export(expect_snapshot_output) export(expect_snapshot_value) export(expect_snapshot_warning) export(expect_success) export(expect_that) export(expect_true) export(expect_type) export(expect_vector) export(expect_visible) export(expect_warning) export(expectation) export(fail) export(find_test_scripts) export(get_reporter) export(gives_warning) export(has_names) export(is.expectation) 
export(is_a) export(is_equivalent_to) export(is_false) export(is_identical_to) export(is_informative_error) export(is_less_than) export(is_more_than) export(is_null) export(is_parallel) export(is_testing) export(is_true) export(local_edition) export(local_mock) export(local_reproducible_output) export(local_snapshotter) export(local_test_context) export(local_test_directory) export(make_expectation) export(matches) export(new_expectation) export(not) export(prints_text) export(quasi_label) export(run_cpp_tests) export(set_reporter) export(setup) export(show_failure) export(shows_message) export(skip) export(skip_if) export(skip_if_not) export(skip_if_not_installed) export(skip_if_offline) export(skip_if_translated) export(skip_on_appveyor) export(skip_on_bioc) export(skip_on_ci) export(skip_on_covr) export(skip_on_cran) export(skip_on_os) export(skip_on_travis) export(snapshot_accept) export(snapshot_review) export(source_dir) export(source_file) export(source_test_helpers) export(source_test_setup) export(source_test_teardown) export(succeed) export(takes_less_than) export(teardown) export(teardown_env) export(test_check) export(test_dir) export(test_env) export(test_example) export(test_examples) export(test_file) export(test_local) export(test_package) export(test_path) export(test_rd) export(test_that) export(testing_package) export(testthat_example) export(testthat_examples) export(testthat_print) export(testthat_tolerance) export(throws_error) export(try_again) export(use_catch) export(verify_output) export(watch) export(with_mock) export(with_reporter) import(rlang) importFrom(R6,R6Class) importFrom(brio,readLines) importFrom(brio,writeLines) importFrom(magrittr,"%>%") useDynLib(testthat, .registration = TRUE) testthat/LICENSE0000644000176200001440000000007214164710002013111 0ustar liggesusersYEAR: 2013-2019 COPYRIGHT HOLDER: Hadley Wickham; RStudio testthat/README.md0000644000176200001440000000477314167672266013425 0ustar liggesusers # testthat [![CRAN status](https://www.r-pkg.org/badges/version/testthat)](https://cran.r-project.org/package=testthat) [![R-CMD-check](https://github.com/r-lib/testthat/workflows/R-CMD-check/badge.svg)](https://github.com/r-lib/testthat/actions) [![Codecov test coverage](https://codecov.io/gh/r-lib/testthat/branch/main/graph/badge.svg)](https://app.codecov.io/gh/r-lib/testthat?branch=main) ## Overview Testing your code can be painful and tedious, but it greatly increases the quality of your code. **testthat** tries to make testing as fun as possible, so that you get a visceral satisfaction from writing tests. Testing should be addictive, so you do it all the time. To make that happen, testthat: - Provides functions that make it easy to describe what you expect a function to do, including catching errors, warnings, and messages. - Easily integrates in your existing workflow, whether it’s informal testing on the command line, building test suites, or using R CMD check. - Displays test progress visually, showing a pass, fail, or error for every expectation. If you’re using the terminal or a recent version of RStudio, it’ll even colour the output. testthat draws inspiration from the xUnit family of testing packages, as well as from many of the innovative ruby testing libraries, like [rspec](https://rspec.info/), [testy](https://github.com/ahoward/testy), [bacon](https://github.com/leahneukirchen/bacon) and [cucumber](https://cucumber.io). testthat is the most popular unit testing package for R and is used by thousands of CRAN packages. 
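To give a flavour of the syntax, here is a minimal, self-contained example (a sketch using only core testthat functions):

``` r
library(testthat)

# A test groups related expectations under a human-readable description
test_that("basic arithmetic works", {
  expect_equal(1 + 1, 2)
  expect_error(stop("boom"), "boom")
})
```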
If you’re not familiar with testthat, the [testing chapter](https://r-pkgs.org/tests.html) in [R packages](https://r-pkgs.org) gives a good overview, along with workflow advice and concrete examples. ## Installation ``` r # Install the released version from CRAN install.packages("testthat") # Or the development version from GitHub: # install.packages("devtools") devtools::install_github("r-lib/testthat") ``` ## Usage The easiest way to get started is with [usethis](https://github.com/r-lib/usethis). Assuming you’re in a package directory, just run `usethis::use_test("name")` to create a test file, and set up all the other infrastructure you need. If you’re using RStudio, press Cmd/Ctrl + Shift + T (or run `devtools::test()` if not) to run all the tests in a package. testthat/man/0000755000176200001440000000000014172347710012673 5ustar liggesuserstestthat/man/testthat-package.Rd0000644000176200001440000000224614164710002016404 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/testthat-package.R \docType{package} \name{testthat-package} \alias{testthat} \alias{testthat-package} \title{An R package to make testing fun!} \description{ Try the example below. Have a look at the references and learn more from function documentation such as \code{\link[=test_that]{test_that()}}. } \section{Options}{ \itemize{ \item \code{testthat.use_colours}: Should the output be coloured? (Default: \code{TRUE}). \item \code{testthat.summary.max_reports}: The maximum number of detailed test reports printed for the summary reporter (default: 10). \item \code{testthat.summary.omit_dots}: Omit progress dots in the summary reporter (default: \code{FALSE}). } } \seealso{ Useful links: \itemize{ \item \url{https://testthat.r-lib.org} \item \url{https://github.com/r-lib/testthat} \item Report bugs at \url{https://github.com/r-lib/testthat/issues} } } \author{ \strong{Maintainer}: Hadley Wickham \email{hadley@rstudio.com} Other contributors: \itemize{ \item RStudio [copyright holder, funder] \item R Core team (Implementation of utils::recover()) [contributor] } } \keyword{internal} testthat/man/local_snapshotter.Rd0000644000176200001440000000070114167646004016706 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/snapshot-reporter.R \name{local_snapshotter} \alias{local_snapshotter} \title{Instantiate local snapshotting context} \usage{ local_snapshotter( snap_dir = NULL, cleanup = FALSE, fail_on_new = FALSE, .env = parent.frame() ) } \description{ Needed if you want to run snapshot tests outside of the usual testthat framework. For expert use only. } \keyword{internal} testthat/man/auto_test.Rd0000644000176200001440000000300614164710002015157 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/auto-test.R \name{auto_test} \alias{auto_test} \title{Watches code and tests for changes, rerunning tests as appropriate.} \usage{ auto_test( code_path, test_path, reporter = default_reporter(), env = test_env(), hash = TRUE ) } \arguments{ \item{code_path}{path to directory containing code} \item{test_path}{path to directory containing tests} \item{reporter}{test reporter to use} \item{env}{environment in which to execute test suite.} \item{hash}{Passed on to \code{\link[=watch]{watch()}}. When FALSE, uses less accurate modification time stamps, but those are faster for large files.} } \description{ The idea behind \code{auto_test()} is that you just leave it running while you develop your code.
Every time you save a file it will be automatically tested and you can easily see if your changes have caused any test failures. } \details{ The current strategy for rerunning tests is as follows: \itemize{ \item if any code has changed, then those files are reloaded and all tests rerun \item otherwise, each new or modified test is run } In the future, \code{auto_test()} might implement one of the following more intelligent alternatives: \itemize{ \item Use codetools to build up a dependency tree and then rerun tests only when a dependency changes. \item Mimic Ruby's autotest and rerun only failing tests until they pass, and then rerun all tests. } } \seealso{ \code{\link[=auto_test_package]{auto_test_package()}} } \keyword{debugging} testthat/man/expect_setequal.Rd0000644000176200001440000000301214164710002016346 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-setequal.R \name{expect_setequal} \alias{expect_setequal} \alias{expect_mapequal} \title{Does code return a vector containing the expected values?} \usage{ expect_setequal(object, expected) expect_mapequal(object, expected) } \arguments{ \item{object}{Computation and value to compare it to. Both arguments support limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{expected}{Computation and value to compare it to. Both arguments support limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} } \description{ \itemize{ \item \code{expect_setequal(x, y)} tests that every element of \code{x} occurs in \code{y}, and that every element of \code{y} occurs in \code{x}. \item \code{expect_mapequal(x, y)} tests that \code{x} and \code{y} have the same names, and that \code{x[names(y)]} equals \code{y}. } } \details{ Note that \code{expect_setequal()} ignores names, and you will be warned if both \code{object} and \code{expected} have them. } \examples{ expect_setequal(letters, rev(letters)) show_failure(expect_setequal(letters[-1], rev(letters))) x <- list(b = 2, a = 1) expect_mapequal(x, list(a = 1, b = 2)) show_failure(expect_mapequal(x, list(a = 1))) show_failure(expect_mapequal(x, list(a = 1, b = "x"))) show_failure(expect_mapequal(x, list(a = 1, b = 2, c = 3))) } testthat/man/reporter-accessors.Rd0000644000176200001440000000217514164710002017001 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-zzz.R \name{reporter-accessors} \alias{reporter-accessors} \alias{set_reporter} \alias{get_reporter} \alias{with_reporter} \title{Get and set active reporter.} \usage{ set_reporter(reporter) get_reporter() with_reporter(reporter, code, start_end_reporter = TRUE) } \arguments{ \item{reporter}{Reporter to use to summarise output. Can be supplied as a string (e.g. "summary") or as an R6 object (e.g. \code{SummaryReporter$new()}). See \link{Reporter} for more details and a list of built-in reporters.} \item{code}{Code to execute.} \item{start_end_reporter}{Should the reporter's \code{start_reporter()} and \code{end_reporter()} methods be called? For expert use only.} } \value{ \code{with_reporter()} invisibly returns the reporter active when \code{code} was evaluated. } \description{ \code{get_reporter()} and \code{set_reporter()} access and modify the current "active" reporter.
Generally, these functions should not be called directly; instead use \code{with_reporter()} to temporarily change, then reset, the active reporter. } \keyword{internal} testthat/man/context_start_file.Rd0000644000176200001440000000050214164710002017046 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/context.R \name{context_start_file} \alias{context_start_file} \title{Start test context from a file name} \usage{ context_start_file(name) } \arguments{ \item{name}{file name} } \description{ For use in external reporters. } \keyword{internal} testthat/man/expect_vector.Rd0000644000176200001440000000231514164710002016022 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-vector.R \name{expect_vector} \alias{expect_vector} \title{Does code return a vector with the expected size and/or prototype?} \usage{ expect_vector(object, ptype = NULL, size = NULL) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{ptype}{(Optional) Vector prototype to test against. Should be a size-0 (empty) generalised vector.} \item{size}{(Optional) Size to check for.} } \description{ \code{expect_vector()} is a thin wrapper around \code{\link[vctrs:vec_assert]{vctrs::vec_assert()}}, converting the results of that function into the expectations used by testthat. This means that it uses the vctrs concepts of \code{ptype} (prototype) and \code{size}. See details in \url{https://vctrs.r-lib.org/articles/type-size.html}. } \examples{ if (requireNamespace("vctrs") && packageVersion("vctrs") > "0.1.0.9002") { expect_vector(1:10, ptype = integer(), size = 10) show_failure(expect_vector(1:10, ptype = integer(), size = 5)) show_failure(expect_vector(1:10, ptype = character(), size = 5)) } } testthat/man/quasi_label.Rd0000644000176200001440000000331514164710002015432 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/quasi-label.R \name{quasi_label} \alias{quasi_label} \title{Quasi-labelling} \usage{ quasi_label(quo, label = NULL, arg = "quo") } \arguments{ \item{quo}{A quosure created by \code{rlang::enquo()}.} \item{label}{An optional label to override the default. This is only provided for internal usage. Modern expectations should not include a \code{label} parameter.} \item{arg}{Argument name shown in error message if \code{quo} is missing.} } \value{ A list containing two elements: \item{val}{The evaluated value of \code{quo}} \item{lab}{The quasiquoted label generated from \code{quo}} } \description{ The first argument to every \code{expect_} function can use unquoting to construct better labels. This makes it easy to create informative labels when expectations are used inside a function or a for loop. \code{quasi_label()} wraps up the details, returning the expression and label. } \section{Limitations}{ Because all \code{expect_} functions use unquoting to generate more informative labels, you cannot use unquoting for other purposes. Instead, you'll need to perform all other unquoting outside of the expectation and only test the results.
} \examples{ f <- function(i) if (i > 3) i * 9 else i * 10 i <- 10 # This sort of expression commonly occurs inside a for loop or function # And the failure isn't helpful because you can't see the value of i # that caused the problem: show_failure(expect_equal(f(i), i * 10)) # To overcome this issue, testthat allows you to unquote expressions using # !!. This causes the failure message to show the value rather than the # variable name show_failure(expect_equal(f(!!i), !!(i * 10))) } \keyword{internal} testthat/man/watch.Rd0000644000176200001440000000203613171137773014275 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/watcher.R \name{watch} \alias{watch} \title{Watch a directory for changes (additions, deletions & modifications).} \usage{ watch(path, callback, pattern = NULL, hash = TRUE) } \arguments{ \item{path}{character vector of paths to watch. Omit trailing backslash.} \item{callback}{function called every time a change occurs. It should have three parameters: added, deleted, modified, and should return TRUE to keep watching, or FALSE to stop.} \item{pattern}{file pattern passed to \code{\link[=dir]{dir()}}} \item{hash}{hashes are more accurate at detecting changes, but are slower for large files. When FALSE, uses modification time stamps} } \description{ This is used to power the \code{\link[=auto_test]{auto_test()}} and \code{\link[=auto_test_package]{auto_test_package()}} functions which are used to rerun tests whenever source code changes. } \details{ Use Ctrl + break (Windows), Esc (Mac GUI) or Ctrl + C (command line) to stop the watcher. } \keyword{internal} testthat/man/auto_test_package.Rd0000644000176200001440000000130714164710002016632 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/auto-test.R \name{auto_test_package} \alias{auto_test_package} \title{Watches a package for changes, rerunning tests as appropriate.} \usage{ auto_test_package(pkg = ".", reporter = default_reporter(), hash = TRUE) } \arguments{ \item{pkg}{path to package} \item{reporter}{test reporter to use} \item{hash}{Passed on to \code{\link[=watch]{watch()}}. When FALSE, uses less accurate modification time stamps, but those are faster for large files.} } \description{ Watches a package for changes, rerunning tests as appropriate. } \seealso{ \code{\link[=auto_test]{auto_test()}} for details on how the method works } \keyword{debugging} testthat/man/test_that.Rd0000644000176200001440000000225314164710002015150 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-that.R \name{test_that} \alias{test_that} \title{Run a test} \usage{ test_that(desc, code) } \arguments{ \item{desc}{Test name. Names should be brief, but evocative. They are only used by humans, so keep them readable.} \item{code}{Test code containing expectations. Braces (\code{{}}) should always be used in order to get accurate location data for test failures.} } \value{ When run interactively, returns \code{invisible(TRUE)} if all tests pass, otherwise throws an error. } \description{ A test encapsulates a series of expectations about a small, self-contained set of functionality. Each test lives in a file and contains multiple expectations, like \code{\link[=expect_equal]{expect_equal()}} or \code{\link[=expect_error]{expect_error()}}. Tests are evaluated in their own environments, and should not affect global state.
} \examples{ test_that("trigonometric functions match identities", { expect_equal(sin(pi / 4), 1 / sqrt(2)) expect_equal(cos(pi / 4), 1 / sqrt(2)) expect_equal(tan(pi / 4), 1) }) \dontrun{ test_that("trigonometric functions match identities", { expect_equal(sin(pi / 4), 1) }) } } testthat/man/expect_equivalent.Rd0000644000176200001440000000341114164710002016673 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-equality.R \name{expect_equivalent} \alias{expect_equivalent} \title{Is an object equal to the expected value, ignoring attributes?} \usage{ expect_equivalent( object, expected, ..., info = NULL, label = NULL, expected.label = NULL ) } \arguments{ \item{object}{Computation and value to compare it to. Both arguments support limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{expected}{Computation and value to compare it to. Both arguments support limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{...}{Passed on to \code{\link[=compare]{compare()}}.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} \item{expected.label}{Used to customise failure messages. For expert use only.} } \description{ Compares \code{object} and \code{expected} using \code{\link[=all.equal]{all.equal()}} and \code{check.attributes = FALSE}. } \section{3rd edition}{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} \code{expect_equivalent()} is deprecated in the 3rd edition. Instead use \code{expect_equal(ignore_attr = TRUE)}. } \examples{ # expect_equivalent() ignores attributes a <- b <- 1:3 names(b) <- letters[1:3] \dontrun{ expect_equal(a, b) } expect_equivalent(a, b) } \keyword{internal} testthat/man/local_edition.Rd0000644000176200001440000000121414164710002015752 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/edition.R \name{local_edition} \alias{local_edition} \alias{edition_get} \title{Temporarily change the active testthat edition} \usage{ local_edition(x, .env = parent.frame()) edition_get() } \arguments{ \item{x}{Edition. Should be a single integer.} \item{.env}{Environment that controls scope of changes. For expert use only.} } \description{ \code{local_edition()} allows you to temporarily (within a single test or a single test file) change the active edition of testthat. \code{edition_get()} allows you to retrieve the currently active edition. } \keyword{internal} testthat/man/expect_output_file.Rd0000644000176200001440000000153314164710002017060 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-known.R \name{expect_output_file} \alias{expect_output_file} \title{Expectations: is the output or the value equal to a known good value?} \usage{ expect_output_file( object, file, update = TRUE, ..., info = NULL, label = NULL, print = FALSE, width = 80 ) } \description{ \code{expect_output_file()} behaves identically to \code{\link[=expect_known_output]{expect_known_output()}}.
} \section{3rd edition}{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} \code{expect_output_file()} is deprecated in the 3rd edition; please use \code{\link[=expect_snapshot_output]{expect_snapshot_output()}} and friends instead. } \keyword{internal} testthat/man/SummaryReporter.Rd0000644000176200001440000000221214164710002016324 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-summary.R \name{SummaryReporter} \alias{SummaryReporter} \title{Test reporter: summary of errors.} \description{ This is a reporter designed for interactive usage: it lets you know which tests have run successfully, as well as fully reporting information about failures and errors. } \details{ You can use the \code{max_reports} field to control the maximum number of detailed reports produced by this reporter. This is useful when running with \code{\link[=auto_test]{auto_test()}}. As an additional benefit, this reporter will praise you from time to time if all your tests pass. } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/expect_null.Rd0000644000176200001440000000253514164710002015476 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-constant.R \name{expect_null} \alias{expect_null} \title{Does code return \code{NULL}?} \usage{ expect_null(object, info = NULL, label = NULL) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} } \description{ This is a special case because \code{NULL} is a singleton so it's possible to check for it either with \code{expect_equal(x, NULL)} or \code{expect_type(x, "NULL")}.
} \examples{ x <- NULL y <- 10 expect_null(x) show_failure(expect_null(y)) } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_length}()}, \code{\link{expect_match}()}, \code{\link{expect_named}()}, \code{\link{expect_output}()}, \code{\link{expect_reference}()}, \code{\link{expect_silent}()}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} \keyword{internal} testthat/man/expect_known_output.Rd0000644000176200001440000000702614164710002017300 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-known.R \name{expect_known_output} \alias{expect_known_output} \alias{expect_known_value} \alias{expect_equal_to_reference} \alias{expect_known_hash} \title{Expectations: is the output or the value equal to a known good value?} \usage{ expect_known_output( object, file, update = TRUE, ..., info = NULL, label = NULL, print = FALSE, width = 80 ) expect_known_value( object, file, update = TRUE, ..., info = NULL, label = NULL, version = 2 ) expect_known_hash(object, hash = NULL) } \arguments{ \item{object}{Computation and value to compare it to. Both arguments support limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{file}{File path where known value/output will be stored.} \item{update}{Should the file be updated? Defaults to \code{TRUE}, with the expectation that you'll notice changes because of the first failure, and then see the modified files in git.} \item{...}{Passed on to \code{\link[waldo:compare]{waldo::compare()}}.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} \item{print}{If \code{TRUE} and the result of evaluating \code{code} is visible, print the result using \code{testthat_print()}.} \item{width}{Number of characters per line of output. This does not inherit from \code{getOption("width")} so that tests always use the same output width, minimising spurious differences.} \item{version}{The serialization format version to use. The default, 2, was the default format from R 1.4.0 to 3.5.3. Version 3 became the default from R 3.6.0 and can only be read by R versions 3.5.0 and higher.} \item{hash}{Known hash value. Leave empty and you'll be informed what to use in the test output.} } \description{ For complex printed output and objects, it is often challenging to describe exactly what you expect to see. \code{expect_known_value()} and \code{expect_known_output()} provide a slightly weaker guarantee, simply asserting that the values have not changed since the last time that you ran them. } \details{ These expectations should be used in conjunction with Git, as otherwise there is no way to revert to previous values. Git is particularly useful in conjunction with \code{expect_known_output()} as the diffs will show you exactly what has changed. Note that known values will only be updated when running tests interactively. \verb{R CMD check} clones the package source so any changes to the reference files will occur in a temporary directory, and will not be synchronised back to the source package.
} \section{3rd edition}{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} \code{expect_known_output()} and friends are deprecated in the 3rd edition; please use \code{\link[=expect_snapshot_output]{expect_snapshot_output()}} and friends instead. } \examples{ tmp <- tempfile() # The first run always succeeds expect_known_output(mtcars[1:10, ], tmp, print = TRUE) # Subsequent runs will succeed only if the file is unchanged # This will succeed: expect_known_output(mtcars[1:10, ], tmp, print = TRUE) \dontrun{ # This will fail expect_known_output(mtcars[1:9, ], tmp, print = TRUE) } } \keyword{internal} testthat/man/expect_named.Rd0000644000176200001440000000366714164710002015617 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-named.R \name{expect_named} \alias{expect_named} \title{Does code return a vector with (given) names?} \usage{ expect_named( object, expected, ignore.order = FALSE, ignore.case = FALSE, info = NULL, label = NULL ) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{expected}{Character vector of expected names. Leave missing to match any names. Use \code{NULL} to check for absence of names.} \item{ignore.order}{If \code{TRUE}, sorts names before comparing to ignore the effect of order.} \item{ignore.case}{If \code{TRUE}, lowercases all names to ignore the effect of case.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} } \description{ You can either check for the presence of names (leaving \code{expected} blank), specific names (by supplying a vector of names), or absence of names (with \code{NULL}). } \examples{ x <- c(a = 1, b = 2, c = 3) expect_named(x) expect_named(x, c("a", "b", "c")) # Use options to control sensitivity expect_named(x, c("B", "C", "A"), ignore.order = TRUE, ignore.case = TRUE) # Can also check for the absence of names with NULL z <- 1:4 expect_named(z, NULL) } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_length}()}, \code{\link{expect_match}()}, \code{\link{expect_null}()}, \code{\link{expect_output}()}, \code{\link{expect_reference}()}, \code{\link{expect_silent}()}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/fail.Rd0000644000176200001440000000206714164710002014067 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-that.R \name{fail} \alias{fail} \alias{succeed} \title{Default expectations that always succeed or fail.} \usage{ fail( message = "Failure has been forced", info = NULL, trace_env = caller_env() ) succeed(message = "Success has been forced", info = NULL) } \arguments{ \item{message}{a string to display.} \item{info}{Character vector containing additional information.
Included for backward compatibility only and new expectations should not use it.} \item{trace_env}{If \code{is.null(trace)}, this is used to automatically generate a traceback running from \code{test_code()}/\code{test_file()} to \code{trace_env}. You'll generally only need to set this if you're wrapping an expectation inside another function.} } \description{ These allow you to manually trigger success or failure. Failure is particularly useful to test a pre-condition or to mark a test as not yet implemented. } \examples{ \dontrun{ test_that("this test fails", fail()) test_that("this test succeeds", succeed()) } } testthat/man/test_env.Rd0000644000176200001440000000101214164710002014770 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-env.R \name{test_env} \alias{test_env} \title{Generate default testing environment.} \usage{ test_env(package = NULL) } \description{ We use a new environment which inherits from \code{\link[=globalenv]{globalenv()}} or a package namespace. In an ideal world, we'd avoid putting the global environment on the search path for tests, but it's not currently possible without losing the ability to load packages in tests. } \keyword{internal} testthat/man/LocationReporter.Rd0000644000176200001440000000157414164710002016451 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-location.R \name{LocationReporter} \alias{LocationReporter} \title{Test reporter: location} \description{ This reporter simply prints the location of every expectation and error. This is useful if you're trying to figure out the source of a segfault, or you want to figure out which code triggers a C/C++ breakpoint. } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/StopReporter.Rd0000644000176200001440000000215614164710002015623 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-stop.R \name{StopReporter} \alias{StopReporter} \title{Test reporter: stop on error} \description{ The default reporter used when \code{\link[=expect_that]{expect_that()}} is run interactively. It responds by \code{\link[=stop]{stop()}}ping on failures and doing nothing otherwise. This will ensure that a failing test will raise an error. } \details{ This should be used when doing a quick and dirty test, or during the final automated testing of R CMD check. Otherwise, use a reporter that runs all tests and gives you more context about the problem.
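For example, to use it explicitly on a single file, something like the following sketch should work (\code{testthat_example()} ships with testthat):\preformatted{path <- testthat_example("success")
test_file(path, reporter = StopReporter$new())
}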
} \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/capture_condition.Rd0000644000176200001440000000345314164710002016665 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/deprec-condition.R \name{capture_condition} \alias{capture_condition} \alias{capture_error} \alias{capture_expectation} \alias{capture_message} \alias{capture_warning} \alias{capture_messages} \alias{capture_warnings} \title{Capture conditions, including messages, warnings, expectations, and errors.} \usage{ capture_condition(code, entrace = FALSE) capture_error(code, entrace = FALSE) capture_expectation(code, entrace = FALSE) capture_message(code, entrace = FALSE) capture_warning(code, entrace = FALSE) capture_messages(code) capture_warnings(code) } \arguments{ \item{code}{Code to evaluate} \item{entrace}{Whether to add a \link[rlang:trace_back]{backtrace} to the captured condition.} } \value{ Singular functions (\code{capture_condition}, \code{capture_expectation} etc) return a condition object. \code{capture_messages()} and \code{capture_warnings} return a character vector of message text. } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} These functions allow you to capture the side-effects of a function call including printed output, messages and warnings. We no longer recommend that you use these functions, instead relying on the \code{\link[=expect_message]{expect_message()}} and friends to bubble up unmatched conditions. If you just want to silence unimportant warnings, use \code{\link[=suppressWarnings]{suppressWarnings()}}. } \examples{ f <- function() { message("First") warning("Second") message("Third") } capture_message(f()) capture_messages(f()) capture_warning(f()) capture_warnings(f()) # Condition will capture anything capture_condition(f()) } \keyword{internal} testthat/man/is_informative_error.Rd0000644000176200001440000000170114164710002017375 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/deprec-condition.R \name{is_informative_error} \alias{is_informative_error} \title{Is an error informative?} \usage{ is_informative_error(x, ...) } \arguments{ \item{x}{An error object.} \item{...}{These dots are for future extensions and must be empty.} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} \code{is_informative_error()} is a generic predicate that indicates whether testthat users should explicitly test for an error class. Since we no longer recommend you do that, this generic has been deprecated. 
} \details{ A few classes are hard-coded as uninformative: \itemize{ \item \code{simpleError} \item \code{rlang_error} unless a subclass is detected \item \code{Rcpp::eval_error} \item \code{Rcpp::exception} } } \keyword{internal} testthat/man/oldskool.Rd0000644000176200001440000000307314164710002015000 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/old-school.R \name{oldskool} \alias{oldskool} \alias{is_null} \alias{is_a} \alias{is_true} \alias{is_false} \alias{has_names} \alias{is_less_than} \alias{is_more_than} \alias{equals} \alias{is_equivalent_to} \alias{is_identical_to} \alias{equals_reference} \alias{shows_message} \alias{gives_warning} \alias{prints_text} \alias{throws_error} \alias{matches} \title{Old-style expectations.} \usage{ is_null() is_a(class) is_true() is_false() has_names(expected, ignore.order = FALSE, ignore.case = FALSE) is_less_than(expected, label = NULL, ...) is_more_than(expected, label = NULL, ...) equals(expected, label = NULL, ...) is_equivalent_to(expected, label = NULL) is_identical_to(expected, label = NULL) equals_reference(file, label = NULL, ...) shows_message(regexp = NULL, all = FALSE, ...) gives_warning(regexp = NULL, all = FALSE, ...) prints_text(regexp = NULL, ...) throws_error(regexp = NULL, ...) matches(regexp, all = TRUE, ...) } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} Initial testthat used a style of testing that looked like \verb{expect_that(a, equals(b))}. This allowed expectations to read like English sentences, but was verbose and a bit too cutesy. This style will continue to work but has been soft-deprecated - it is no longer documented, and new expectations will only use the new style \code{expect_equal(a, b)}. } \keyword{internal} testthat/man/find_reporter.Rd0000644000176200001440000000077213051613152016021 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-zzz.R \name{find_reporter} \alias{find_reporter} \title{Find reporter object given name or object.} \usage{ find_reporter(reporter) } \arguments{ \item{reporter}{name of reporter(s), or reporter object(s)} } \description{ If not found, will throw an informative error message. Pass a character vector to create a \link{MultiReporter} composed of individual reporters. Will return \code{NULL} if given \code{NULL}. } \keyword{internal} testthat/man/CheckReporter.Rd0000644000176200001440000000150414164710002015707 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-check.R \name{CheckReporter} \alias{CheckReporter} \title{Check reporter: 13 line summary of problems} \description{ \verb{R CMD check} displays only the last 13 lines of the result, so this reporter is designed to ensure that you see something useful there.
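As an illustration, you can also request it by name when running a directory of tests (a sketch; \code{test_check()} selects it automatically during \verb{R CMD check}):\preformatted{test_dir("tests/testthat", reporter = "check")
}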
} \seealso{ Other reporters: \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/test_package.Rd0000644000176200001440000000514414164710002015605 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-package.R \name{test_package} \alias{test_package} \alias{test_check} \alias{test_local} \title{Run all tests in a package} \usage{ test_package(package, reporter = check_reporter(), ...) test_check(package, reporter = check_reporter(), ...) test_local(path = ".", reporter = NULL, ...) } \arguments{ \item{package}{If these tests belong to a package, the name of the package.} \item{reporter}{Reporter to use to summarise output. Can be supplied as a string (e.g. "summary") or as an R6 object (e.g. \code{SummaryReporter$new()}). See \link{Reporter} for more details and a list of built-in reporters.} \item{...}{Additional arguments passed to \code{\link[=test_dir]{test_dir()}}} \item{path}{Path to directory containing tests.} } \value{ A list (invisibly) containing data about the test results. } \description{ \itemize{ \item \code{test_local()} tests a local source package. \item \code{test_package()} tests an installed package. \item \code{test_check()} checks a package during \verb{R CMD check}. } Tests live in \code{tests/testthat}. } \section{\verb{R CMD check}}{ To run testthat automatically from \verb{R CMD check}, make sure you have a \code{tests/testthat.R} that contains:\preformatted{library(testthat) library(yourpackage) test_check("yourpackage") } } \section{Special files}{ There are two types of \code{.R} file that have special behaviour: \itemize{ \item Test files start with \code{test} and are executed in alphabetical order. \item Setup files start with \code{setup} and are executed before tests. If clean up is needed after all tests have been run, you can use \code{withr::defer(clean_up(), teardown_env())}. See \code{vignette("test-fixtures")} for more details. } There are two other types of special file that we no longer recommend using: \itemize{ \item Helper files start with \code{helper} and are executed before tests are run. They're also loaded by \code{devtools::load_all()}, so there's no real point to them and you should just put your helper code in \verb{R/}. \item Teardown files start with \code{teardown} and are executed after the tests are run. Now we recommend interleaving setup and cleanup code in \verb{setup-} files, making it easier to check that you automatically clean up every mess that you make. } All other files are ignored by testthat. } \section{Environments}{ Each test is run in a clean environment to keep tests as isolated as possible. For package tests, that environment inherits from the package's namespace environment, so that tests can access internal functions and objects.
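For example, a test can exercise an unexported function directly (a sketch; \code{internal_helper()} stands in for any unexported function in your package):\preformatted{test_that("internal helper works", {
  # No pkg::: prefix is needed because the test environment
  # inherits from the package namespace
  expect_equal(internal_helper(1), 2)
})
}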
} testthat/man/expect_match.Rd0000644000176200001440000000542414164710002015620 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expectations-matches.R \name{expect_match} \alias{expect_match} \alias{expect_no_match} \title{Does a string match a regular expression?} \usage{ expect_match( object, regexp, perl = FALSE, fixed = FALSE, ..., all = TRUE, info = NULL, label = NULL ) expect_no_match( object, regexp, perl = FALSE, fixed = FALSE, ..., all = TRUE, info = NULL, label = NULL ) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{regexp}{Regular expression to test against.} \item{perl}{logical. Should Perl-compatible regexps be used?} \item{fixed}{logical. If \code{TRUE}, \code{pattern} is a string to be matched as is. Overrides all conflicting arguments.} \item{...}{ Arguments passed on to \code{\link[base:grep]{base::grepl}} \describe{ \item{\code{ignore.case}}{if \code{FALSE}, the pattern matching is \emph{case sensitive} and if \code{TRUE}, case is ignored during matching.} \item{\code{useBytes}}{logical. If \code{TRUE} the matching is done byte-by-byte rather than character-by-character. See \sQuote{Details}.} }} \item{all}{Should all elements of actual value match \code{regexp} (TRUE), or does only one need to match (FALSE).} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} } \description{ Does a string match a regular expression? } \details{ \code{expect_match()} is a wrapper around \code{\link[=grepl]{grepl()}}. See its documentation for more detail about the individual arguments. \code{expect_no_match()} provides the complementary case, checking that a string \emph{does not} match a regular expression. } \section{Functions}{ \itemize{ \item \code{expect_no_match}: Check that a string doesn't match a regular expression. }} \examples{ expect_match("Testing is fun", "fun") expect_match("Testing is fun", "f.n") expect_no_match("Testing is fun", "horrible") \dontrun{ expect_match("Testing is fun", "horrible") # Zero-length inputs always fail expect_match(character(), ".") } } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_length}()}, \code{\link{expect_named}()}, \code{\link{expect_null}()}, \code{\link{expect_output}()}, \code{\link{expect_reference}()}, \code{\link{expect_silent}()}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} \keyword{internal} testthat/man/MultiReporter.Rd0000644000176200001440000000150114164710002015761 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-multi.R \name{MultiReporter} \alias{MultiReporter} \title{Multi reporter: combine several reporters in one.} \description{ This reporter is useful to use several reporters at the same time, e.g. adding a custom reporter without removing the current one. 
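For example, to combine an interactive progress display with machine-readable results (a sketch; assumes the \code{reporters} argument of \code{MultiReporter$new()}):\preformatted{reporters <- list(ProgressReporter$new(), ListReporter$new())
test_dir("tests/testthat", reporter = MultiReporter$new(reporters = reporters))
}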
} \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/not.Rd0000644000176200001440000000066413171137773013774 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-that.R \name{not} \alias{not} \title{Negate an expectation} \usage{ not(f) } \arguments{ \item{f}{an existing expectation function} } \description{ This negates an expectation, making it possible to express that you want the opposite of a standard expectation. This function is deprecated and will be removed in a future version. } \keyword{internal} testthat/man/testthat_examples.Rd0000644000176200001440000000106014164710002016702 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/example.R \name{testthat_examples} \alias{testthat_examples} \alias{testthat_example} \title{Retrieve paths to built-in example test files} \usage{ testthat_examples() testthat_example(filename) } \arguments{ \item{filename}{Name of test file} } \description{ \code{testthat_examples()} retrieves path to directory of test files, \code{testthat_example()} retrieves path to a single test file. } \examples{ dir(testthat_examples()) testthat_example("success") } \keyword{internal} testthat/man/test_file.Rd0000644000176200001440000000427414164710002015134 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-files.R \name{test_file} \alias{test_file} \title{Run all tests in a single file} \usage{ test_file(path, reporter = default_compact_reporter(), package = NULL, ...) } \arguments{ \item{path}{Path to file.} \item{reporter}{Reporter to use to summarise output. Can be supplied as a string (e.g. "summary") or as an R6 object (e.g. \code{SummaryReporter$new()}). See \link{Reporter} for more details and a list of built-in reporters.} \item{package}{If these tests belong to a package, the name of the package.} \item{...}{Additional parameters passed on to \code{test_dir()}} } \value{ A list (invisibly) containing data about the test results. } \description{ Helper, setup, and teardown files located in the same directory as the test will also be run. } \section{Special files}{ There are two types of \code{.R} file that have special behaviour: \itemize{ \item Test files start with \code{test} and are executed in alphabetical order. \item Setup files start with \code{setup} and are executed before tests. If clean up is needed after all tests have been run, you can use \code{withr::defer(clean_up(), teardown_env())}. See \code{vignette("test-fixtures")} for more details. } There are two other types of special file that we no longer recommend using: \itemize{ \item Helper files start with \code{helper} and are executed before tests are run. They're also loaded by \code{devtools::load_all()}, so there's no real point to them and you should just put your helper code in \verb{R/}. \item Teardown files start with \code{teardown} and are executed after the tests are run. 
Now we recommend interleaving setup and cleanup code in \verb{setup-} files, making it easier to check that you automatically clean up every mess that you make. } All other files are ignored by testthat. } \section{Environments}{ Each test is run in a clean environment to keep tests as isolated as possible. For package tests, that environment inherits from the package's namespace environment, so that tests can access internal functions and objects. } \examples{ path <- testthat_example("success") test_file(path) test_file(path, reporter = "minimal") } testthat/man/ProgressReporter.Rd0000644000176200001440000000254514164710002016504 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-progress.R \name{ProgressReporter} \alias{ProgressReporter} \alias{CompactProgressReporter} \alias{ParallelProgressReporter} \title{Test reporter: interactive progress bar of errors.} \description{ \code{ProgressReporter} is designed for interactive use. Its goal is to give you actionable insights to help you understand the status of your code. This reporter also praises you from time to time if all your tests pass. It's the default reporter for \code{\link[=test_dir]{test_dir()}}. \code{ParallelProgressReporter} is very similar to \code{ProgressReporter}, but works better for packages that want parallel tests. \code{CompactProgressReporter} is a minimal version of \code{ProgressReporter} designed for use with single files. It's the default reporter for \code{\link[=test_file]{test_file()}}. } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/source_file.Rd0000644000176200001440000000217414164710002015452 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/source.R \name{source_file} \alias{source_file} \alias{source_dir} \alias{source_test_helpers} \alias{source_test_setup} \alias{source_test_teardown} \title{Source a file, directory of files, or various important subsets} \usage{ source_file(path, env = test_env(), chdir = TRUE, wrap = TRUE) source_dir( path, pattern = "\\\\.[rR]$", env = test_env(), chdir = TRUE, wrap = TRUE ) source_test_helpers(path = "tests/testthat", env = test_env()) source_test_setup(path = "tests/testthat", env = test_env()) source_test_teardown(path = "tests/testthat", env = test_env()) } \arguments{ \item{path}{Path to files.} \item{env}{Environment in which to evaluate code.} \item{chdir}{Change working directory to \code{dirname(path)}?} \item{wrap}{Automatically wrap all code within \code{\link[=test_that]{test_that()}}?
This ensures that all expectations are reported, even if outside a test block.} \item{pattern}{Regular expression used to filter files.} } \description{ These are used by \code{\link[=test_dir]{test_dir()}} and friends } \keyword{internal} testthat/man/capture_output.Rd0000644000176200001440000000260314164710002016233 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/capture-output.R \name{capture_output} \alias{capture_output} \alias{capture_output_lines} \alias{testthat_print} \title{Capture output to console} \usage{ capture_output(code, print = FALSE, width = 80) capture_output_lines(code, print = FALSE, width = 80) testthat_print(x) } \arguments{ \item{code}{Code to evaluate.} \item{print}{If \code{TRUE} and the result of evaluating \code{code} is visible, print the result using \code{testthat_print()}.} \item{width}{Number of characters per line of output. This does not inherit from \code{getOption("width")} so that tests always use the same output width, minimising spurious differences.} } \value{ \code{capture_output()} returns a single string. \code{capture_output_lines()} returns a character vector with one entry for each line } \description{ Evaluates \code{code} in a special context in which all output is captured, similar to \code{\link[=capture.output]{capture.output()}}. } \details{ Results are printed using the \code{testthat_print()} generic, which defaults to \code{print()}, giving you the ability to customise the printing of your object in tests, if needed. } \examples{ capture_output({ cat("Hi!\n") cat("Bye\n") }) capture_output_lines({ cat("Hi!\n") cat("Bye\n") }) capture_output("Hi") capture_output("Hi", print = TRUE) } \keyword{internal} testthat/man/DebugReporter.Rd0000644000176200001440000000142714164710002015724 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-debug.R \name{DebugReporter} \alias{DebugReporter} \title{Test reporter: start recovery.} \description{ This reporter will call a modified version of \code{\link[=recover]{recover()}} on all broken expectations. } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/test_dir.Rd0000644000176200001440000000673714164710002015001 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-files.R \name{test_dir} \alias{test_dir} \title{Run all tests in a directory} \usage{ test_dir( path, filter = NULL, reporter = NULL, env = NULL, ..., load_helpers = TRUE, stop_on_failure = TRUE, stop_on_warning = FALSE, wrap = lifecycle::deprecated(), package = NULL, load_package = c("none", "installed", "source") ) } \arguments{ \item{path}{Path to directory containing tests.} \item{filter}{If not \code{NULL}, only tests with file names matching this regular expression will be executed. Matching is performed on the file name after it's stripped of \code{"test-"} and \code{".R"}.} \item{reporter}{Reporter to use to summarise output. Can be supplied as a string (e.g. "summary") or as an R6 object (e.g. \code{SummaryReporter$new()}). 
See \link{Reporter} for more details and a list of built-in reporters.} \item{env}{Environment in which to execute the tests. Expert use only.} \item{...}{Additional arguments passed to \code{\link[=grepl]{grepl()}} to control filtering.} \item{load_helpers}{Source helper files before running the tests? See \code{\link[=source_test_helpers]{source_test_helpers()}} for more details.} \item{stop_on_failure}{If \code{TRUE}, throw an error if any tests fail.} \item{stop_on_warning}{If \code{TRUE}, throw an error if any tests generate warnings.} \item{wrap}{DEPRECATED} \item{package}{If these tests belong to a package, the name of the package.} \item{load_package}{Strategy to use to load package code: \itemize{ \item "none", the default, doesn't load the package. \item "installed", uses \code{\link[=library]{library()}} to load an installed package. \item "source", uses \code{\link[pkgload:load_all]{pkgload::load_all()}} to load a source package. }} } \value{ A list (invisibly) containing data about the test results. } \description{ This function is the low-level workhorse that powers \code{\link[=test_local]{test_local()}} and \code{\link[=test_package]{test_package()}}. Generally, you should not call this function directly. In particular, you are responsible for ensuring that the functions to test are available in the test \code{env} (e.g. via \code{load_package}). } \section{Special files}{ There are two types of \code{.R} file that have special behaviour: \itemize{ \item Test files start with \code{test} and are executed in alphabetical order. \item Setup files start with \code{setup} and are executed before tests. If clean up is needed after all tests have been run, you can use \code{withr::defer(clean_up(), teardown_env())}. See \code{vignette("test-fixtures")} for more details. } There are two other types of special file that we no longer recommend using: \itemize{ \item Helper files start with \code{helper} and are executed before tests are run. They're also loaded by \code{devtools::load_all()}, so there's no real point to them and you should just put your helper code in \verb{R/}. \item Teardown files start with \code{teardown} and are executed after the tests are run. Now we recommend interleaving setup and cleanup code in \verb{setup-} files, making it easier to check that you automatically clean up every mess that you make. } All other files are ignored by testthat. } \section{Environments}{ Each test is run in a clean environment to keep tests as isolated as possible. For package tests, that environment inherits from the package's namespace environment, so that tests can access internal functions and objects. } \keyword{internal} testthat/man/TeamcityReporter.Rd0000644000176200001440000000157214164710002016456 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-teamcity.R \name{TeamcityReporter} \alias{TeamcityReporter} \title{Test reporter: Teamcity format.} \description{ This reporter will output results in the Teamcity message format.
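For example, you might select this reporter explicitly when running a package's tests on a TeamCity agent (a minimal sketch; the path is the conventional test directory): \preformatted{
test_dir("tests/testthat", reporter = TeamcityReporter$new())
}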
For more information about Teamcity messages, see \url{http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity} } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}} } \concept{reporters} testthat/man/safe_digest.Rd0000644000176200001440000000071613051613152015432 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/watcher.R \name{safe_digest} \alias{safe_digest} \title{Compute a digest of a file, returning NA if the file doesn't exist.} \usage{ safe_digest(path) } \arguments{ \item{path}{Path to file to compute digest on.} } \value{ A digest of the file, or \code{NA} if it doesn't exist. } \description{ Compute a digest of a file, returning NA if the file doesn't exist. } \keyword{internal} testthat/man/equality-expectations.Rd0000644000176200001440000000666014164710002017520 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-equality.R \name{equality-expectations} \alias{equality-expectations} \alias{expect_equal} \alias{expect_identical} \title{Does code return the expected value?} \usage{ expect_equal( object, expected, ..., tolerance = if (edition_get() >= 3) testthat_tolerance(), info = NULL, label = NULL, expected.label = NULL ) expect_identical( object, expected, info = NULL, label = NULL, expected.label = NULL, ... ) } \arguments{ \item{object, expected}{Computation and value to compare it to. Both arguments support limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{...}{\strong{3e}: passed on to \code{\link[waldo:compare]{waldo::compare()}}. See its docs to see other ways to control comparison. \strong{2e}: passed on to \code{\link[=compare]{compare()}}/\code{\link[=identical]{identical()}}.} \item{tolerance}{\strong{3e}: passed on to \code{\link[waldo:compare]{waldo::compare()}}. If non-\code{NULL}, will ignore small floating point differences. It uses the same algorithm as \code{\link[=all.equal]{all.equal()}} so the tolerance is usually relative (i.e. \verb{mean(abs(x - y)) / mean(abs(y)) < tolerance}), except when the differences are very small, when it becomes absolute (i.e. \verb{mean(abs(x - y)) < tolerance}). See waldo documentation for more details. \strong{2e}: passed on to \code{\link[=compare]{compare()}}, if set. It's hard to reason about exactly what tolerance means because depending on the precise code path it could be either an absolute or relative tolerance.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label, expected.label}{Used to customise failure messages. For expert use only.} } \description{ These functions provide two levels of strictness when comparing a computation to a reference value. \code{expect_identical()} is the baseline; \code{expect_equal()} relaxes the test to ignore small numeric differences.
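For example (a minimal sketch using only base R values): \preformatted{
expect_equal(sqrt(2) ^ 2, 2)     # passes: difference is below the tolerance
expect_identical(sqrt(2) ^ 2, 2) # fails: the doubles differ in the last bits
}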
In the 2nd edition, \code{expect_identical()} uses \code{\link[=identical]{identical()}} and \code{expect_equal} uses \code{\link[=all.equal]{all.equal()}}. In the 3rd edition, both functions use \href{https://github.com/r-lib/waldo}{waldo}. They differ only in that \code{expect_equal()} sets \code{tolerance = testthat_tolerance()} so that small floating point differences are ignored; this also implies that (e.g.) \code{1} and \code{1L} are treated as equal. } \examples{ a <- 10 expect_equal(a, 10) # Use expect_equal() when testing for numeric equality \dontrun{ expect_identical(sqrt(2) ^ 2, 2) } expect_equal(sqrt(2) ^ 2, 2) } \seealso{ \itemize{ \item \code{\link[=expect_setequal]{expect_setequal()}}/\code{\link[=expect_mapequal]{expect_mapequal()}} to test for set equality. \item \code{\link[=expect_reference]{expect_reference()}} to test if two names point to same memory address. } Other expectations: \code{\link{comparison-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_length}()}, \code{\link{expect_match}()}, \code{\link{expect_named}()}, \code{\link{expect_null}()}, \code{\link{expect_output}()}, \code{\link{expect_reference}()}, \code{\link{expect_silent}()}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/inheritance-expectations.Rd0000644000176200001440000000540714164710002020152 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-inheritance.R \name{inheritance-expectations} \alias{inheritance-expectations} \alias{expect_type} \alias{expect_s3_class} \alias{expect_s4_class} \title{Does code return an object inheriting from the expected base type, S3 class, or S4 class?} \usage{ expect_type(object, type) expect_s3_class(object, class, exact = FALSE) expect_s4_class(object, class) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{type}{String giving base type (as returned by \code{\link[=typeof]{typeof()}}).} \item{class}{Either a character vector of class names, or for \code{expect_s3_class()} and \code{expect_s4_class()}, an \code{NA} to assert that \code{object} isn't an S3 or S4 object.} \item{exact}{If \code{FALSE}, the default, checks that \code{object} inherits from \code{class}. If \code{TRUE}, checks that object has a class that's identical to \code{class}.} } \description{ See \url{https://adv-r.hadley.nz/oo.html} for an overview of R's OO systems, and the vocabulary used here. \itemize{ \item \code{expect_type(x, type)} checks that \code{typeof(x)} is \code{type}. \item \code{expect_s3_class(x, class)} checks that \code{x} is an S3 object that \code{\link[=inherits]{inherits()}} from \code{class} \item \code{expect_s3_class(x, NA)} checks that \code{x} isn't an S3 object. \item \code{expect_s4_class(x, class)} checks that \code{x} is an S4 object that \code{\link[=is]{is()}} \code{class}. \item \code{expect_s4_class(x, NA)} checks that \code{x} isn't an S4 object. } See \code{\link[=expect_vector]{expect_vector()}} for testing properties of objects created by vctrs. 
} \examples{ x <- data.frame(x = 1:10, y = "x", stringsAsFactors = TRUE) # A data frame is an S3 object with class data.frame expect_s3_class(x, "data.frame") show_failure(expect_s4_class(x, "data.frame")) # A data frame is built from a list: expect_type(x, "list") # An integer vector is an atomic vector of type "integer" expect_type(x$x, "integer") # It is not an S3 object show_failure(expect_s3_class(x$x, "integer")) # Above, we requested data.frame() converts strings to factors: show_failure(expect_type(x$y, "character")) expect_s3_class(x$y, "factor") expect_type(x$y, "integer") } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_length}()}, \code{\link{expect_match}()}, \code{\link{expect_named}()}, \code{\link{expect_null}()}, \code{\link{expect_output}()}, \code{\link{expect_reference}()}, \code{\link{expect_silent}()}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/expect_is.Rd0000644000176200001440000000264214164710002015136 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-inheritance.R \name{expect_is} \alias{expect_is} \title{Does an object inherit from a given class?} \usage{ expect_is(object, class, info = NULL, label = NULL) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{class}{Either a character vector of class names, or for \code{expect_s3_class()} and \code{expect_s4_class()}, an \code{NA} to assert that \code{object} isn't an S3 or S4 object.} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} \code{expect_is()} is an older form that uses \code{\link[=inherits]{inherits()}} without checking whether \code{x} is S3, S4, or neither. Instead, I'd recommend using \code{\link[=expect_type]{expect_type()}}, \code{\link[=expect_s3_class]{expect_s3_class()}} or \code{\link[=expect_s4_class]{expect_s4_class()}} to more clearly convey your intent. } \section{3rd edition}{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} \code{expect_is()} is formally deprecated in the 3rd edition. } \keyword{internal} testthat/man/snapshot_accept.Rd0000644000176200001440000000157514166627056016357 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/snapshot-manage.R \name{snapshot_accept} \alias{snapshot_accept} \alias{snapshot_review} \title{Snapshot management} \usage{ snapshot_accept(files = NULL, path = "tests/testthat") snapshot_review(files = NULL, path = "tests/testthat") } \arguments{ \item{files}{Optionally, filter effects to snapshots from specified files. This can be a snapshot name (e.g. \code{foo} or \code{foo.md}), a snapshot file name (e.g. \code{testfile/foo.txt}), or a snapshot file directory (e.g. \verb{testfile/}).} \item{path}{Path to tests.} } \description{ \itemize{ \item \code{snapshot_accept()} accepts all modified snapshots. \item \code{snapshot_review()} opens a Shiny app that shows a visual diff of each modified snapshot. This is particularly useful for whole file snapshots created by \code{expect_snapshot_file()}. 
} } testthat/man/expect_success.Rd0000644000176200001440000000133214164710002016176 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-self-test.R \name{expect_success} \alias{expect_success} \alias{expect_failure} \alias{show_failure} \title{Tools for testing expectations} \usage{ expect_success(expr) expect_failure(expr, message = NULL, ...) show_failure(expr) } \arguments{ \item{expr}{Expression that evaluates a single expectation.} \item{message}{Check that the failure message matches this regexp.} \item{...}{Other arguments passed on to \code{\link[=expect_match]{expect_match()}}.} } \description{ Use these expectations to test other expectations. Use \code{show_failure()} in examples to print the failure message without throwing an error. } \keyword{internal} testthat/man/expect_less_than.Rd0000644000176200001440000000104614164710002016500 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-comparison.R \name{expect_less_than} \alias{expect_less_than} \alias{expect_more_than} \title{Deprecated numeric comparison functions} \usage{ expect_less_than(...) expect_more_than(...) } \arguments{ \item{...}{All arguments passed on to \code{expect_lt()}/\code{expect_gt()}.} } \description{ These functions have been deprecated in favour of the more concise \code{\link[=expect_gt]{expect_gt()}} and \code{\link[=expect_lt]{expect_lt()}}. } \keyword{internal} testthat/man/MinimalReporter.Rd0000644000176200001440000000167314164710002016267 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-minimal.R \name{MinimalReporter} \alias{MinimalReporter} \title{Test reporter: minimal.} \description{ The minimal test reporter provides the absolute minimum amount of information: whether each expectation has succeeded, failed or experienced an error. If you want to find out what the failures and errors actually were, you'll need to run a more informative test reporter. } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/teardown_env.Rd0000644000176200001440000000077714164710002015653 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-files.R \name{teardown_env} \alias{teardown_env} \title{Run code after all test files} \usage{ teardown_env() } \description{ This environment has no purpose other than as a handle for \code{\link[withr:defer]{withr::defer()}}: use it when you want to run code after all tests have been run. Typically, you'll use \code{withr::defer(cleanup(), teardown_env())} immediately after you've made a mess in a \verb{setup-*.R} file.
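For example, a \verb{setup-*.R} file might create a scratch file and schedule its removal for after the whole suite has run (a minimal sketch; the file name is arbitrary): \preformatted{
# in tests/testthat/setup-scratch.R
scratch <- tempfile()
file.create(scratch)
withr::defer(unlink(scratch), teardown_env())
}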
} testthat/man/default_reporter.Rd0000644000176200001440000000157314164710002016523 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter.R \name{default_reporter} \alias{default_reporter} \alias{default_parallel_reporter} \alias{default_compact_reporter} \alias{check_reporter} \title{Retrieve the default reporter} \usage{ default_reporter() default_parallel_reporter() default_compact_reporter() check_reporter() } \description{ The defaults are: \itemize{ \item \link{ProgressReporter} for interactive, non-parallel; override with \code{testthat.default_reporter} \item \link{ParallelProgressReporter} for interactive, parallel packages; override with \code{testthat.default_parallel_reporter} \item \link{CompactProgressReporter} for single-file interactive; override with \code{testthat.default_compact_reporter} \item \link{CheckReporter} for R CMD check; override with \code{testthat.default_check_reporter} } } \keyword{internal} testthat/man/FailReporter.Rd0000644000176200001440000000147214164710002015551 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-fail.R \name{FailReporter} \alias{FailReporter} \title{Test reporter: fail at end.} \description{ This reporter will simply throw an error if any of the tests failed. It is best combined with another reporter, such as the \link{SummaryReporter}. } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/expect_that.Rd0000644000176200001440000000317614164710002015466 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-that.R \name{expect_that} \alias{expect_that} \title{Expect that a condition holds.} \usage{ expect_that(object, condition, info = NULL, label = NULL) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{condition}{A function that returns whether or not the condition is met, and if not, an error message to display.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} } \value{ The (internal) expectation result as an invisible list. } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} An old style of testing that's no longer encouraged. } \section{3rd edition}{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} This style of testing is formally deprecated as of the 3rd edition. Use a more specific \code{expect_} function instead.
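For example, an old-style call and its modern equivalent (a sketch of the recommended translation): \preformatted{
# Superseded style:
expect_that(5 * 2, equals(10))
# Preferred modern equivalent:
expect_equal(5 * 2, 10)
}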
} \examples{ expect_that(5 * 2, equals(10)) expect_that(sqrt(2) ^ 2, equals(2)) \dontrun{ expect_that(sqrt(2) ^ 2, is_identical_to(2)) } } \seealso{ \code{\link[=fail]{fail()}} for an expectation that always fails. } \keyword{internal} testthat/man/use_catch.Rd0000644000176200001440000001162014164710002015105 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-compiled-code.R \name{use_catch} \alias{use_catch} \title{Use Catch for C++ Unit Testing} \usage{ use_catch(dir = getwd()) } \arguments{ \item{dir}{The directory containing an \R package.} } \description{ Add the necessary infrastructure to enable C++ unit testing in \R packages with \href{https://github.com/catchorg/Catch2}{Catch} and \code{testthat}. } \details{ Calling \code{use_catch()} will: \enumerate{ \item Create a file \code{src/test-runner.cpp}, which ensures that the \code{testthat} package will understand how to run your package's unit tests, \item Create an example test file \code{src/test-example.cpp}, which showcases how you might use Catch to write a unit test, \item Add a test file \code{tests/testthat/test-cpp.R}, which ensures that \code{testthat} will run your compiled tests during invocations of \code{devtools::test()} or \verb{R CMD check}, and \item Create a file \code{R/catch-routine-registration.R}, which ensures that \R will automatically register this routine when \code{tools::package_native_routine_registration_skeleton()} is invoked. } You will also need to: \itemize{ \item Add xml2 to Suggests, with e.g. \code{usethis::use_package("xml2", "Suggests")} \item Add testthat to LinkingTo, with e.g. \code{usethis::use_package("testthat", "LinkingTo")} } C++ unit tests can be added to C++ source files within the \code{src} directory of your package, with a format similar to \R code tested with \code{testthat}. Here's a simple example of a unit test written with \code{testthat} + Catch: \preformatted{
context("C++ Unit Test") {
  test_that("two plus two is four") {
    int result = 2 + 2;
    expect_true(result == 4);
  }
}
} When your package is compiled, unit tests alongside a harness for running these tests will be compiled into your \R package, with the C entry point \code{run_testthat_tests()}. \code{testthat} will use that entry point to run your unit tests when detected. } \section{Functions}{ All of the functions provided by Catch are available with the \code{CATCH_} prefix -- see \href{https://github.com/catchorg/Catch2/blob/master/docs/assertions.md}{here} for a full list. \code{testthat} provides the following wrappers, to conform with \code{testthat}'s \R interface: \tabular{lll}{ \strong{Function} \tab \strong{Catch} \tab \strong{Description} \cr \code{context} \tab \code{CATCH_TEST_CASE} \tab The context of a set of tests. \cr \code{test_that} \tab \code{CATCH_SECTION} \tab A test section. \cr \code{expect_true} \tab \code{CATCH_CHECK} \tab Test that an expression evaluates to \code{true}. \cr \code{expect_false} \tab \code{CATCH_CHECK_FALSE} \tab Test that an expression evaluates to \code{false}. \cr \code{expect_error} \tab \code{CATCH_CHECK_THROWS} \tab Test that evaluation of an expression throws an exception. \cr \code{expect_error_as} \tab \code{CATCH_CHECK_THROWS_AS} \tab Test that evaluation of an expression throws an exception of a specific class.
\cr } In general, you should prefer using the \code{testthat} wrappers, as \code{testthat} also does some work to ensure that any unit tests within will not be compiled or run when using the Solaris Studio compilers (as these are currently unsupported by Catch). This should make it easier to submit packages to CRAN that use Catch. } \section{Symbol Registration}{ If you've opted to disable dynamic symbol lookup in your package, then you'll need to explicitly export a symbol in your package that \code{testthat} can use to run your unit tests. \code{testthat} will look for a routine with one of the names: \preformatted{
C_run_testthat_tests
c_run_testthat_tests
run_testthat_tests
} See \href{https://cran.r-project.org/doc/manuals/r-release/R-exts.html#Controlling-visibility}{Controlling Visibility} and \href{https://cran.r-project.org/doc/manuals/r-release/R-exts.html#Registering-symbols}{Registering Symbols} in the \strong{Writing R Extensions} manual for more information. } \section{Advanced Usage}{ If you'd like to write your own Catch test runner, you can instead use the \code{testthat::catchSession()} object in a file with the form: \preformatted{
#define TESTTHAT_TEST_RUNNER
#include <testthat.h>

void run() {
  Catch::Session& session = testthat::catchSession();
  // interact with the session object as desired
}
} This can be useful if you'd like to run your unit tests with custom arguments passed to the Catch session. } \section{Standalone Usage}{ If you'd like to use the C++ unit testing facilities provided by Catch, but would prefer not to use the regular \code{testthat} \R testing infrastructure, you can manually run the unit tests by inserting a call to: \preformatted{
.Call("run_testthat_tests", PACKAGE = <package name>)
} as necessary within your unit test suite. } \seealso{ \href{https://github.com/catchorg/Catch2/blob/master/docs/assertions.md}{Catch}, the library used to enable C++ unit testing. } testthat/man/RStudioReporter.Rd0000644000176200001440000000140714164710002016265 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-rstudio.R \name{RStudioReporter} \alias{RStudioReporter} \title{Test reporter: RStudio} \description{ This reporter is designed for output to RStudio. It produces results in an easily parsed form. } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/comparison-expectations.Rd0000644000176200001440000000314314164710002020026 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-comparison.R \name{comparison-expectations} \alias{comparison-expectations} \alias{expect_lt} \alias{expect_lte} \alias{expect_gt} \alias{expect_gte} \title{Does code return a number greater/less than the expected value?} \usage{ expect_lt(object, expected, label = NULL, expected.label = NULL) expect_lte(object, expected, label = NULL, expected.label = NULL) expect_gt(object, expected, label = NULL, expected.label = NULL) expect_gte(object, expected, label = NULL, expected.label = NULL) } \arguments{ \item{object}{Computation and value to compare it to.
Both arguments support limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{expected}{Single numeric value to compare.} \item{label}{Used to customise failure messages. For expert use only.} \item{expected.label}{Used to customise failure messages. For expert use only.} } \description{ Does code return a number greater/less than the expected value? } \examples{ a <- 9 expect_lt(a, 10) \dontrun{ expect_lt(11, 10) } a <- 11 expect_gt(a, 10) \dontrun{ expect_gt(9, 10) } } \seealso{ Other expectations: \code{\link{equality-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_length}()}, \code{\link{expect_match}()}, \code{\link{expect_named}()}, \code{\link{expect_null}()}, \code{\link{expect_output}()}, \code{\link{expect_reference}()}, \code{\link{expect_silent}()}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/with_mock.Rd0000644000176200001440000000414314164710002015135 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mock.R \name{with_mock} \alias{with_mock} \alias{local_mock} \title{Mock functions in a package.} \usage{ with_mock(..., .env = topenv()) local_mock(..., .env = topenv(), .local_envir = parent.frame()) } \arguments{ \item{...}{Named parameters redefine mocked functions; unnamed parameters will be evaluated after mocking the functions.} \item{.env}{The environment in which to patch the functions, defaults to the top-level environment. A character is interpreted as a package name.} \item{.local_envir}{Environment in which to add exit handler. For expert use only.} } \value{ The result of the last unnamed parameter } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} \code{with_mock()} and \code{local_mock()} are superseded in favour of the more rigorous techniques found in the \href{https://krlmlr.github.io/mockr/}{mockr} and \href{https://github.com/r-lib/mockery#mockery}{mockery} packages. Mocking allows you to temporarily replace the implementation of functions within a package, which is useful for testing code that relies on functions that are slow, have unintended side effects or access resources that may not be available when testing. This works by using some C code to temporarily modify the mocked function \emph{in place}. On exit, all functions are restored to their previous state. This is somewhat abusive of R's internals so use with care. In particular, functions in base packages cannot be mocked; to work around this, you'll need to make a wrapper function in your own package. } \section{3rd edition}{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} \code{with_mock()} and \code{local_mock()} are deprecated in the third edition.
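For reference, a minimal sketch of the superseded interface (\code{my_fun} and \code{"mypackage"} are hypothetical placeholders; new code should use mockr or mockery instead): \preformatted{
with_mock(
  my_fun = function() "mocked!",            # temporarily replaces mypackage::my_fun
  expect_equal(my_fun(), "mocked!"),        # evaluated while the mock is in place
  .env = "mypackage"
)
}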
} \references{ Suraj Gupta (2012): \href{http://blog.obeautifulcode.com/R/How-R-Searches-And-Finds-Stuff/}{How R Searches And Finds Stuff} } \keyword{internal} testthat/man/figures/0000755000176200001440000000000014164710002014324 5ustar liggesuserstestthat/man/figures/lifecycle-defunct.svg0000644000176200001440000000170414164710002020434 0ustar liggesuserstestthat/man/figures/lifecycle-maturing.svg0000644000176200001440000000170614164710002020634 0ustar liggesuserstestthat/man/figures/logo.png0000644000176200001440000003600313717012042015776 0ustar liggesuserstestthat/man/figures/lifecycle-archived.svg0000644000176200001440000000170714164710002020574 0ustar liggesuserstestthat/man/figures/lifecycle-soft-deprecated.svg0000644000176200001440000000172614164710002022061 0ustar liggesuserstestthat/man/figures/lifecycle-questioning.svg0000644000176200001440000000171414164710002021352 0ustar liggesuserstestthat/man/figures/lifecycle-superseded.svg0000644000176200001440000000171314164710002021147 0ustar liggesuserstestthat/man/figures/lifecycle-stable.svg0000644000176200001440000000167414164710002020264 0ustar liggesuserstestthat/man/figures/lifecycle-experimental.svg0000644000176200001440000000171614164710002021504 0ustar liggesuserstestthat/man/figures/lifecycle-deprecated.svg0000644000176200001440000000171214164710002021103 0ustar liggesuserstestthat/man/reexports.Rd0000644000176200001440000000061714164710002015206 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \docType{import} \name{reexports} \alias{reexports} \alias{\%>\%} \title{Objects exported from other packages} \keyword{internal} \description{ These objects are imported from other packages. Follow the links below to see their documentation.
\describe{ \item{magrittr}{\code{\link[magrittr:pipe]{\%>\%}}} }} testthat/man/expect_reference.Rd0000644000176200001440000000403514164710002016457 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-equality.R \name{expect_reference} \alias{expect_reference} \title{Does code return a reference to the expected object?} \usage{ expect_reference( object, expected, info = NULL, label = NULL, expected.label = NULL ) } \arguments{ \item{object}{Computation and value to compare it to. Both arguments support limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{expected}{Computation and value to compare it to. Both arguments support limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} \item{expected.label}{Used to customise failure messages. For expert use only.} } \description{ \code{expect_reference()} compares the underlying memory addresses of two symbols. It is for expert use only. } \section{3rd edition}{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} \code{expect_reference()} is deprecated in the third edition. If you know what you're doing, and you really need this behaviour, just use \code{is_reference()} directly: \code{expect_true(rlang::is_reference(x, y))}. } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_length}()}, \code{\link{expect_match}()}, \code{\link{expect_named}()}, \code{\link{expect_null}()}, \code{\link{expect_output}()}, \code{\link{expect_silent}()}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} \keyword{internal} testthat/man/expect_length.Rd0000644000176200001440000000215514164710002016003 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-length.R \name{expect_length} \alias{expect_length} \title{Does code return a vector with the specified length?} \usage{ expect_length(object, n) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{n}{Expected length.} } \description{ Does code return a vector with the specified length?
} \examples{ expect_length(1, 1) expect_length(1:10, 10) \dontrun{ expect_length(1:10, 1) } } \seealso{ \code{\link[=expect_vector]{expect_vector()}} to make assertions about the "size" of a vector Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_match}()}, \code{\link{expect_named}()}, \code{\link{expect_null}()}, \code{\link{expect_output}()}, \code{\link{expect_reference}()}, \code{\link{expect_silent}()}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/takes_less_than.Rd0000644000176200001440000000060014164710002016312 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/old-school.R \name{takes_less_than} \alias{takes_less_than} \title{Does code take less than the expected amount of time to run?} \usage{ takes_less_than(amount) } \arguments{ \item{amount}{maximum duration in seconds} } \description{ This is useful for performance regression testing. } \keyword{internal} testthat/man/compare_state.Rd0000644000176200001440000000070713051613152016003 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/watcher.R \name{compare_state} \alias{compare_state} \title{Compare two directory states.} \usage{ compare_state(old, new) } \arguments{ \item{old}{previous state} \item{new}{current state} } \value{ list containing number of changes and files which have been \code{added}, \code{deleted} and \code{modified} } \description{ Compare two directory states. } \keyword{internal} testthat/man/find_test_scripts.Rd0000644000176200001440000000234214164710002016676 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-files.R \name{find_test_scripts} \alias{find_test_scripts} \title{Find test files} \usage{ find_test_scripts( path, filter = NULL, invert = FALSE, ..., full.names = TRUE, start_first = NULL ) } \arguments{ \item{path}{path to tests} \item{filter}{If not \code{NULL}, only tests with file names matching this regular expression will be executed. Matching is performed on the file name after it's stripped of \code{"test-"} and \code{".R"}.} \item{invert}{If \code{TRUE} return files which \strong{don't} match.} \item{...}{Additional arguments passed to \code{\link[=grepl]{grepl()}} to control filtering.} \item{start_first}{A character vector of file patterns (globs, see \code{\link[utils:glob2rx]{utils::glob2rx()}}). The patterns are for the file names (base names), not for the whole paths. testthat starts the files matching the first pattern first, then the ones matching the second, etc. and then the rest of the files, alphabetically. Parallel tests tend to finish quicker if you start the slowest files first. 
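For example (a sketch with hypothetical file-name globs): \preformatted{
find_test_scripts(
  "tests/testthat",
  start_first = c("test-slow-*.R", "test-db-*.R")
)
}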
\code{NULL} means alphabetical order.} } \value{ A character vector of paths } \description{ Find test files } \keyword{internal} testthat/man/expect_snapshot.Rd0000644000176200001440000001416214165635513016374 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/snapshot.R \name{expect_snapshot} \alias{expect_snapshot} \alias{expect_snapshot_output} \alias{expect_snapshot_error} \alias{expect_snapshot_warning} \alias{expect_snapshot_value} \title{Snapshot testing} \usage{ expect_snapshot( x, cran = FALSE, error = FALSE, transform = NULL, variant = NULL, cnd_class = FALSE ) expect_snapshot_output(x, cran = FALSE, variant = NULL) expect_snapshot_error(x, class = "error", cran = FALSE, variant = NULL) expect_snapshot_warning(x, class = "warning", cran = FALSE, variant = NULL) expect_snapshot_value( x, style = c("json", "json2", "deparse", "serialize"), cran = FALSE, tolerance = testthat_tolerance(), ..., variant = NULL ) } \arguments{ \item{x}{Code to evaluate.} \item{cran}{Should these expectations be verified on CRAN? By default, they are not, because snapshot tests tend to be fragile because they often rely on minor details of dependencies.} \item{error}{Do you expect the code to throw an error? The expectation will fail (even on CRAN) if an unexpected error is thrown or the expected error is not thrown.} \item{transform}{Optionally, a function to scrub sensitive or stochastic text from the output. Should take a character vector of lines as input and return a modified character vector as output.} \item{variant}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}} If non-\code{NULL}, results will be saved in \verb{_snaps/\{variant\}/\{test.md\}}, so \code{variant} must be a single string of alphanumeric characters suitable for use as a directory name. You can use variants to deal with cases where the snapshot output varies and you want to capture and test the variations. Common use cases include variations for operating system, R version, or version of a key dependency. Variants are an advanced feature. When you use them, you'll need to carefully think about your testing strategy to ensure that all important variants are covered by automated tests, and ensure that you have a way to get snapshot changes out of your CI system and back into the repo.} \item{cnd_class}{Whether to include the class of messages, warnings, and errors in the snapshot. Only the most specific class is included, i.e. the first element of \code{class(cnd)}.} \item{class}{Class of expected error or warning. The expectation will always fail (even on CRAN) if an error of this class isn't seen when executing \code{x}.} \item{style}{Serialization style to use: \itemize{ \item \code{json} uses \code{\link[jsonlite:fromJSON]{jsonlite::fromJSON()}} and \code{\link[jsonlite:fromJSON]{jsonlite::toJSON()}}. This produces the simplest output but only works for relatively simple objects. \item \code{json2} uses \code{\link[jsonlite:serializeJSON]{jsonlite::serializeJSON()}} and \code{\link[jsonlite:serializeJSON]{jsonlite::unserializeJSON()}} which are more verbose but work for a wider range of types. \item \code{deparse} uses \code{\link[=deparse]{deparse()}}, which generates a depiction of the object using R code. \item \code{serialize()} produces a binary serialization of the object using \code{\link[=serialize]{serialize()}}.
This is all but guaranteed to work for any R object, but produces a completely opaque serialization. }} \item{tolerance}{Numerical tolerance: any differences (in the sense of \code{\link[base:all.equal]{base::all.equal()}}) smaller than this value will be ignored. The default tolerance is \code{sqrt(.Machine$double.eps)}, unless long doubles are not available, in which case the test is skipped.} \item{...}{For \code{expect_snapshot_value()} only, passed on to \code{\link[waldo:compare]{waldo::compare()}} so you can control the details of the comparison.} } \description{ Snapshot tests (aka golden tests) are similar to unit tests except that the expected result is stored in a separate file that is managed by testthat. Snapshot tests are useful for when the expected value is large, or when the intent of the code is something that can only be verified by a human (e.g. this is a useful error message). Learn more in \code{vignette("snapshotting")}. \itemize{ \item \code{expect_snapshot()} captures all messages, warnings, errors, and output from code. \item \code{expect_snapshot_output()} captures just output printed to the console. \item \code{expect_snapshot_error()} captures an error message and optionally checks its class. \item \code{expect_snapshot_warning()} captures a warning message and optionally checks its class. \item \code{expect_snapshot_value()} captures the return value. } (These functions supersede \code{\link[=verify_output]{verify_output()}}, \code{\link[=expect_known_output]{expect_known_output()}}, \code{\link[=expect_known_value]{expect_known_value()}}, and \code{\link[=expect_known_hash]{expect_known_hash()}}.) } \section{Workflow}{ The first time that you run a snapshot expectation it will run \code{x}, capture the results, and record them in \verb{tests/testthat/_snaps/\{test\}.md}. Each test file gets its own snapshot file, e.g. \code{test-foo.R} will get \code{_snaps/foo.md}. It's important to review the snapshot files and commit them to git. They are designed to be human readable, and you should always review new additions to ensure that the salient information has been captured. They should also be carefully reviewed in pull requests, to make sure that snapshots have updated in the expected way. On subsequent runs, the result of \code{x} will be compared to the value stored on disk. If it's different, the expectation will fail, and a new file \verb{_snaps/\{test\}.new.md} will be created. If the change was deliberate, you can approve the change with \code{\link[=snapshot_accept]{snapshot_accept()}} and then the tests will pass the next time you run them. Note that snapshotting can only work when executing a complete test file (with \code{\link[=test_file]{test_file()}}, \code{\link[=test_dir]{test_dir()}}, or friends) because there's otherwise no way to figure out the snapshot path. If you run snapshot tests interactively, they'll just display the current value. } testthat/man/expectation.Rd0000644000176200001440000000265114164710002015476 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expectation.R \name{expectation} \alias{expectation} \alias{new_expectation} \alias{exp_signal} \alias{is.expectation} \title{Construct an expectation object} \usage{ expectation(type, message, srcref = NULL, trace = NULL) new_expectation( type, message, ..., srcref = NULL, trace = NULL, .subclass = NULL ) exp_signal(exp) is.expectation(x) } \arguments{ \item{type}{Expectation type.
Must be one of "success", "failure", "error", "skip", "warning".} \item{message}{Message describing test failure} \item{srcref}{Optional \code{srcref} giving location of test.} \item{trace}{An optional backtrace created by \code{\link[rlang:trace_back]{rlang::trace_back()}}. When supplied, the expectation is displayed with the backtrace.} \item{...}{Additional attributes for the expectation object.} \item{.subclass}{An optional subclass for the expectation object.} \item{exp}{An expectation object, as created by \code{\link[=new_expectation]{new_expectation()}}.} \item{x}{object to test for class membership} } \description{ For advanced use only. If you are creating your own expectation, you should call \code{\link[=expect]{expect()}} instead. See \code{vignette("custom-expectation")} for more details. } \details{ Create an expectation with \code{expectation()} or \code{new_expectation()} and signal it with \code{exp_signal()}. } \keyword{internal} testthat/man/test_examples.Rd0000644000176200001440000000167014164710002016030 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-example.R \name{test_examples} \alias{test_examples} \alias{test_rd} \alias{test_example} \title{Test package examples} \usage{ test_examples(path = "../..") test_rd(rd, title = attr(rd, "Rdfile")) test_example(path, title = path) } \arguments{ \item{path}{For \code{test_examples()}, path to directory containing Rd files. For \code{test_example()}, path to a single Rd file. Remember the working directory for tests is \code{tests/testthat}.} \item{rd}{A parsed Rd object, obtained from \code{\link[tools:Rdutils]{tools::Rd_db()}} or otherwise.} \item{title}{Test title to use} } \description{ These helper functions make it easier to test the examples in a package. Each example counts as one test, and it succeeds if the code runs without an error. Generally, this is redundant with R CMD check, and is not recommended in routine practice. } \keyword{internal} testthat/man/expect_silent.Rd0000644000176200001440000000207014164710002016014 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-silent.R \name{expect_silent} \alias{expect_silent} \title{Does code execute silently?} \usage{ expect_silent(object) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} } \value{ The first argument, invisibly. } \description{ Checks that the code produces no output, messages, or warnings. 
} \examples{ expect_silent("123") f <- function() { message("Hi!") warning("Hey!!") print("OY!!!") } \dontrun{ expect_silent(f()) } } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_length}()}, \code{\link{expect_match}()}, \code{\link{expect_named}()}, \code{\link{expect_null}()}, \code{\link{expect_output}()}, \code{\link{expect_reference}()}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/dir_state.Rd0000644000176200001440000000073713051613152015136 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/watcher.R \name{dir_state} \alias{dir_state} \title{Capture the state of a directory.} \usage{ dir_state(path, pattern = NULL, hash = TRUE) } \arguments{ \item{path}{path to directory} \item{pattern}{regular expression with which to filter files} \item{hash}{use hash (slow but accurate) or time stamp (fast but less accurate)} } \description{ Capture the state of a directory. } \keyword{internal} testthat/man/test_path.Rd0000644000176200001440000000067614164710002015153 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-path.R \name{test_path} \alias{test_path} \title{Locate file in testing directory.} \usage{ test_path(...) } \arguments{ \item{...}{Character vectors giving path component.} } \value{ A character vector giving the path. } \description{ This function is designed to work both interactively and during tests, locating files in the \code{tests/testthat} directory. } testthat/man/local_test_directory.Rd0000644000176200001440000000064614164710002017372 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/local.R \name{local_test_directory} \alias{local_test_directory} \title{Locally set test directory options} \usage{ local_test_directory(path, package = NULL, .env = parent.frame()) } \arguments{ \item{path}{Path to directory of files.} \item{package}{Optional package name, if known.} } \description{ For expert use only. } \keyword{internal} testthat/man/expect.Rd0000644000176200001440000000410514164710002014437 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expectation.R \name{expect} \alias{expect} \title{The building block of all \code{expect_} functions} \usage{ expect( ok, failure_message, info = NULL, srcref = NULL, trace = NULL, trace_env = caller_env() ) } \arguments{ \item{ok}{\code{TRUE} or \code{FALSE} indicating if the expectation was successful.} \item{failure_message}{Message to show if the expectation failed.} \item{info}{Character vector containing additional information. Included for backward compatibility only; new expectations should not use it.} \item{srcref}{Location of the failure. Should only need to be explicitly supplied when you need to forward a srcref captured elsewhere.} \item{trace}{An optional backtrace created by \code{\link[rlang:trace_back]{rlang::trace_back()}}. When supplied, the expectation is displayed with the backtrace.} \item{trace_env}{If \code{is.null(trace)}, this is used to automatically generate a traceback running from \code{test_code()}/\code{test_file()} to \code{trace_env}. You'll generally only need to set this if you're wrapping an expectation inside another function.} } \value{ An expectation object. Signals the expectation condition with a \code{continue_test} restart.
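As an illustration, here is a minimal custom expectation built on \code{expect()} (a sketch following the pattern in \code{vignette("custom-expectation")}; \code{expect_length_one()} is a hypothetical helper): \preformatted{
expect_length_one <- function(object) {
  # Capture the object and a readable label for failure messages
  act <- quasi_label(rlang::enquo(object), arg = "object")
  expect(
    length(act$val) == 1,
    sprintf("\%s has length \%d, not length 1.", act$lab, length(act$val))
  )
  invisible(act$val)
}
}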
} \description{ Call \code{expect()} when writing your own expectations. See \code{vignette("custom-expectation")} for details. } \details{ While \code{expect()} creates and signals an expectation in one go, \code{exp_signal()} separately signals an expectation that you have manually created with \code{\link[=new_expectation]{new_expectation()}}. Expectations are signalled with the following protocol: \itemize{ \item If the expectation is a failure or an error, it is signalled with \code{\link[base:stop]{base::stop()}}. Otherwise, it is signalled with \code{\link[base:conditions]{base::signalCondition()}}. \item The \code{continue_test} restart is registered. When invoked, failing expectations are ignored and normal control flow is resumed to run the other tests. } } \seealso{ \code{\link[=exp_signal]{exp_signal()}} } testthat/man/local_test_context.Rd0000644000176200001440000001055214165635513017055 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/local.R \name{local_test_context} \alias{local_test_context} \alias{local_reproducible_output} \title{Locally set options for maximal test reproducibility} \usage{ local_test_context(.env = parent.frame()) local_reproducible_output( width = 80, crayon = FALSE, unicode = FALSE, lang = "en", .env = parent.frame() ) } \arguments{ \item{.env}{Environment to use for scoping; expert use only.} \item{width}{Value of the \code{"width"} option.} \item{crayon}{Value of the \code{"crayon.enabled"} option.} \item{unicode}{Value of the \code{"cli.unicode"} option. The test is skipped if \code{l10n_info()$`UTF-8`} is \code{FALSE}.} \item{lang}{Optionally, supply a BCP47 language code to set the language used for translating error messages. This is a lower case two letter \href{https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes}{ISO 639 country code}, optionally followed by "_" or "-" and an upper case two letter \href{https://en.wikipedia.org/wiki/ISO_3166-2}{ISO 3166 region code}.} } \description{ \code{local_test_context()} is run automatically by \code{test_that()} but you may want to run it yourself if you want to replicate test results interactively. If run inside a function, the effects are automatically reversed when the function exits; if running in the global environment, use \code{\link[withr:defer]{withr::deferred_run()}} to undo. \code{local_reproducible_output()} is run automatically by \code{test_that()} in the 3rd edition. You might want to call it to override the default settings inside a test, if you want to test Unicode, coloured output, or a non-standard width. } \details{ \code{local_test_context()} sets \code{TESTTHAT = "true"}, which ensures that \code{\link[=is_testing]{is_testing()}} returns \code{TRUE} and allows code to tell if it is run by testthat. In the third edition, \code{local_test_context()} also calls \code{local_reproducible_output()} which temporarily sets the following options: \itemize{ \item \code{cli.dynamic = FALSE} so that tests assume that they are not run in a dynamic console (i.e. one where you can move the cursor around). \item \code{cli.unicode} (default: \code{FALSE}) so that the cli package never generates unicode output (normally cli uses unicode on Linux/Mac but not Windows). Windows can't easily save unicode output to disk, so it must be set to false for consistency. \item \code{cli.condition_width = Inf} so that new lines introduced while width-wrapping condition messages don't interfere with message matching.
\item \code{crayon.enabled} (default: \code{FALSE}) suppresses ANSI colours generated by the crayon package (normally colours are used if crayon detects that you're in a terminal that supports colour). \item \code{cli.num_colors} (default: \code{1L}) Same as the crayon option. \item \code{lifecycle_verbosity = "warning"} so that every lifecycle problem always generates a warning (otherwise deprecated functions don't generate a warning every time). \item \code{max.print = 99999} so the same number of values are printed. \item \code{OutDec = "."} so numbers always use \code{.} as the decimal point (European users sometimes set \code{OutDec = ","}). \item \code{rlang_interactive = FALSE} so that \code{\link[rlang:is_interactive]{rlang::is_interactive()}} returns \code{FALSE}, and code that uses it pretends you're in a non-interactive environment. \item \code{useFancyQuotes = FALSE} so base R functions always use regular (straight) quotes (otherwise the default is locale dependent, see \code{\link[=sQuote]{sQuote()}} for details). \item \code{width} (default: 80) to control the width of printed output (usually this varies with the size of your console). } And modifies the following env vars: \itemize{ \item Unsets \code{RSTUDIO}, which ensures that RStudio is never detected as running. \item Sets \code{LANGUAGE = "en"}, which ensures that no message translation occurs. } Finally, it sets the collation locale to "C", which ensures that character sorting is the same regardless of system locale. } \examples{ local({ local_test_context() cat(crayon::blue("Text will not be colored")) cat(cli::symbol$ellipsis) cat("\n") }) test_that("test ellipsis", { local_reproducible_output(unicode = FALSE) expect_equal(cli::symbol$ellipsis, "...") local_reproducible_output(unicode = TRUE) expect_equal(cli::symbol$ellipsis, "\u2026") }) } testthat/man/skip.Rd0000644000176200001440000000727114165635513014132 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/skip.R \name{skip} \alias{skip} \alias{skip_if_not} \alias{skip_if} \alias{skip_if_not_installed} \alias{skip_if_offline} \alias{skip_on_cran} \alias{skip_on_os} \alias{skip_on_travis} \alias{skip_on_appveyor} \alias{skip_on_ci} \alias{skip_on_covr} \alias{skip_on_bioc} \alias{skip_if_translated} \title{Skip a test} \usage{ skip(message) skip_if_not(condition, message = NULL) skip_if(condition, message = NULL) skip_if_not_installed(pkg, minimum_version = NULL) skip_if_offline(host = "r-project.org") skip_on_cran() skip_on_os(os, arch = NULL) skip_on_travis() skip_on_appveyor() skip_on_ci() skip_on_covr() skip_on_bioc() skip_if_translated(msgid = "'\%s' not found") } \arguments{ \item{message}{A message describing why the test was skipped.} \item{condition}{Boolean condition to check. \code{skip_if_not()} will skip if \code{FALSE}, \code{skip_if()} will skip if \code{TRUE}.} \item{pkg}{Name of the package to check for.} \item{minimum_version}{Minimum required version for the package.} \item{host}{A string with a hostname to look up.} \item{os}{Character vector of one or more operating systems to skip on. Supported values are \code{"windows"}, \code{"mac"}, \code{"linux"}, and \code{"solaris"}.} \item{arch}{Character vector of one or more architectures to skip on. Common values include \code{"i386"} (32 bit), \code{"x86_64"} (64 bit), and \code{"aarch64"} (M1 mac). Supplying \code{arch} makes the test stricter; i.e.
both \code{os} and \code{arch} must match in order for the test to be skipped.} \item{msgid}{R message identifier used to check for translation: the default uses a message included in most translation packs. See the complete list in \href{https://github.com/wch/r-source/blob/master/src/library/base/po/R-base.pot}{\code{R-base.pot}}.} } \description{ \code{skip_if()} and \code{skip_if_not()} allow you to skip tests, immediately concluding a \code{\link[=test_that]{test_that()}} block without executing any further expectations. This allows you to skip a test without failure, if for some reason it can't be run (e.g. it depends on a feature of a specific operating system, or it requires a specific version of a package). See \code{vignette("skipping")} for more details. } \section{Helpers}{ \itemize{ \item \code{skip_if_not_installed("pkg")} skips tests if package "pkg" is not installed or cannot be loaded (using \code{requireNamespace()}). Generally, you can assume that suggested packages are installed, and you do not need to check for them specifically, unless they are particularly difficult to install. \item \code{skip_if_offline()} skips if an internet connection is not available (using \code{\link[curl:nslookup]{curl::nslookup()}}) or if the test is run on CRAN. \item \code{skip_if_translated("msg")} skips tests if the "msg" is translated. \item \code{skip_on_bioc()} skips on Bioconductor (using the \code{BBS_HOME} env var). \item \code{skip_on_cran()} skips on CRAN (using the \code{NOT_CRAN} env var set by devtools and friends). \item \code{skip_on_covr()} skips when covr is running (using the \code{R_COVR} env var). \item \code{skip_on_ci()} skips on continuous integration systems like GitHub Actions, Travis, and Appveyor (using the \code{CI} env var). It supersedes the older \code{skip_on_travis()} and \code{skip_on_appveyor()} functions. \item \code{skip_on_os()} skips on the specified operating system(s) ("windows", "mac", "linux", or "solaris"). } } \examples{ if (FALSE) skip("No internet connection") test_that("skip example", { expect_equal(1, 1L) # this expectation runs skip('skip') expect_equal(1, 2) # this one skipped expect_equal(1, 3) # this one is also skipped }) } testthat/man/make_expectation.Rd0000644000176200001440000000121313171137773016503 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/make-expectation.R \name{make_expectation} \alias{make_expectation} \title{Make an equality test.} \usage{ make_expectation(x, expectation = "equals") } \arguments{ \item{x}{a vector of values} \item{expectation}{the type of equality you want to test for (\code{"equals"}, \code{"is_equivalent_to"}, \code{"is_identical_to"})} } \description{ This is a convenience function to make an expectation that checks that input stays the same. } \examples{ x <- 1:10 make_expectation(x) make_expectation(mtcars$mpg) df <- data.frame(x = 2) make_expectation(df) } \keyword{internal} testthat/man/logical-expectations.Rd0000644000176200001440000000337314164710002017273 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-constant.R \name{logical-expectations} \alias{logical-expectations} \alias{expect_true} \alias{expect_false} \title{Does code return \code{TRUE} or \code{FALSE}?} \usage{ expect_true(object, info = NULL, label = NULL) expect_false(object, info = NULL, label = NULL) } \arguments{ \item{object}{Object to test.
Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} } \description{ These are fall-back expectations that you can use when none of the other more specific expectations apply. The disadvantage is that you may get a less informative error message. } \details{ Attributes are ignored. } \examples{ expect_true(2 == 2) # Failed expectations will throw an error \dontrun{ expect_true(2 != 2) } expect_true(!(2 != 2)) # or better: expect_false(2 != 2) a <- 1:3 expect_true(length(a) == 3) # but better to use more specific expectation, if available expect_equal(length(a), 3) } \seealso{ \code{\link[=is_false]{is_false()}} for the complement Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_length}()}, \code{\link{expect_match}()}, \code{\link{expect_named}()}, \code{\link{expect_null}()}, \code{\link{expect_output}()}, \code{\link{expect_reference}()}, \code{\link{expect_silent}()}, \code{\link{inheritance-expectations}} } \concept{expectations} testthat/man/SilentReporter.Rd0000644000176200001440000000161714164710002016135 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-silent.R \name{SilentReporter} \alias{SilentReporter} \title{Test reporter: gather all errors silently.} \description{ This reporter quietly runs all tests, simply gathering all expectations. This is helpful for programmatically inspecting errors after a test run. You can retrieve the results with the \code{expectations()} method. } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/try_again.Rd0000644000176200001440000000106113171137773015141 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/try-again.R \name{try_again} \alias{try_again} \title{Try evaluating an expression multiple times until it succeeds.} \usage{ try_again(times, code) } \arguments{ \item{times}{Maximum number of attempts.} \item{code}{Code to evaluate.} } \description{ Try evaluating an expression multiple times until it succeeds. } \examples{ third_try <- local({ i <- 3 function() { i <<- i - 1 if (i > 0) fail(paste0("i is ", i)) } }) try_again(3, third_try()) } \keyword{internal} testthat/man/teardown.Rd0000644000176200001440000000253014164710002014772 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/teardown.R \name{teardown} \alias{teardown} \alias{setup} \title{Run code before/after tests} \usage{ teardown(code, env = parent.frame()) setup(code, env = parent.frame()) } \arguments{ \item{code}{Code to evaluate.} \item{env}{Environment in which code will be evaluated.
For expert use only.} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} We no longer recommend using \code{setup()} and \code{teardown()}; instead we think it's better practice to use a \strong{test fixture} as described in \code{vignette("test-fixtures")}. Code in a \code{setup()} block is run immediately in a clean environment. Code in a \code{teardown()} block is run upon completion of a test file, even if it exits with an error. Multiple calls to \code{teardown()} will be executed in the order they were created. } \examples{ \dontrun{ # Old approach tmp <- tempfile() setup(writeLines("some test data", tmp)) teardown(unlink(tmp)) } # Now recommended: local_test_data <- function(env = parent.frame()) { tmp <- tempfile() writeLines("some test data", tmp) withr::defer(unlink(tmp), env) tmp } # Then call local_test_data() in your tests } \keyword{internal} testthat/man/expect_snapshot_file.Rd0000644000176200001440000001132514165635513017375 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/snapshot-file.R \name{expect_snapshot_file} \alias{expect_snapshot_file} \alias{announce_snapshot_file} \alias{compare_file_binary} \alias{compare_file_text} \title{Snapshot testing for whole files} \usage{ expect_snapshot_file( path, name = basename(path), binary = lifecycle::deprecated(), cran = FALSE, compare = NULL, transform = NULL, variant = NULL ) announce_snapshot_file(path, name = basename(path)) compare_file_binary(old, new) compare_file_text(old, new) } \arguments{ \item{path}{Path to file to snapshot. Optional for \code{announce_snapshot_file()} if \code{name} is supplied.} \item{name}{Snapshot name, taken from \code{path} by default.} \item{binary}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} Please use the \code{compare} argument instead.} \item{cran}{Should these expectations be verified on CRAN? By default, they are not: snapshot tests tend to be fragile because they often rely on minor details of dependencies.} \item{compare}{A function used to compare the snapshot files. It should take two inputs, the paths to the \code{old} and \code{new} snapshot, and return either \code{TRUE} or \code{FALSE}. This defaults to \code{compare_file_text} if \code{name} has extension \code{.r}, \code{.R}, \code{.Rmd}, \code{.md}, or \code{.txt}, and otherwise uses \code{compare_file_binary}. \code{compare_file_binary()} compares byte-by-byte and \code{compare_file_text()} compares lines-by-line, ignoring the difference between Windows and Mac/Linux line endings.} \item{transform}{Optionally, a function to scrub sensitive or stochastic text from the output. Should take a character vector of lines as input and return a modified character vector as output.} \item{variant}{If not \code{NULL}, results will be saved in \verb{_snaps/\{variant\}/\{test\}/\{name\}.\{ext\}}.
This allows you to create different snapshots for different scenarios, like different operating systems or different R versions.} \item{old, new}{Paths to old and new snapshot files.} } \description{ Whole file snapshot testing is designed for testing objects that don't have a convenient textual representation, with initial support for images (\code{.png}, \code{.jpg}, \code{.svg}), data frames (\code{.csv}), and text files (\code{.R}, \code{.txt}, \code{.json}, ...). The first time \code{expect_snapshot_file()} is run, it will create \verb{_snaps/\{test\}/\{name\}.\{ext\}} containing reference output. Future runs will be compared to this reference: if different, the test will fail and the new results will be saved in \verb{_snaps/\{test\}/\{name\}.new.\{ext\}}. To review failures, call \code{\link[=snapshot_review]{snapshot_review()}}. We generally expect this function to be used via a wrapper that takes care of ensuring that output is as reproducible as possible, e.g. automatically skipping tests where it's known that images can't be reproduced exactly. } \section{Announcing snapshots}{ testthat automatically detects dangling snapshots that have been written to the \verb{_snaps} directory but which no longer have corresponding R code to generate them. These dangling files are automatically deleted so they don't clutter the snapshot directory. However, we want to preserve snapshot files when the R code wasn't executed because of an unexpected error or because of a \code{\link[=skip]{skip()}}. Let testthat know about these files by calling \code{announce_snapshot_file()} before \code{expect_snapshot_file()}. } \examples{ # To use expect_snapshot_file() you'll typically need to start by writing # a helper function that creates a file from your code, returning a path save_png <- function(code, width = 400, height = 400) { path <- tempfile(fileext = ".png") png(path, width = width, height = height) on.exit(dev.off()) code path } path <- save_png(plot(1:5)) path \dontrun{ expect_snapshot_file(save_png(hist(mtcars$mpg)), "plot.png") } # You'd then also provide a helper that skips tests where you can't # be sure of producing exactly the same output expect_snapshot_plot <- function(name, code) { # Other packages might affect results skip_if_not_installed("ggplot2", "2.0.0") # Or maybe the output is different on some operating systems skip_on_os("windows") # You'll need to carefully think about and experiment with these skips name <- paste0(name, ".png") # Announce the file before touching `code`. This way, if `code` # unexpectedly fails or skips, testthat will not auto-delete the # corresponding snapshot file. announce_snapshot_file(name = name) path <- save_png(code) expect_snapshot_file(path, name) } } testthat/man/ListReporter.Rd0000644000176200001440000000156614164710002015615 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-list.R \name{ListReporter} \alias{ListReporter} \title{List reporter: gather all test results along with elapsed time and file information.} \description{ This reporter gathers all results, adding additional information such as test elapsed time, and test filename if available. Very useful for reporting.
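A minimal sketch of inspecting the gathered results programmatically (\code{test_file()} collects its results with this reporter, and an \code{as.data.frame()} method is available for the returned object): \preformatted{path <- testthat_example("success")
res <- test_file(path, reporter = "list")
# Summarise passes, failures, and timings per test
as.data.frame(res)}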
} \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/Reporter.Rd0000644000176200001440000000274314164710002014757 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter.R \name{Reporter} \alias{Reporter} \title{Manage test reporting} \description{ The job of a reporter is to aggregate the results from files, tests, and expectations and display them in an informative way. Every testthat function that runs multiple tests provides a \code{reporter} argument which you can use to override the default (which is selected by \code{\link[=default_reporter]{default_reporter()}}). } \details{ You only need to use this \code{Reporter} object directly if you are creating a new reporter. Currently, creating new Reporters is undocumented, so if you want to create your own, you'll need to make sure that you're familiar with \href{https://adv-r.hadley.nz/R6.html}{R6} and then read the source code for a few of the existing reporters. } \examples{ path <- testthat_example("success") test_file(path) # Override the default by supplying the name of a reporter test_file(path, reporter = "minimal") } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} \keyword{internal} testthat/man/TapReporter.Rd0000644000176200001440000000155714164710002015426 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-tap.R \name{TapReporter} \alias{TapReporter} \title{Test reporter: TAP format.} \description{ This reporter will output results in the Test Anything Protocol (TAP), a simple text-based interface between testing modules in a test harness.
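A minimal usage sketch (any test file will do): \preformatted{path <- testthat_example("success")
# Emit results as TAP instead of using the default reporter
test_file(path, reporter = "tap")}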
For more information about TAP, see http://testanything.org } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/expect_error.Rd0000644000176200001440000001334614164742334015674 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-condition.R \name{expect_error} \alias{expect_error} \alias{expect_warning} \alias{expect_message} \alias{expect_condition} \title{Does code throw an error, warning, message, or other condition?} \usage{ expect_error( object, regexp = NULL, class = NULL, ..., inherit = TRUE, info = NULL, label = NULL ) expect_warning( object, regexp = NULL, class = NULL, ..., inherit = TRUE, all = FALSE, info = NULL, label = NULL ) expect_message( object, regexp = NULL, class = NULL, ..., inherit = TRUE, all = FALSE, info = NULL, label = NULL ) expect_condition( object, regexp = NULL, class = NULL, ..., inherit = TRUE, info = NULL, label = NULL ) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{regexp}{Regular expression to test against. \itemize{ \item A character vector giving a regular expression that must match the error message. \item If \code{NULL}, the default, asserts that there should be an error, but doesn't test for a specific value. \item If \code{NA}, asserts that there should be no errors. }} \item{class}{Instead of supplying a regular expression, you can also supply a class name. This is useful for "classed" conditions.} \item{...}{ Arguments passed on to \code{\link[=expect_match]{expect_match}} \describe{ \item{\code{perl}}{logical. Should Perl-compatible regexps be used?} \item{\code{fixed}}{logical. If \code{TRUE}, \code{pattern} is a string to be matched as is. Overrides all conflicting arguments.} }} \item{inherit}{Whether to match \code{regexp} and \code{class} across the ancestry of chained errors.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} \item{all}{\emph{DEPRECATED} If you need to test multiple warnings/messages you now need to use multiple calls to \code{expect_message()}/ \code{expect_warning()}} } \value{ If \code{regexp = NA}, the value of the first argument; otherwise the captured condition. } \description{ \code{expect_error()}, \code{expect_warning()}, \code{expect_message()}, and \code{expect_condition()} check that code throws an error, warning, message, or condition with a message that matches \code{regexp}, or a class that inherits from \code{class}. See below for more details. In the 3rd edition, these functions match (at most) a single condition. All additional and non-matching (if \code{regexp} or \code{class} are used) conditions will bubble up outside the expectation. 
If these additional conditions are important you'll need to catch them with additional \code{expect_message()}/\code{expect_warning()} calls; if they're unimportant you can ignore them with \code{\link[=suppressMessages]{suppressMessages()}}/\code{\link[=suppressWarnings]{suppressWarnings()}}. It can be tricky to test for a combination of different conditions, such as a message followed by an error. \code{\link[=expect_snapshot]{expect_snapshot()}} is often an easier alternative for these more complex cases. } \section{Testing \code{message} vs \code{class}}{ When checking that code generates an error, it's important to check that the error is the one you expect. There are two ways to do this. The first way is the simplest: you just provide a \code{regexp} that matches some fragment of the error message. This is easy, but fragile, because the test will fail if the error message changes (even if it's the same error). A more robust way is to test for the class of the error, if it has one. You can learn more about custom conditions at \url{https://adv-r.hadley.nz/conditions.html#custom-conditions}, but in short, errors are S3 classes and you can generate a custom class and check for it using \code{class} instead of \code{regexp}. If you are using \code{expect_error()} to check that an error message is formatted in such a way that it makes sense to a human, we recommend using \code{\link[=expect_snapshot]{expect_snapshot()}} instead. } \examples{ # Errors ------------------------------------------------------------------ f <- function() stop("My error!") expect_error(f()) expect_error(f(), "My error!") # You can use the arguments of grepl to control the matching expect_error(f(), "my error!", ignore.case = TRUE) # Note that `expect_error()` returns the error object so you can test # its components if needed err <- expect_error(rlang::abort("a", n = 10)) expect_equal(err$n, 10) # Warnings ------------------------------------------------------------------ f <- function(x) { if (x < 0) { warning("*x* is already negative") return(x) } -x } expect_warning(f(-1)) expect_warning(f(-1), "already negative") expect_warning(f(1), NA) # To test message and output, store results to a variable expect_warning(out <- f(-1), "already negative") expect_equal(out, -1) # Messages ------------------------------------------------------------------ f <- function(x) { if (x < 0) { message("*x* is already negative") return(x) } -x } expect_message(f(-1)) expect_message(f(-1), "already negative") expect_message(f(1), NA) } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_length}()}, \code{\link{expect_match}()}, \code{\link{expect_named}()}, \code{\link{expect_null}()}, \code{\link{expect_output}()}, \code{\link{expect_reference}()}, \code{\link{expect_silent}()}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/is_testing.Rd0000644000176200001440000000127514164710002015324 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-env.R \name{is_testing} \alias{is_testing} \alias{is_parallel} \alias{testing_package} \title{Determine testing status} \usage{ is_testing() is_parallel() testing_package() } \description{ \itemize{ \item \code{is_testing()} determines whether code is being run as part of a test. \item \code{is_parallel()} determines whether the test is being run in parallel. \item \code{testing_package()} gives the name of the package being tested.
} These are thin wrappers that retrieve the values of environment variables. To avoid creating a run-time dependency on testthat, you can inline the source of these functions directly into your package. } testthat/man/verify_output.Rd0000644000176200001440000000526414164710002016102 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/verify-output.R \name{verify_output} \alias{verify_output} \title{Verify output} \usage{ verify_output( path, code, width = 80, crayon = FALSE, unicode = FALSE, env = caller_env() ) } \arguments{ \item{path}{Path to record results. This should usually be a call to \code{\link[=test_path]{test_path()}} in order to ensure that the same path is used when run interactively (when the working directory is typically the project root), and when run as an automated test (when the working directory will be \code{tests/testthat}).} \item{code}{Code to execute. This will usually be a multiline expression contained within \code{{}} (similarly to \code{test_that()} calls).} \item{width}{Width of console output} \item{crayon}{Enable crayon package colouring?} \item{unicode}{Enable cli package UTF-8 symbols? If you set this to \code{TRUE}, call \code{skip_if(!cli::is_utf8_output())} to disable the test on your CI platforms that don't support UTF-8 (e.g. Windows).} \item{env}{The environment to evaluate \code{code} in.} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} This function is superseded in favour of \code{expect_snapshot()} and friends. This is a regression test that records interwoven code and output into a file, in a similar way to knitting an \code{.Rmd} file (but see caveats below). \code{verify_output()} is designed particularly for testing print methods and error messages, where the primary goal is to ensure that the output is helpful to a human. Obviously, you can't test that with code, so the best you can do is make the results explicit by saving them to a text file. This makes the output easy to verify in code reviews, and ensures that you don't change the output by accident. \code{verify_output()} is designed to be used with git: to see what has changed from the previous run, you'll need to use \verb{git diff} or similar. } \section{Syntax}{ \code{verify_output()} can only capture the abstract syntax tree, losing all whitespace and comments. To mildly offset this limitation: \itemize{ \item Strings are converted to R comments in the output. \item Strings starting with \verb{# } are converted to headers in the output. } } \section{CRAN}{ On CRAN, \code{verify_output()} will never fail, even if the output changes. This avoids false positives because tests of print methods and error messages are often fragile due to implicit dependencies on other packages, and failure does not imply incorrect computation, just a change in presentation. } \keyword{internal} testthat/man/context.Rd0000644000176200001440000000234614164710002014640 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/context.R \name{context} \alias{context} \title{Describe the context of a set of tests.} \usage{ context(desc) } \arguments{ \item{desc}{description of context. 
Should start with a capital letter.} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} Use of \code{context()} is no longer recommended. Omit it, and messages will use the name of the file instead. This ensures that the context and test file name are always in sync. A context defines a set of tests that test related functionality. Usually you will have one context per file, but you may have multiple contexts in a single file if you so choose. } \section{3rd edition}{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} \code{context()} is deprecated in the third edition, and the equivalent information is instead recorded by the test file name. } \examples{ context("String processing") context("Remote procedure calls") } \keyword{internal} testthat/man/expect_output.Rd0000644000176200001440000000506514164710002016065 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-output.R \name{expect_output} \alias{expect_output} \title{Does code print output to the console?} \usage{ expect_output( object, regexp = NULL, ..., info = NULL, label = NULL, width = 80 ) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{regexp}{Regular expression to test against. \itemize{ \item A character vector giving a regular expression that must match the output. \item If \code{NULL}, the default, asserts that there should be output, but doesn't check for a specific value. \item If \code{NA}, asserts that there should be no output. }} \item{...}{ Arguments passed on to \code{\link[=expect_match]{expect_match}} \describe{ \item{\code{all}}{Should all elements of actual value match \code{regexp} (TRUE), or does only one need to match (FALSE).} \item{\code{perl}}{logical. Should Perl-compatible regexps be used?} \item{\code{fixed}}{logical. If \code{TRUE}, \code{pattern} is a string to be matched as is. Overrides all conflicting arguments.} }} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} \item{width}{Number of characters per line of output. This does not inherit from \code{getOption("width")} so that tests always use the same output width, minimising spurious differences.} } \value{ The first argument, invisibly. } \description{ Test for output produced by \code{print()} or \code{cat()}. This is best used for very simple output; for more complex cases use \code{\link[=verify_output]{verify_output()}}.
} \examples{ str(mtcars) expect_output(str(mtcars), "32 obs") expect_output(str(mtcars), "11 variables") # You can use the arguments of grepl to control the matching expect_output(str(mtcars), "11 VARIABLES", ignore.case = TRUE) expect_output(str(mtcars), "$ mpg", fixed = TRUE) } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_length}()}, \code{\link{expect_match}()}, \code{\link{expect_named}()}, \code{\link{expect_null}()}, \code{\link{expect_reference}()}, \code{\link{expect_silent}()}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/expect_invisible.Rd0000644000176200001440000000175014164710002016506 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-invisible.R \name{expect_invisible} \alias{expect_invisible} \alias{expect_visible} \title{Does code return a visible or invisible object?} \usage{ expect_invisible(call, label = NULL) expect_visible(call, label = NULL) } \arguments{ \item{call}{A function call.} \item{label}{Used to customise failure messages. For expert use only.} } \value{ The evaluated \code{call}, invisibly. } \description{ Use this to test whether a function returns a visible or invisible output. Typically you'll use this to check that functions called primarily for their side-effects return their data argument invisibly. } \examples{ expect_invisible(x <- 10) expect_visible(x) # Typically you'll assign the result of the expectation so you can # also check that the value is as you expect. greet <- function(name) { message("Hi ", name) invisible(name) } out <- expect_invisible(greet("Hadley")) expect_equal(out, "Hadley") } testthat/man/compare.Rd0000644000176200001440000000533714164710002014605 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compare.R \name{compare} \alias{compare} \alias{compare.default} \alias{compare.character} \alias{compare.numeric} \alias{testthat_tolerance} \alias{compare.POSIXt} \title{Provide human-readable comparison of two objects} \usage{ compare(x, y, ...) \method{compare}{default}(x, y, ..., max_diffs = 9) \method{compare}{character}( x, y, check.attributes = TRUE, ..., max_diffs = 5, max_lines = 5, width = cli::console_width() ) \method{compare}{numeric}( x, y, tolerance = testthat_tolerance(), check.attributes = TRUE, ..., max_diffs = 9 ) testthat_tolerance() \method{compare}{POSIXt}(x, y, tolerance = 0.001, ..., max_diffs = 9) } \arguments{ \item{x, y}{Objects to compare} \item{...}{Additional arguments used to control specifics of comparison} \item{max_diffs}{Maximum number of differences to show} \item{check.attributes}{If \code{TRUE}, also checks values of attributes.} \item{max_lines}{Maximum number of lines to show from each difference} \item{width}{Width of output device} \item{tolerance}{Numerical tolerance: any differences (in the sense of \code{\link[base:all.equal]{base::all.equal()}}) smaller than this value will be ignored. 
The default tolerance is \code{sqrt(.Machine$double.eps)}, unless long doubles are not available, in which case the test is skipped.} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} \code{compare} is similar to \code{\link[base:all.equal]{base::all.equal()}}, but somewhat buggy in its use of \code{tolerance}. Please use \href{https://waldo.r-lib.org/}{waldo} instead. } \examples{ # Character ----------------------------------------------------------------- x <- c("abc", "def", "jih") compare(x, x) y <- paste0(x, "y") compare(x, y) compare(letters, paste0(letters, "-")) x <- "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis cursus tincidunt auctor. Vestibulum ac metus bibendum, facilisis nisi non, pulvinar dolor. Donec pretium iaculis nulla, ut interdum sapien ultricies a. " y <- "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis cursus tincidunt auctor. Vestibulum ac metus1 bibendum, facilisis nisi non, pulvinar dolor. Donec pretium iaculis nulla, ut interdum sapien ultricies a. " compare(x, y) compare(c(x, x), c(y, y)) # Numeric ------------------------------------------------------------------- x <- y <- runif(100) y[sample(100, 10)] <- 5 compare(x, y) x <- y <- 1:10 x[5] <- NA x[6] <- 6.5 compare(x, y) # Compare ignores minor numeric differences in the same way # as all.equal. compare(x, x + 1e-9) } \keyword{internal} testthat/man/testthat_results.Rd0000644000176200001440000000111413171137773016604 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-list.R \name{testthat_results} \alias{testthat_results} \title{Create a \code{testthat_results} object from the test results as stored in the ListReporter results field.} \usage{ testthat_results(results) } \arguments{ \item{results}{a list as stored in ListReporter} } \value{ its list argument as a \code{testthat_results} object } \description{ Create a \code{testthat_results} object from the test results as stored in the ListReporter results field. } \seealso{ ListReporter } \keyword{internal} testthat/man/describe.Rd0000644000176200001440000000401514164710002014731 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/describe.R \name{describe} \alias{describe} \title{describe: a BDD testing language} \usage{ describe(description, code) } \arguments{ \item{description}{description of the feature} \item{code}{test code containing the specs} } \description{ A simple BDD DSL for writing tests. The language is similar to RSpec for Ruby or Mocha for JavaScript. BDD tests read like sentences and it should thus be easier to understand what the specification of a function/component is. } \details{ Tests using the \code{describe} syntax not only verify the tested code, but also document its intended behaviour. Each \code{describe} block specifies a larger component or function and contains a set of specifications. A specification is defined by an \code{it} block. Each \code{it} block functions as a test and is evaluated in its own environment. You can also have nested \code{describe} blocks. This test syntax helps to test the intended behaviour of your code. For example: you want to write a new function for your package. Try to describe the specification first using \code{describe}, before you write any code. After that, you start to implement the tests for each specification (i.e.
the \code{it} block). Use \code{describe} to verify that you implement the right things and use \code{\link[=test_that]{test_that()}} to ensure you do the things right. } \examples{ describe("matrix()", { it("can be multiplied by a scalar", { m1 <- matrix(1:4, 2, 2) m2 <- m1 * 2 expect_equal(matrix(1:4 * 2, 2, 2), m2) }) it("can have not yet tested specs") }) # Nested specs: ## code addition <- function(a, b) a + b division <- function(a, b) a / b ## specs describe("math library", { describe("addition()", { it("can add two numbers", { expect_equal(1 + 1, addition(1, 1)) }) }) describe("division()", { it("can divide two numbers", { expect_equal(10 / 2, division(10, 2)) }) it("can handle division by 0") #not yet implemented }) }) } testthat/man/JunitReporter.Rd0000644000176200001440000000275314164710002015772 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-junit.R \name{JunitReporter} \alias{JunitReporter} \title{Test reporter: summary of errors in jUnit XML format.} \description{ This reporter includes detailed results about each test and summaries, written to a file (or stdout) in jUnit XML format. This can be read by the Jenkins Continuous Integration System to report on a dashboard etc. Requires the \emph{xml2} package. } \details{ To fit into the jUnit structure, context() becomes the \verb{<testsuite>} name as well as the base of the \verb{<testcase> classname}. The test_that() name becomes the rest of the \verb{<testcase> classname}. The deparsed expect_that() call becomes the \verb{<testcase>} name. On failure, the message goes into the \verb{<failure>} node message argument (first line only) and into its text content (full message). Execution time and some other details are also recorded. References for the jUnit XML format: \url{http://llg.cubic.org/docs/junit/} } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/run_cpp_tests.Rd0000644000176200001440000000117514164710002016043 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-compiled-code.R \name{expect_cpp_tests_pass} \alias{expect_cpp_tests_pass} \alias{run_cpp_tests} \title{Do C++ tests pass?} \usage{ expect_cpp_tests_pass(package) run_cpp_tests(package) } \arguments{ \item{package}{The name of the package to test.} } \description{ Test compiled code in the package \code{package}. A call to this function will automatically be generated for you in \code{tests/testthat/test-cpp.R} after calling \code{\link[=use_catch]{use_catch()}}; you should not need to manually call this expectation yourself.
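The generated file is typically just a thin wrapper; a sketch (the exact contents depend on your testthat version, and \code{"yourpkg"} is a placeholder): \preformatted{# tests/testthat/test-cpp.R, as generated by use_catch() (sketch)
run_cpp_tests("yourpkg")}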
} \keyword{internal} testthat/man/evaluate_promise.Rd0000644000176200001440000000136013171137773016532 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/evaluate-promise.R \name{evaluate_promise} \alias{evaluate_promise} \title{Evaluate a promise, capturing all types of output.} \usage{ evaluate_promise(code, print = FALSE) } \arguments{ \item{code}{Code to evaluate.} } \value{ A list containing \item{result}{The result of the function} \item{output}{A string containing all the output from the function} \item{warnings}{A character vector containing the text from each warning} \item{messages}{A character vector containing the text from each message} } \description{ Evaluate a promise, capturing all types of output. } \examples{ evaluate_promise({ print("1") message("2") warning("3") 4 }) } \keyword{internal} testthat/DESCRIPTION0000644000176200001440000000331214172362302013617 0ustar liggesusersPackage: testthat Title: Unit Testing for R Version: 3.1.2 Authors@R: c(person(given = "Hadley", family = "Wickham", role = c("aut", "cre"), email = "hadley@rstudio.com"), person(given = "RStudio", role = c("cph", "fnd")), person(given = "R Core team", role = "ctb", comment = "Implementation of utils::recover()")) Description: Software testing is important, but, in part because it is frustrating and boring, many of us avoid it. 'testthat' is a testing framework for R that is easy to learn and use, and integrates with your existing 'workflow'. License: MIT + file LICENSE URL: https://testthat.r-lib.org, https://github.com/r-lib/testthat BugReports: https://github.com/r-lib/testthat/issues Depends: R (>= 3.1) Imports: brio, callr (>= 3.5.1), cli (>= 2.2.0), crayon (>= 1.3.4), desc, digest, ellipsis (>= 0.2.0), evaluate, jsonlite, lifecycle, magrittr, methods, pkgload, praise, processx, ps (>= 1.3.4), R6 (>= 2.2.0), rlang (>= 0.4.9), utils, waldo (>= 0.2.4), withr (>= 2.4.3) Suggests: covr, curl (>= 0.9.5), diffviewer (>= 0.1.0), knitr, mockery, rmarkdown, rstudioapi, shiny, usethis, vctrs (>= 0.1.0), xml2 VignetteBuilder: knitr Config/testthat/edition: 3 Config/testthat/start-first: watcher, parallel* Config/Needs/website: tidyverse/tidytemplate Encoding: UTF-8 RoxygenNote: 7.1.2 NeedsCompilation: yes Packaged: 2022-01-20 21:10:39 UTC; hadleywickham Author: Hadley Wickham [aut, cre], RStudio [cph, fnd], R Core team [ctb] (Implementation of utils::recover()) Maintainer: Hadley Wickham Repository: CRAN Date/Publication: 2022-01-20 22:40:02 UTC testthat/build/0000755000176200001440000000000014172347705013223 5ustar liggesuserstestthat/build/vignette.rds0000644000176200001440000000053514172347705015565 0ustar liggesusersRMS0MK8:OA^34،mionmMJEdnuY7pk~jȳb d,6˂a{ؖBB"x"bNO MaU Du$UdYE G+L"Ѐu$GuD2;:lsV1ɜ ՌThM=榩 ^ZՈh뎋-L=]s^{3tNA*H0 + m{ѳ?t6P} K~vXPUW:2 citestthat/tests/0000755000176200001440000000000014172347717013271 5ustar liggesuserstestthat/tests/test-catch.R0000644000176200001440000000402514164710003015433 0ustar liggesuserslibrary(testthat) local({ # Disable test on Windows, pending devtools # compatibility with new toolchain isWindows <- Sys.info()[["sysname"]] == "Windows" if (isWindows) return() # Disable tests on Solaris, because we don't use Catch there. 
isSolaris <- Sys.info()[["sysname"]] == "SunOS" if (isSolaris) return() if (!requireNamespace("usethis", quietly = TRUE)) return() # devel <- try(pkgbuild::has_compiler(), silent = TRUE) # if (!isTRUE(devel)) # return() quietly <- function(expr) { suppressMessages(capture_output(result <- expr)) result } perform_test <- function(pkgName, catchEnabled) { owd <- setwd(tempdir()) on.exit(setwd(owd), add = TRUE) pkgPath <- file.path(tempdir(), pkgName) libPath <- file.path(tempdir(), "rlib") if (!utils::file_test("-d", libPath)) dir.create(libPath) .libPaths(c(libPath, .libPaths())) on.exit({ unlink(pkgPath, recursive = TRUE) unlink(libPath, recursive = TRUE) }, add = TRUE) quietly(usethis::create_package(pkgPath, open = FALSE)) quietly(testthat::use_catch(pkgPath)) cat("LinkingTo: testthat", file = file.path(pkgPath, "DESCRIPTION"), append = TRUE, sep = "\n") cat( sprintf("useDynLib(%s, .registration=TRUE)", pkgName), file = file.path(pkgPath, "NAMESPACE"), append = TRUE, sep = "\n" ) if (!catchEnabled) { makevarsPath <- file.path( pkgPath, "src", if (isWindows) "Makevars.win" else "Makevars" ) cat( "PKG_CPPFLAGS = -DTESTTHAT_DISABLED", file = makevarsPath, sep = "\n" ) } install.packages(pkgs = pkgPath, repos = NULL, type = "source") library(pkgName, character.only = TRUE) stopifnot(.Call("run_testthat_tests", FALSE, PACKAGE = pkgName)) pkgload::unload(pkgName) } withr::with_envvar(c(R_TESTS = ''), perform_test("testthatclient1", TRUE)) withr::with_envvar(c(R_TESTS = ''), perform_test("testthatclient2", FALSE)) }) testthat/tests/testthat/0000755000176200001440000000000014172362302015114 5ustar liggesuserstestthat/tests/testthat/test-source_dir.R0000644000176200001440000000104613706103225020352 0ustar liggesuserstest_that("source_dir()", { res <- source_dir("test_dir", pattern = "hello", chdir = TRUE, wrap = FALSE) expect_equal(res[[1]](), "Hello World") res <- source_dir(normalizePath("test_dir"), pattern = "hello", chdir = TRUE, wrap = FALSE) expect_equal(res[[1]](), "Hello World") res <- source_dir("test_dir", pattern = "hello", chdir = FALSE, wrap = FALSE) expect_equal(res[[1]](), "Hello World") res <- source_dir(normalizePath("test_dir"), pattern = "hello", chdir = FALSE, wrap = FALSE) expect_equal(res[[1]](), "Hello World") }) testthat/tests/testthat/test-parallel-crash.R0000644000176200001440000000072514164710003021106 0ustar liggesusers test_that("crash", { skip_on_cran() skip_on_covr() withr::local_envvar(TESTTHAT_PARALLEL = "TRUE") do <- function() { err <- NULL tryCatch( testthat::test_local(".", reporter = "silent", stop_on_failure = FALSE), error = function(e) err <<- e ) err } pkg <- test_path("test-parallel", "crash") err <- callr::r(do, wd = pkg) expect_s3_class(err, "testthat_process_error") expect_equal(err$test_file, "test-crash-3.R") }) testthat/tests/testthat/test-bare.R0000644000176200001440000000037613721254707017143 0ustar liggesusersexpect_equal(2, 2) expect_true(TRUE) expect_error(stop("!")) stopifnot( tryCatch( expect_true(TRUE), expectation_failure = function(e) FALSE ) ) stopifnot( tryCatch( expect_true(FALSE), expectation_failure = function(e) TRUE ) ) testthat/tests/testthat/test-parallel-outside.R0000644000176200001440000000047314164710003021462 0ustar liggesusers test_that("error outside of test_that()", { withr::local_envvar(TESTTHAT_PARALLEL = "TRUE") err <- tryCatch( suppressMessages(testthat::test_local( test_path("test-parallel", "outside"), reporter = "silent" )), error = function(e) e ) expect_match(err$message, "Test failures") }) 
testthat/tests/testthat/test-expect-named.R0000644000176200001440000000105213450671736020600 0ustar liggesuserstest_that("expect_named verifies presence of names", { expect_success(expect_named(c(a = 1))) expect_failure(expect_named(1:10)) }) test_that("expect_named verifies actual names", { expect_success(expect_named(c(a = 1), "a")) expect_failure(expect_named(c(a = 1), "b")) }) test_that("expect_named optionally ignores case", { expect_success(expect_named(c(a = 1), "A", ignore.case = TRUE)) }) test_that("expect_named optionally ignores order", { expect_success(expect_named(c(a = 1, b = 2), c("b", "a"), ignore.order = TRUE)) }) testthat/tests/testthat/test-expect-self-test.R0000644000176200001440000000124114164710003021402 0ustar liggesuserstest_that("fail always fails", { expect_failure(fail()) expect_failure(fail("abc"), "abc") }) test_that("succeed always succeeds", { expect_success(succeed()) }) test_that("expect_success errors if null", { expect_error(expect_success(NULL)) }) test_that("expect_success errors with msg", { expect_error(expect_success(stop("asdf")), 'asdf') }) test_that("expect_failure errors if null", { expect_error(expect_failure(NULL)) }) test_that("expect_failure errors if no failure", { expect_error(expect_failure(TRUE)) }) test_that("show_failure", { expect_null(show_failure(NULL)) expect_output(show_failure(expect_true(FALSE)), "FALSE is not TRUE") }) testthat/tests/testthat/test-expect-match.R0000644000176200001440000000254414164710003020577 0ustar liggesuserstest_that("extra arguments to matches passed onto grepl", { expect_success(expect_match("te*st", "e*", fixed = TRUE)) expect_success(expect_match("test", "TEST", ignore.case = TRUE)) }) test_that("special regex characters are escaped in output", { error <- tryCatch(expect_match("f() test", "f() test"), expectation = function(e) e$message) expect_equal(error, "\"f\\(\\) test\" does not match \"f() test\".\nActual value: \"f\\(\\) test\"") }) test_that("correct reporting of expected label", { expect_failure(expect_match("[a]", "[b]"), escape_regex("[a]"), fixed = TRUE) expect_failure(expect_match("[a]", "[b]", fixed = TRUE), "[a]", fixed = TRUE) }) test_that("errors if obj is empty str", { x <- character(0) err <- expect_error( expect_match(x, 'asdf'), class = "expectation_failure" ) expect_match(err$message, 'is empty') }) test_that("prints multiple unmatched values", { err <- expect_error( expect_match(letters[1:10], 'asdf'), class = "expectation_failure" ) expect_match(err$message, "does not match") }) test_that("expect_no_match works", { expect_success(expect_no_match("[a]", "[b]")) expect_success(expect_no_match("[a]", "[b]", fixed = TRUE)) expect_failure(expect_no_match("te*st", "e*", fixed = TRUE), escape_regex("te*st")) expect_failure(expect_no_match("test", "TEST", ignore.case = TRUE), "test") }) testthat/tests/testthat/test-parallel/0000755000176200001440000000000014172362302017665 5ustar liggesuserstestthat/tests/testthat/test-parallel/ok/0000755000176200001440000000000014164710003020272 5ustar liggesuserstestthat/tests/testthat/test-parallel/ok/NAMESPACE0000644000176200001440000000005614164710003021512 0ustar liggesusers# Generated by roxygen2: do not edit by hand testthat/tests/testthat/test-parallel/ok/DESCRIPTION0000644000176200001440000000110414164710003021776 0ustar liggesusersPackage: ok Title: What the Package Does (One Line, Title Case) Version: 0.0.0.9000 Authors@R: person(given = "First", family = "Last", role = c("aut", "cre"), email = "first.last@example.com", comment =
c(ORCID = "YOUR-ORCID-ID")) Description: What the package does (one paragraph). License: `use_mit_license()`, `use_gpl3_license()` or friends to pick a license Encoding: UTF-8 LazyData: true Roxygen: list(markdown = TRUE) RoxygenNote: 7.1.1 Suggests: testthat Config/testthat/parallel: true Config/testthat/edition: 3 testthat/tests/testthat/test-parallel/ok/tests/0000755000176200001440000000000014164710003021434 5ustar liggesuserstestthat/tests/testthat/test-parallel/ok/tests/testthat/0000755000176200001440000000000014172362302023300 5ustar liggesuserstestthat/tests/testthat/test-parallel/ok/tests/testthat/test-ok-3.R0000644000176200001440000000010414164710003025140 0ustar liggesuserstest_that("this skips", { skip(paste("This is", Sys.getpid())) }) testthat/tests/testthat/test-parallel/ok/tests/testthat/test-ok-1.R0000644000176200001440000000007014164710003025140 0ustar liggesuserstest_that("this is good", { expect_equal(2 * 2, 4) }) testthat/tests/testthat/test-parallel/ok/tests/testthat/test-ok-2.R0000644000176200001440000000007614164710003025147 0ustar liggesuserstest_that("this fails", { expect_equal(Sys.getpid(), 0L) }) testthat/tests/testthat/test-parallel/ok/tests/testthat.R0000644000176200001440000000006014164710003023413 0ustar liggesuserslibrary(testthat) library(ok) test_check("ok") testthat/tests/testthat/test-parallel/startup/0000755000176200001440000000000014164710003021363 5ustar liggesuserstestthat/tests/testthat/test-parallel/startup/NAMESPACE0000644000176200001440000000005614164710003022603 0ustar liggesusers# Generated by roxygen2: do not edit by hand testthat/tests/testthat/test-parallel/startup/DESCRIPTION0000644000176200001440000000110414164710003023065 0ustar liggesusersPackage: ok Title: What the Package Does (One Line, Title Case) Version: 0.0.0.9000 Authors@R: person(given = "First", family = "Last", role = c("aut", "cre"), email = "first.last@example.com", comment = c(ORCID = "YOUR-ORCID-ID")) Description: What the package does (one paragraph). 
License: `use_mit_license()`, `use_gpl3_license()` or friends to pick a license Encoding: UTF-8 LazyData: true Roxygen: list(markdown = TRUE) RoxygenNote: 7.1.1 Suggests: testthat Config/testthat/parallel: true Config/testthat/edition: 3 testthat/tests/testthat/test-parallel/startup/tests/0000755000176200001440000000000014164710003022525 5ustar liggesuserstestthat/tests/testthat/test-parallel/startup/tests/testthat/0000755000176200001440000000000014172362302024371 5ustar liggesuserstestthat/tests/testthat/test-parallel/startup/tests/testthat/test-startup-1.R0000644000176200001440000000007014164710003027322 0ustar liggesuserstest_that("this is good", { expect_equal(2 * 2, 4) }) testthat/tests/testthat/test-parallel/startup/tests/testthat.R0000644000176200001440000000006014164710003024504 0ustar liggesuserslibrary(testthat) library(ok) test_check("ok") testthat/tests/testthat/test-parallel/startup/R/0000755000176200001440000000000014164710003021564 5ustar liggesuserstestthat/tests/testthat/test-parallel/startup/R/fail.R0000644000176200001440000000013514164710003022621 0ustar liggesusers .onLoad <- function(libname, pkgname) { stop("This will fail when loading the package") } testthat/tests/testthat/test-parallel/teardown/0000755000176200001440000000000014164710003021504 5ustar liggesuserstestthat/tests/testthat/test-parallel/teardown/NAMESPACE0000644000176200001440000000005614164710003022724 0ustar liggesusers# Generated by roxygen2: do not edit by hand testthat/tests/testthat/test-parallel/teardown/DESCRIPTION0000644000176200001440000000111214164710003023205 0ustar liggesusersPackage: teardown Title: What the Package Does (One Line, Title Case) Version: 0.0.0.9000 Authors@R: person(given = "First", family = "Last", role = c("aut", "cre"), email = "first.last@example.com", comment = c(ORCID = "YOUR-ORCID-ID")) Description: What the package does (one paragraph). 
License: `use_mit_license()`, `use_gpl3_license()` or friends to pick a license Encoding: UTF-8 LazyData: true Roxygen: list(markdown = TRUE) RoxygenNote: 7.1.1 Suggests: testthat Config/testthat/parallel: true Config/testthat/edition: 3 testthat/tests/testthat/test-parallel/teardown/tests/0000755000176200001440000000000014164710003022646 5ustar liggesuserstestthat/tests/testthat/test-parallel/teardown/tests/testthat/0000755000176200001440000000000014172362302024512 5ustar liggesuserstestthat/tests/testthat/test-parallel/teardown/tests/testthat/test-teardown-1.R0000644000176200001440000000007014164710003027564 0ustar liggesuserstest_that("this is good", { expect_equal(2 * 2, 4) }) testthat/tests/testthat/test-parallel/teardown/tests/testthat/teardown-bad.R0000644000176200001440000000003314164710003027174 0ustar liggesusers stop("Error in teardown") testthat/tests/testthat/test-parallel/teardown/tests/testthat.R0000644000176200001440000000006014164710003024625 0ustar liggesuserslibrary(testthat) library(ok) test_check("ok") testthat/tests/testthat/test-parallel/setup/0000755000176200001440000000000014164710003021021 5ustar liggesuserstestthat/tests/testthat/test-parallel/setup/NAMESPACE0000644000176200001440000000005614164710003022241 0ustar liggesusers# Generated by roxygen2: do not edit by hand testthat/tests/testthat/test-parallel/setup/DESCRIPTION0000644000176200001440000000110714164710003022526 0ustar liggesusersPackage: setup Title: What the Package Does (One Line, Title Case) Version: 0.0.0.9000 Authors@R: person(given = "First", family = "Last", role = c("aut", "cre"), email = "first.last@example.com", comment = c(ORCID = "YOUR-ORCID-ID")) Description: What the package does (one paragraph). License: `use_mit_license()`, `use_gpl3_license()` or friends to pick a license Encoding: UTF-8 LazyData: true Roxygen: list(markdown = TRUE) RoxygenNote: 7.1.1 Suggests: testthat Config/testthat/parallel: true Config/testthat/edition: 3 testthat/tests/testthat/test-parallel/setup/tests/0000755000176200001440000000000014164710003022163 5ustar liggesuserstestthat/tests/testthat/test-parallel/setup/tests/testthat/0000755000176200001440000000000014172362302024027 5ustar liggesuserstestthat/tests/testthat/test-parallel/setup/tests/testthat/test-setup-1.R0000644000176200001440000000007014164710003026416 0ustar liggesuserstest_that("this is good", { expect_equal(2 * 2, 4) }) testthat/tests/testthat/test-parallel/setup/tests/testthat/setup-bad.R0000644000176200001440000000003014164710003026023 0ustar liggesusers stop("Error in setup") testthat/tests/testthat/test-parallel/setup/tests/testthat/test-setup-2.R0000644000176200001440000000007614164710003026425 0ustar liggesuserstest_that("this fails", { expect_equal(Sys.getpid(), 0L) }) testthat/tests/testthat/test-parallel/setup/tests/testthat/test-setup-3.R0000644000176200001440000000010414164710003026416 0ustar liggesuserstest_that("this skips", { skip(paste("This is", Sys.getpid())) }) testthat/tests/testthat/test-parallel/setup/tests/testthat.R0000644000176200001440000000006014164710003024142 0ustar liggesuserslibrary(testthat) library(ok) test_check("ok") testthat/tests/testthat/test-parallel/outside/0000755000176200001440000000000014164710003021335 5ustar liggesuserstestthat/tests/testthat/test-parallel/outside/NAMESPACE0000644000176200001440000000005614164710003022555 0ustar liggesusers# Generated by roxygen2: do not edit by hand testthat/tests/testthat/test-parallel/outside/DESCRIPTION0000644000176200001440000000111114164710003023035 0ustar 
liggesusersPackage: outside Title: What the Package Does (One Line, Title Case) Version: 0.0.0.9000 Authors@R: person(given = "First", family = "Last", role = c("aut", "cre"), email = "first.last@example.com", comment = c(ORCID = "YOUR-ORCID-ID")) Description: What the package does (one paragraph). License: `use_mit_license()`, `use_gpl3_license()` or friends to pick a license Encoding: UTF-8 LazyData: true Roxygen: list(markdown = TRUE) RoxygenNote: 7.1.1 Suggests: testthat Config/testthat/parallel: true Config/testthat/edition: 3 testthat/tests/testthat/test-parallel/outside/tests/0000755000176200001440000000000014164710003022477 5ustar liggesuserstestthat/tests/testthat/test-parallel/outside/tests/testthat/0000755000176200001440000000000014172362302024343 5ustar liggesuserstestthat/tests/testthat/test-parallel/outside/tests/testthat/test-outside-2.R0000644000176200001440000000014514164710003027252 0ustar liggesusers stop("Error outside of test_that()") test_that("this fails", { expect_equal(Sys.getpid(), 0L) }) testthat/tests/testthat/test-parallel/outside/tests/testthat/test-outside-3.R0000644000176200001440000000010414164710003027246 0ustar liggesuserstest_that("this skips", { skip(paste("This is", Sys.getpid())) }) testthat/tests/testthat/test-parallel/outside/tests/testthat/test-outside-1.R0000644000176200001440000000007014164710003027246 0ustar liggesuserstest_that("this is good", { expect_equal(2 * 2, 4) }) testthat/tests/testthat/test-parallel/outside/tests/testthat.R0000644000176200001440000000006014164710003024456 0ustar liggesuserslibrary(testthat) library(ok) test_check("ok") testthat/tests/testthat/test-parallel/crash/0000755000176200001440000000000014172347717021002 5ustar liggesuserstestthat/tests/testthat/test-parallel/crash/NAMESPACE0000644000176200001440000000005614164710003022201 0ustar liggesusers# Generated by roxygen2: do not edit by hand testthat/tests/testthat/test-parallel/crash/DESCRIPTION0000644000176200001440000000110414164710003022463 0ustar liggesusersPackage: ok Title: What the Package Does (One Line, Title Case) Version: 0.0.0.9000 Authors@R: person(given = "First", family = "Last", role = c("aut", "cre"), email = "first.last@example.com", comment = c(ORCID = "YOUR-ORCID-ID")) Description: What the package does (one paragraph). 
License: `use_mit_license()`, `use_gpl3_license()` or friends to pick a license Encoding: UTF-8 LazyData: true Roxygen: list(markdown = TRUE) RoxygenNote: 7.1.1 Suggests: testthat Config/testthat/parallel: true Config/testthat/edition: 3 testthat/tests/testthat/test-parallel/crash/tests/0000755000176200001440000000000014164710003022123 5ustar liggesuserstestthat/tests/testthat/test-parallel/crash/tests/testthat/0000755000176200001440000000000014172362302023767 5ustar liggesuserstestthat/tests/testthat/test-parallel/crash/tests/testthat/test-crash-2.R0000644000176200001440000000007614164710003026325 0ustar liggesuserstest_that("this fails", { expect_equal(Sys.getpid(), 0L) }) testthat/tests/testthat/test-parallel/crash/tests/testthat/test-crash-3.R0000644000176200001440000000014714164710003026325 0ustar liggesuserstest_that("this crashes", { expect_true(TRUE) expect_true(FALSE) asNamespace("callr")$crash() }) testthat/tests/testthat/test-parallel/crash/tests/testthat/test-crash-1.R0000644000176200001440000000007014164710003026316 0ustar liggesuserstest_that("this is good", { expect_equal(2 * 2, 4) }) testthat/tests/testthat/test-parallel/crash/tests/testthat.R0000644000176200001440000000006014164710003024102 0ustar liggesuserslibrary(testthat) library(ok) test_check("ok") testthat/tests/testthat/test-test-that.R0000644000176200001440000001065014164710003020127 0ustar liggesuserstest_that("can't access variables from other tests (1)", { a <- 10 expect_true(TRUE) }) test_that("can't access variables from other tests (2)", { expect_false(exists("a")) }) test_that("messages are suppressed", { local_edition(2) message("YOU SHOULDN'T SEE ME") succeed() }) test_that("errors are captured", { f <- function() g() g <- function() stop("I made a mistake", call. 
= FALSE) reporter <- with_reporter("silent", { test_that("", { f() } ) }) expect_equal(length(reporter$expectations()), 1) }) test_that("errors captured even when looking for messages", { reporter <- with_reporter("silent", { test_that("", { expect_message(stop("a")) } ) }) expect_equal(length(reporter$expectations()), 1) expect_true(expectation_error(reporter$expectations()[[1L]])) }) test_that("errors captured even when looking for warnings", { reporter <- with_reporter("silent", { test_that("", { expect_warning(stop()) } ) }) expect_equal(length(reporter$expectations()), 1) expect_true(expectation_error(reporter$expectations()[[1L]])) }) test_that("failures are errors", { f <- function() { expect_true(FALSE) expect_false(TRUE) } expect_error(f(), "is not TRUE", class = "expectation_failure") }) test_that("infinite recursion is captured", { f <- function() f() reporter <- with_reporter("silent", { withr::with_options( list(expressions = sys.nframe() + 100), test_that("", { f() }) ) }) expect_equal(length(reporter$expectations()), 1) }) test_that("return value from test_that", { with_reporter("", success <- test_that("success", { succeed() } )) expect_true(success) with_reporter("", success <- test_that("success", { expect(TRUE, "Yes!") })) expect_true(success) with_reporter("", error <- test_that("error", { barf } )) expect_false(error) with_reporter("", failure <- test_that("failure", { expect_true(FALSE) } )) expect_false(failure) with_reporter("", failure <- test_that("failure", { fail() } )) expect_false(failure) with_reporter("", success <- test_that("failure", { expect(FALSE, "No!") } )) expect_false(success) with_reporter("", skip <- test_that("skip", { skip("skipping") } )) expect_false(skip) # No tests = automatically generated skip with_reporter("", skip <- test_that("success", {})) expect_false(skip) }) # Line numbering ---------------------------------------------------------- expectation_lines <- function(code) { srcref <- attr(substitute(code), "srcref") if (!is.list(srcref)) { stop("code doesn't have srcref", call. 
= FALSE) } results <- with_reporter("silent", code)$expectations() unlist(lapply(results, function(x) x$srcref[1])) - srcref[[1]][1] } test_that("line numbers captured in simple case", { lines <- expectation_lines({ # line 1 test_that("simple", { # line 2 expect_true(FALSE) # line 3 }) # line 4 }) expect_equal(lines, 3) }) test_that("line numbers captured inside another function", { lines <- expectation_lines({ test_that("simple", { # line 1 suppressMessages(expect_true(FALSE)) # line 2 }) }) expect_equal(lines, 2) }) test_that("line numbers captured inside a loop", { lines <- expectation_lines({ test_that("simple", { # line 1 for (i in 1:4) expect_true(TRUE) # line 2 }) }) expect_equal(lines, rep(2, 4)) }) test_that("line numbers captured for skip()s", { lines <- expectation_lines({ test_that("simple", { # line 1 skip("Not this time") # line 2 }) # line 3 }) expect_equal(lines, 2) }) test_that("line numbers captured for stop()s", { lines <- expectation_lines({ test_that("simple", { # line 1 skip("Not this time") # line 2 }) # line 3 }) expect_equal(lines, 2) }) test_that("can signal warnings and messages without restart", { expect_null(signalCondition(message_cnd("foo"))) return("Skipping following test because it verbosely registers the warning") expect_null(signalCondition(warning_cnd("foo"))) }) test_that("braces required in testthat 3e", { local_edition(3) expect_warning( test_that("", expect_true(TRUE)) ) }) test_that("no braces required in testthat 2e", { local_edition(2) expect_warning( test_that("", expect_true(TRUE)), NA ) }) testthat/tests/testthat/test-snapshot-reporter.R0000644000176200001440000001263514166627056021737 0ustar liggesusers test_that("can establish local snapshotter for testing", { snapper <- local_snapshotter() snapper$start_file("snapshot-1", "test") expect_true(snapper$is_active()) expect_equal(snapper$file, "snapshot-1") expect_equal(snapper$test, "test") }) test_that("basic workflow", { path <- withr::local_tempdir() snapper <- local_snapshotter(path) snapper$start_file("snapshot-2") # output if not active (because test not set here) expect_message(expect_snapshot_output("x"), "Can't compare") # warns on first creation snapper$start_file("snapshot-2", "test") expect_warning(expect_snapshot_output("x"), "Adding new") snapper$end_file() expect_true(file.exists(file.path(path, "snapshot-2.md"))) expect_false(file.exists(file.path(path, "snapshot-2.new.md"))) # succeeds if unchanged snapper$start_file("snapshot-2", "test") expect_success(expect_snapshot_output("x")) snapper$end_file() expect_true(file.exists(file.path(path, "snapshot-2.md"))) expect_false(file.exists(file.path(path, "snapshot-2.new.md"))) # fails if changed snapper$start_file("snapshot-2", "test") expect_failure(expect_snapshot_output("y")) snapper$end_file() expect_true(file.exists(file.path(path, "snapshot-2.md"))) expect_true(file.exists(file.path(path, "snapshot-2.new.md"))) }) test_that("only create new files for changed variants", { snapper <- local_snapshotter() snapper$start_file("variants", "test") expect_warning(expect_snapshot_output("x"), "Adding new") expect_warning(expect_snapshot_output("x", variant = "a"), "Adding new") expect_warning(expect_snapshot_output("x", variant = "b"), "Adding new") snapper$end_file() expect_setequal( snapper$snap_files(), c("variants.md", "a/variants.md", "b/variants.md") ) # failure in default snapper$start_file("variants", "test") expect_failure(expect_snapshot_output("y")) expect_success(expect_snapshot_output("x", variant = "a")) 
expect_success(expect_snapshot_output("x", variant = "b")) snapper$end_file() expect_setequal( snapper$snap_files(), c("variants.md", "variants.new.md", "a/variants.md", "b/variants.md") ) unlink(file.path(snapper$snap_dir, "variants.new.md")) # failure in variant snapper$start_file("variants", "test") expect_success(expect_snapshot_output("x")) expect_success(expect_snapshot_output("x", variant = "a")) expect_failure(expect_snapshot_output("y", variant = "b")) snapper$end_file() expect_setequal( snapper$snap_files(), c("variants.md", "a/variants.md", "b/variants.md", "b/variants.new.md") ) }) test_that("only reverting change in variant deletes .new", { snapper <- local_snapshotter() snapper$start_file("v", "test") expect_warning(expect_snapshot_output("x", variant = "a"), "Adding new") expect_warning(expect_snapshot_output("x", variant = "b"), "Adding new") snapper$end_file() expect_setequal(snapper$snap_files(), c("a/v.md", "b/v.md")) # failure snapper$start_file("v", "test") expect_failure(expect_snapshot_output("y", variant = "a")) snapper$end_file() expect_setequal(snapper$snap_files(), c("a/v.md", "b/v.md", "a/v.new.md")) # success snapper$start_file("v", "test") expect_success(expect_snapshot_output("x", variant = "a")) snapper$end_file() expect_setequal(snapper$snap_files(), c("a/v.md", "b/v.md")) }) test_that("removing tests removes snap file", { path <- withr::local_tempdir() snapper <- local_snapshotter(path) snapper$start_file("snapshot-3", "test") expect_warning(expect_snapshot_output("x"), "Adding new") snapper$end_file() expect_true(file.exists(file.path(path, "snapshot-3.md"))) snapper$start_file("snapshot-3", "test") snapper$end_file() expect_false(file.exists(file.path(path, "snapshot-3.md"))) }) test_that("errors if can't roundtrip", { snapper <- local_snapshotter() snapper$start_file("snapshot-4", "test") expect_error(expect_snapshot_value(NULL), "not symmetric") }) test_that("errors in test doesn't change snapshot", { snapper <- local_snapshotter() # First run snapper$start_file("snapshot-5", "test") expect_warning(expect_snapshot_output("x"), "Adding new") snapper$end_file() # Second run has error snapper$start_file("snapshot-5", "test") snapper$add_result(NULL, NULL, as.expectation(simpleError("error"))) snapper$end_file() # Third run snapper$start_file("snapshot-5", "test") expect_warning(expect_snapshot_output("x"), NA) snapper$end_file() # No warning if snapshot already happened snapper$start_file("snapshot-5", "test") expect_snapshot_output("x") expect_warning( snapper$add_result(NULL, NULL, as.expectation(simpleError("error"))), NA ) snapper$end_file() }) test_that("skips and unexpected errors reset snapshots", { regenerate <- FALSE if (regenerate) { withr::local_envvar(c(TESTTHAT_REGENERATE_SNAPS = "true")) } catch_cnd( test_file( test_path("test-snapshot", "test-snapshot.R"), reporter = NULL ) ) path <- "test-snapshot/_snaps/snapshot.md" stopifnot(file.exists(path)) snaps <- snap_from_md(brio::read_lines(path)) titles <- c("errors reset snapshots", "skips reset snapshots") expect_true(all(titles %in% names(snaps))) }) test_that("`expect_error()` can fail inside `expect_snapshot()`", { out <- test_file( test_path("test-snapshot", "test-expect-condition.R"), reporter = NULL ) err <- out[[1]]$results[[1]] expect_match(err$message, "did not throw the expected error") }) testthat/tests/testthat/test-verify-conditions-lines.txt0000644000176200001440000000032714172347327023431 0ustar liggesusers> message("First.\nSecond.") Message: First. Second. 
> warning("First.\nSecond.") Warning in eval(expr, envir, enclos): First. Second. > stop("First.\nSecond.") Error in eval(expr, envir, enclos): First. Second. testthat/tests/testthat/test-old-school.R0000644000176200001440000000301214164710003020247 0ustar liggesusers test_that("old school logical works", { local_edition(2L) expect_warning( expect_success(expect_that(TRUE, is_true())), "deprecated") expect_warning( expect_success(expect_that(FALSE, is_false())), "deprecated") }) test_that("old school types still work", { local_edition(2L) expect_success(expect_that(1L, is_a("integer"))) }) test_that("tidyverse conflicts throw warnings", { local_edition(2L) expect_warning( expect_that(NULL, is_null()), "deprecated" ) expect_warning( expect_that("te*st", matches("e*", fixed = TRUE)), "deprecated" ) expect_warning( expect_that("test", matches("TEST", ignore.case = TRUE)), "deprecated" ) }) test_that("old school names still work", { local_edition(2L) expect_success(expect_that("a", has_names(NULL))) }) test_that("old school comparisons still work", { local_edition(2L) expect_success(expect_that(10, is_less_than(11))) expect_failure(expect_that(10, is_more_than(11))) }) test_that("old school equality tests still work", { local_edition(2L) expect_success(expect_that(10, equals(10))) expect_success(expect_that(10, is_identical_to(10))) expect_success(expect_that(10, is_equivalent_to(10))) }) test_that("old school output tests still work", { local_edition(2L) expect_success(expect_that(stop("!"), throws_error())) expect_success(expect_that(warning("!"), gives_warning())) expect_success(expect_that(message("!"), shows_message())) expect_success(expect_that(print("!"), prints_text())) }) testthat/tests/testthat/test-verify-constructed-calls.txt0000644000176200001440000000015614172347327023601 0ustar liggesusers> expr(foo(!!c("bar", "baz"))) foo(c("bar", "baz")) > binding <- quote(foo) > expr(foo(!!binding)) foo(foo) testthat/tests/testthat/test-examples.R0000644000176200001440000000047614164710003020035 0ustar liggesuserstest_that("test_examples works with installed packages", { local_edition(2) local_mock(test_rd = identity) expect_true(length(test_examples()) > 1) }) test_that("test_examples fails if no examples", { withr::local_envvar(TESTTHAT_PKG = "") expect_error(test_examples("asdf"), "Could not find examples") }) testthat/tests/testthat/test-reporter-progress.R0000644000176200001440000000412714164710003021720 0ustar liggesuserstest_that("captures error before first test", { local_output_override() expect_snapshot_reporter( ProgressReporter$new(update_interval = 0, min_time = Inf), test_path("reporters/error-setup.R") ) }) test_that("gracefully handles multiple contexts", { expect_snapshot_reporter( ProgressReporter$new(update_interval = 0, min_time = Inf), test_path("reporters/context.R") ) }) test_that("can control max fails with env var or option", { withr::local_envvar(TESTTHAT_MAX_FAILS = 11) expect_equal(testthat_max_fails(), 11) withr::local_options(testthat.progress.max_fails = 12) expect_equal(testthat_max_fails(), 12) }) test_that("fails after max_fail tests", { withr::local_options(testthat.progress.max_fails = 10) expect_snapshot_reporter( ProgressReporter$new(update_interval = 0, min_time = Inf), test_path(c("reporters/fail-many.R", "reporters/fail.R")) ) }) test_that("can fully suppress incremental updates", { expect_snapshot_reporter( ProgressReporter$new(update_interval = 0, min_time = Inf), test_path("reporters/successes.R") ) expect_snapshot_reporter( 
ProgressReporter$new(update_interval = Inf, min_time = Inf), test_path("reporters/successes.R") ) }) test_that("reports backtraces", { expect_snapshot_reporter( ProgressReporter$new(update_interval = 0, min_time = Inf), test_path("reporters/backtraces.R") ) }) test_that("records skips", { expect_snapshot_reporter( ProgressReporter$new(update_interval = 0, min_time = Inf), test_path("reporters/skips.R") ) }) # compact display --------------------------------------------------------- test_that("compact display is informative", { expect_snapshot_reporter( CompactProgressReporter$new(), test_path("reporters/tests.R") ) }) test_that("display of successes only is compact", { expect_snapshot_reporter( CompactProgressReporter$new(), test_path("reporters/successes.R") ) # And even more compact if in RStudio pane expect_snapshot_reporter( CompactProgressReporter$new(rstudio = TRUE), test_path("reporters/successes.R") ) }) testthat/tests/testthat/test-catch.R0000644000176200001440000000047214164710003017275 0ustar liggesuserstest_that("get_routine() finds own 'run_testthat_tests'", { routine <- get_routine("testthat", "run_testthat_tests") expect_s3_class(routine, "NativeSymbolInfo") }) test_that("get_routine() fails when no routine exists", { expect_error(get_routine("utils", "no_such_routine")) }) run_cpp_tests("testthat") testthat/tests/testthat/test-source.R0000644000176200001440000000226713726505644017531 0ustar liggesuserstest_that("source_file always uses UTF-8 encoding", { has_locale <- function(l) { has <- TRUE tryCatch( withr::with_locale(c(LC_CTYPE = l), "foobar"), warning = function(w) has <<- FALSE, error = function(e) has <<- FALSE ) has } ## Some text in UTF-8 tmp <- tempfile() on.exit(unlink(tmp), add = TRUE) utf8 <- as.raw(c( 0xc3, 0xa1, 0x72, 0x76, 0xc3, 0xad, 0x7a, 0x74, 0xc5, 0xb1, 0x72, 0xc5, 0x91, 0x20, 0x74, 0xc3, 0xbc, 0x6b, 0xc3, 0xb6, 0x72, 0x66, 0xc3, 0xba, 0x72, 0xc3, 0xb3, 0x67, 0xc3, 0xa9, 0x70 )) writeBin(c(charToRaw("x <- \""), utf8, charToRaw("\"\n")), tmp) run_test <- function(locale) { if (has_locale(locale)) { env <- new.env() withr::with_locale( c(LC_CTYPE = locale), source_file(tmp, env = env, wrap = FALSE) ) expect_equal(Encoding(env$x), "UTF-8") expect_equal(charToRaw(env$x), utf8) } } ## Try to read it in latin1 and UTF-8 locales ## They have different names on Unix and Windows run_test("en_US.ISO8859-1") run_test("en_US.UTF-8") run_test("English_United States.1252") run_test("German_Germany.1252") run_test(Sys.getlocale("LC_CTYPE")) }) testthat/tests/testthat/test-warning/0000755000176200001440000000000014172347327017550 5ustar liggesuserstestthat/tests/testthat/test-warning/test-warning.R0000644000176200001440000000012113450671736022311 0ustar liggesuserstest_that("warning emitted", { warning("This is not a test", call. = FALSE) }) testthat/tests/testthat/test-list-reporter/0000755000176200001440000000000014172347717020711 5ustar liggesuserstestthat/tests/testthat/test-list-reporter/test-exception-outside-tests.R0000644000176200001440000000034114164710003026610 0ustar liggesusers# the objective is to test what happens if some code fails outside of tests # i.e. not inside a test_that() call.
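# expected behaviour (asserted in test-reporter-list.R): the "before" test is
# reported as usual, the stop() is recorded as an error result, and the
# "after" test is never reached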
test_that("before", expect_true(TRUE)) stop('dying outside of tests') test_that("after", expect_true(TRUE)) testthat/tests/testthat/test-list-reporter/test-bare-expectations.R0000644000176200001440000000017614164710003025421 0ustar liggesuserstest_that("before", expect_true(TRUE)) # this is a bare expectation expect_true(TRUE) test_that("after", expect_true(TRUE)) testthat/tests/testthat/test-list-reporter/test-exercise-list-reporter.R0000644000176200001440000000034414164710003026421 0ustar liggesuserstest_that("test1", expect_true(TRUE)) test_that("test2", expect_true(TRUE)) test_that("test-pass", expect_true(TRUE)) test_that("test-fail", expect_true(FALSE)) test_that("test-error", { stop('argh') expect_true(TRUE) }) testthat/tests/testthat/test-list-reporter/test-only-error.R0000644000176200001440000000003714164710003024110 0ustar liggesusersstop('dying outside of tests') testthat/tests/testthat/test-snapshot-file-snaps.R0000644000176200001440000000442314165635513022126 0ustar liggesuserstest_that("append manages current snapshot index", { path <- withr::local_tempdir() snaps <- FileSnaps$new(path, "file") i <- snaps$append("test1", "_default", 1) expect_equal(i, 1) i <- snaps$append("test1", "_default", 2) expect_equal(i, 2) i <- snaps$append("test1", "windows", 3) expect_equal(i, 1) i <- snaps$append("test1", "windows", 3) expect_equal(i, 2) i <- snaps$append("test2", "_default", 3) expect_equal(i, 1) }) test_that("can retrieve appended snaps", { snaps <- FileSnaps$new(withr::local_tempdir(), "file") snaps$append("test1", "_default", "1") snaps$append("test1", "_default", "2") snaps$append("test2", "_default", "3") snaps$append("test2", "windows", "4") expect_equal(snaps$get("test1", "_default", 2), "2") expect_equal(snaps$get("test2", "_default", 1), "3") expect_equal(snaps$get("test2", "windows", 1), "4") # Returns NULL if don't exist expect_equal(snaps$get("MISSING", "_default", 1), NULL) expect_equal(snaps$get("test1", "MISSING", 1), NULL) expect_equal(snaps$get("test1", "_default", 100), NULL) }) test_that("can reset snapshots", { snaps1 <- FileSnaps$new(withr::local_tempdir(), "file") snaps2 <- FileSnaps$new(withr::local_tempdir(), "file") snaps1$append("test1", "_default", "1") snaps1$append("test1", "_default", "2") snaps1$append("test2", "_default", "3") snaps2$append("test1", "_default", "4") snaps2$reset("test1", snaps1) expect_equal(snaps2$snaps$`_default`$test1, c("4", "2")) # And can copy complete snapshot snaps2$reset("test2", snaps1) expect_equal(snaps2$snaps$`_default`$test2, "3") # And shouldn't change if we reset again snaps2$reset("test1", snaps1) expect_equal(snaps2$snaps$`_default`$test1, c("4", "2")) }) test_that("can round trip cur to old snaps", { path <- withr::local_tempdir() cur <- FileSnaps$new(path, "file", "cur") cur$append("test1", "_default", "1") cur$append("test2", "_default", "2") cur$append("test2", "windows", "3") cur$write() old <- FileSnaps$new(path, "file", "old") expect_equal(cur$snaps, old$snaps) }) test_that("snaps delete default variant if no snaps", { snaps <- FileSnaps$new(withr::local_tempdir(), "file", "cur") brio::write_lines("x", snaps$path()) snaps$write() expect_false(file.exists(snaps$path())) }) testthat/tests/testthat/test-reporter-zzz.R0000644000176200001440000000110014164710003020675 0ustar liggesuserstest_that("can locate reporter from name", { expect_equal(find_reporter("minimal"), MinimalReporter$new()) expect_equal(find_reporter("summary"), SummaryReporter$new()) }) test_that("useful error message if can't find reporter", { 
expect_error( find_reporter(c("summary", "blah")), "Can not find test reporter blah" ) }) test_that("character vector yields multi reporter", { expect_equal( find_reporter(c("summary", "stop")), MultiReporter$new( reporters = list( SummaryReporter$new(), StopReporter$new() ) ) ) }) testthat/tests/testthat/test-expect_that.R0000644000176200001440000000015313450671736020537 0ustar liggesuserstest_that("expect_that returns the input value", { res <- expect_true(TRUE) expect_equal(res, TRUE) }) testthat/tests/testthat/test-make-expectation.R0000644000176200001440000000035214164710003021446 0ustar liggesuserstest_that("make_expectation returns and prints expectation", { x <- 1:5 out <- capture_output( expect_equal(make_expectation(x), bquote(expect_equal(x, .(1:5)))) ) expect_equal( out, "expect_equal(x, 1:5)" ) }) testthat/tests/testthat/test-expect-length.R0000644000176200001440000000110613701142746020766 0ustar liggesuserstest_that("length computed correctly", { expect_success(expect_length(1, 1)) expect_failure(expect_length(1, 2), "has length 1, not length 2.") expect_success(expect_length(1:10, 10)) expect_success(expect_length(letters[1:5], 5)) }) test_that("uses S4 length method", { A <- setClass("ExpectLengthA", slots = c(x = "numeric", y = "numeric")) setMethod("length", "ExpectLengthA", function(x) 5L) expect_success(expect_length(A(x = 1:9, y = 3), 5)) }) test_that("returns input", { x <- list(1:10, letters) out <- expect_length(x, 2) expect_identical(out, x) }) testthat/tests/testthat/test-reporter-tap.R0000644000176200001440000000011714164710003020633 0ustar liggesuserstest_that("reporter works", { expect_snapshot_reporter(TapReporter$new()) }) testthat/tests/testthat/test-snapshot.R0000644000176200001440000001070614166627056020074 0ustar liggesuserstest_that("can access nickname", { expect_snapshot(version$nickname, variant = r_version()) }) test_that("can snapshot output", { foo <- function() cat("y") expect_snapshot_output(foo()) expect_snapshot_output(foo()) expect_snapshot_output(foo()) expect_snapshot_output(foo()) }) test_that("can snapshot everything", { f <- function() { print("1") message("2") warning("3") stop("4") } expect_snapshot(f(), error = TRUE) }) test_that("empty lines are preserved", { f <- function() { cat("1\n\n") message("2\n") warning("3\n") stop("4\n\n") } expect_snapshot(f(), error = TRUE) }) test_that("multiple outputs of same type are collapsed", { expect_snapshot({ x <- 1 y <- 1 {message("a"); message("b")} {warning("a"); warning("b")} }) }) test_that("can scrub output/messages/warnings/errors", { secret <- function() { print("secret") message("secret") warning("secret") stop("secret") } redact <- function(x) gsub("secret", "", x) expect_snapshot(secret(), transform = redact, error = TRUE) # Or with an inline fun expect_snapshot(print("secret"), transform = ~ gsub("secret", "****", .x)) }) test_that("always checks error status", { expect_error(expect_snapshot(stop("!"), error = FALSE)) expect_failure(expect_snapshot(print("!"), error = TRUE)) }) test_that("can capture error/warning messages", { expect_snapshot_error(stop("This is an error")) expect_snapshot_warning(warning("This is a warning")) }) test_that("can check error/warning classes", { expect_snapshot(expect_snapshot_error(1), error = TRUE) expect_snapshot(expect_snapshot_error(1, class = "myerror"), error = TRUE) expect_snapshot(expect_snapshot_warning(1), error = TRUE) expect_snapshot(expect_snapshot_warning(1, class = "mywarning"), error = TRUE) }) test_that("snapshot handles multi-line 
input", { expect_snapshot({ 1 + 2 3 + 4 "this is a comment" }) }) test_that("snapshot captures output if visible", { f_visible <- function() "x" f_invisible <- function() invisible("x") expect_snapshot(f_visible()) expect_snapshot(f_invisible()) }) test_that("captures custom classes", { f <- function() { inform("Hello", class = "testthat_greeting") warn("Goodbye", class = "testthat_farewell") abort("Eeek!", class = "testthat_scream") } expect_snapshot(f(), error = TRUE) }) test_that("even with multiple lines", { expect_snapshot_output(cat("a\nb\nc")) expect_snapshot_output(cat("a\nb\nc\n")) }) test_that("can snapshot values", { x <- list("a", 1.5, 1L, TRUE) expect_snapshot_value(x, style = "json") expect_snapshot_value(x, style = "json2") expect_snapshot_value(x, style = "deparse") expect_snapshot_value(x, style = "serialize") }) test_that("can control snapshot value details", { expect_snapshot_value(1.2, tolerance = 0.1) }) test_that("tolerance passed to check_roundtrip", { expect_snapshot_value(0.900000000000001, style = "json") }) test_that("reparse handles common cases", { roundtrip <- function(x) reparse(deparse(x)) expect_equal(roundtrip(-1), -1) expect_equal(roundtrip(c(1, 2, 3)), c(1, 2, 3)) expect_equal(roundtrip(list(1, 2, 3)), list(1, 2, 3)) expect_equal(roundtrip(mtcars), mtcars) f <- function(x) x + 1 expect_equal(roundtrip(f), f, ignore_function_env = TRUE) }) test_that("`expect_snapshot()` does not inject", { expect_snapshot({ x <- quote(!!foo) expect_equal(x, call("!", call("!", quote(foo)))) }) }) test_that("full condition message is printed with rlang", { local_use_rlang_1_0() expect_snapshot( error = TRUE, variant = rlang_version(), { foo <- error_cnd("foo", message = "Title parent.") abort("Title.", parent = foo) } ) }) test_that("can print with and without condition classes", { local_use_rlang_1_0() f <- function() { message("foo") warning("bar") stop("baz") } expect_snapshot( error = TRUE, cnd_class = TRUE, variant = rlang_version(), f() ) expect_snapshot( error = TRUE, cnd_class = FALSE, variant = rlang_version(), f() ) }) test_that("errors and warnings are folded", { local_use_rlang_1_0() f <- function() { warning("foo") stop("bar") } expect_snapshot( error = TRUE, variant = rlang_version(), f() ) }) test_that("hint is informative", { expect_snapshot({ cat(snapshot_accept_hint("_default", "bar.R")) cat(snapshot_accept_hint("foo", "bar.R")) }) }) testthat/tests/testthat/test-error/0000755000176200001440000000000014172347717017237 5ustar liggesuserstestthat/tests/testthat/test-error/test-error.R0000644000176200001440000000006713450671736021472 0ustar liggesuserstest_that("should fail", { expect_equal(1 + 1, 3) }) testthat/tests/testthat/test-helpers.R0000644000176200001440000000013713450671736017673 0ustar liggesusers# See helper-assign.R test_that("helpers run before tests", { expect_equal(abcdefghi, 10) }) testthat/tests/testthat/test-verify-conditions-cr.txt0000644000176200001440000000002014172347327022711 0ustar liggesusers> cat("\r\n") testthat/tests/testthat/test-watcher.R0000644000176200001440000000476214164710003017656 0ustar liggesuserstest_that("compare state works correctly", { loc <- tempfile("watcher") dir.create(loc) empty <- dir_state(loc) expect_equal(length(empty), 0) file.create(file.path(loc, "test-1.txt")) one <- dir_state(loc) expect_equal(length(one), 1) expect_equal(basename(names(one)), "test-1.txt") diff <- compare_state(empty, one) expect_equal(diff$n, 1) expect_equal(basename(diff$added), "test-1.txt") write.table(mtcars, file.path(loc, 
"test-1.txt")) diff <- compare_state(one, dir_state(loc)) expect_equal(diff$n, 1) expect_equal(basename(diff$modified), "test-1.txt") file.rename(file.path(loc, "test-1.txt"), file.path(loc, "test-2.txt")) diff <- compare_state(one, dir_state(loc)) expect_equal(diff$n, 2) expect_equal(basename(diff$deleted), "test-1.txt") expect_equal(basename(diff$added), "test-2.txt") diff <- compare_state( c(file1 = "62da2", file2 = "e14a6", file3 = "6e6dd"), c(file1 = "62da2", file2 = "e14a6", file21 = "532fa", file3 = "3f4sa") ) expect_equal(diff$n, 2) expect_equal(basename(diff$added), "file21") expect_equal(basename(diff$modified), "file3") }) test_that("watcher works correctly", { skip_on_ci() skip_on_os("windows") skip_on_cran() if (Sys.which("bash") == "") { skip("bash not available") } if (system("bash -c 'which touch'", ignore.stdout = TRUE) != 0L) { skip("touch (or which) not available") } loc <- tempfile("watcher") dir.create(loc) code_path <- file.path(loc, "R") test_path <- file.path(loc, "tests") dir.create(code_path) dir.create(test_path) delayed.bash.cmd <- function(command) { system(paste0("bash -c 'sleep 1;", command, "'"), wait = FALSE) } add.code.file <- function(file.name) { delayed.bash.cmd(paste0("touch ", file.path(code_path, file.name))) } remove.code.file <- function(file.name) { delayed.bash.cmd(paste0("rm ", file.path(code_path, file.name))) } test.added <- function(added, deleted, modified) { expect_equal(length(added), 1) expect_equal(grepl("test1.R", added), TRUE) expect_equal(length(deleted), 0) expect_equal(length(modified), 0) FALSE } test.removed <- function(added, deleted, modified) { expect_equal(length(added), 0) expect_equal(length(deleted), 1) expect_equal(grepl("test1.R", deleted), TRUE) expect_equal(length(modified), 0) FALSE } add.code.file("test1.R") watch(c(code_path, test_path), test.added) remove.code.file("test1.R") watch(c(code_path, test_path), test.removed) }) testthat/tests/testthat/test-label.R0000644000176200001440000000046413450671736017313 0ustar liggesuserstest_that("labelling compound {} expression gives single string", { out <- expr_label(quote({ 1 + 2 })) expect_length(out, 1) expect_type(out, "character") }) test_that("can label multiline functions", { expect_equal( expr_label(quote(function(x, y) {})), "function(x, y) ..." 
) }) testthat/tests/testthat/test-evaluate-promise.R0000644000176200001440000000166613450671736021523 0ustar liggesuserstest_that("captures warnings, messages and output", { out <- evaluate_promise({ message("m", appendLF = FALSE) warning("w") cat("out") }) expect_equal(out$output, "out") expect_equal(out$messages, "m") expect_equal(out$warnings, "w") }) test_that("capture_warnings captures warnings", { out <- capture_warnings({ warning("a") warning("b") }) expect_equal(out, c("a", "b")) }) test_that("capture_messages captures messages", { out <- capture_messages({ message("a") message("b") }) expect_equal(out, c("a\n", "b\n")) # message adds LF by default }) test_that("capture output captures output", { out1 <- capture_output(print(1:5)) out2 <- capture_output(1:5, print = TRUE) expect_equal(out1, "[1] 1 2 3 4 5") expect_equal(out2, "[1] 1 2 3 4 5") }) test_that("capture output doesn't print invisible things", { out <- capture_output(invisible(1), print = TRUE) expect_equal(out, "") }) testthat/tests/testthat/setup.R0000644000176200001440000000037214165635513016412 0ustar liggesuserswriteLines( "If you see me, something has gone wrong with old-school teardown", "DELETE-ME" ) writeLines( "If you see me, something has gone wrong with new-school teardown", "DELETE-ME-2" ) withr::defer(unlink("DELETE-ME-2"), teardown_env()) testthat/tests/testthat/test-path-missing/0000755000176200001440000000000013024664617020505 5ustar liggesuserstestthat/tests/testthat/test-path-missing/empty0000644000176200001440000000000013024664617021554 0ustar liggesuserstestthat/tests/testthat/test-reporter-silent.R0000644000176200001440000000030514164710003021344 0ustar liggesuserstest_that("captures expectations; doesn't produce any output", { reporter <- SilentReporter$new() expect_snapshot_reporter(reporter) expect_snapshot_value(length(reporter$expectations())) }) testthat/tests/testthat/test-parallel.R0000644000176200001440000000177714164710003020020 0ustar liggesusers test_that("detect number of cpus to use", { withr::local_options(Ncpus = 100L) withr::local_envvar(TESTTHAT_CPUS = NA) expect_equal(default_num_cpus(), 100L) withr::local_options(Ncpus = 100L) withr::local_envvar(TESTTHAT_CPUS = 10) expect_equal(default_num_cpus(), 100L) withr::local_options(list(Ncpus = NULL)) withr::local_envvar(TESTTHAT_CPUS = NA) expect_equal(default_num_cpus(), 2L) withr::local_options(list(Ncpus = NULL)) withr::local_envvar(TESTTHAT_CPUS = NA) expect_equal(default_num_cpus(), 2L) withr::local_options(list(Ncpus = NULL)) withr::local_envvar(TESTTHAT_CPUS = 13) expect_equal(default_num_cpus(), 13L) }) test_that("ok", { withr::local_envvar(c(TESTTHAT_PARALLEL = "TRUE")) suppressMessages(ret <- test_local( test_path("test-parallel", "ok"), reporter = "silent", stop_on_failure = FALSE )) tdf <- as.data.frame(ret) tdf <- tdf[order(tdf$file), ] expect_equal(tdf$failed, c(0,1,0)) expect_equal(tdf$skipped, c(FALSE, FALSE, TRUE)) }) testthat/tests/testthat/test-reporter-list.R0000644000176200001440000000526114164710003021027 0ustar liggesusers # regression test: test_file() used to crash with a NULL reporter test_that("ListReporter with test_file and NULL reporter", { test_file_path <- 'test-list-reporter/test-exercise-list-reporter.R' expect_error(test_file(test_path(test_file_path), reporter = NULL), NA) }) # regression: check that an exception is reported if it is raised in the test file outside # of a test (test_that() call). 
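# such an error should be recorded as an error result attached to the file
# rather than aborting test_file()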
# N.B: the exception here happens between two tests: "before" and "after" test_that("ListReporter - exception outside of test_that()", { test_file_path <- 'test-list-reporter/test-exception-outside-tests.R' res <- test_file(test_path(test_file_path), reporter = NULL) expect_true(is.list(res)) # 2 results: first test "before" + the exception. N.B: the 2nd test "after" is not reported expect_length(res, 2) df <- as.data.frame(res) # the first result should be the result of test "before", which was successful expect_identical(df$test[1], 'before') expect_equal(df$passed[1], 1) expect_false(df$error[1]) # the 2nd result should be the exception expect_true(is.na(df$test[2])) # no test name expect_true(df$error[2]) # it was an error expect_match(res[[2]]$results[[1]]$message, "dying outside of tests") }) test_that("captures error if only thing in file", { test_file_path <- 'test-list-reporter/test-only-error.R' res <- test_file(test_path(test_file_path), reporter = NULL) expect_length(res, 1) expect_s3_class(res[[1]]$results[[1]], "expectation_error") }) # ListReporter on a "standard" test file: passing, failing and crashing tests test_that("exercise ListReporter", { test_file_path <- 'test-list-reporter/test-exercise-list-reporter.R' res <- test_file(test_path(test_file_path), reporter = NULL) expect_s3_class(res, "testthat_results") # we convert the results to data frame for convenience df <- as.data.frame(res) expect_equal(nrow(df), 5) expect_equal(df$test, c("test1", "test2", "test-pass", "test-fail", "test-error")) # test "test-fail" is the only failing test expect_equal(df$failed, c(0, 0, 0, 1, 0)) expect_identical(expectation_type(res[[4]]$results[[1]]), "failure") # test "test-error" is the only crashing test expect_equal(df$error, c(FALSE, FALSE, FALSE, FALSE, TRUE)) expect_identical(expectation_type(res[[5]]$results[[1]]), "error") }) # bare expectations are ignored test_that("ListReporter and bare expectations", { test_file_path <- 'test-list-reporter/test-bare-expectations.R' res <- test_file(test_path(test_file_path), reporter = NULL) df <- as.data.frame(res) # 2 tests, "before" and "after".
no result for the bare expectation expect_identical(df$test, c("before", "after")) }) testthat/tests/testthat/test-teardown.R0000644000176200001440000000130414164710003020031 0ustar liggesuserstest_that("teardown adds to queue", { local_edition(2) on.exit(teardown_reset()) expect_length(file_teardown_env$queue, 0) teardown({}) expect_length(file_teardown_env$queue, 1) teardown({}) expect_length(file_teardown_env$queue, 2) }) test_that("teardowns runs in order", { local_edition(2) on.exit(teardown_reset()) a <- 1 teardown(a <<- 2) teardown(a <<- 3) expect_length(file_teardown_env$queue, 2) teardown_run() expect_equal(a, 3) expect_length(file_teardown_env$queue, 0) }) test_that("teardown run after tests complete", { test_file(test_path("test-teardown/test-teardown.R"), "silent") expect_false(file.exists(test_path("test-teardown/teardown.txt"))) }) testthat/tests/testthat/test-expect-setequal.R0000644000176200001440000000446114164710003021326 0ustar liggesusers# setequal ---------------------------------------------------------------- test_that("ignores order and duplicates", { expect_success(expect_setequal(letters, rev(letters))) expect_success(expect_setequal(c("a", "a", "b"), c("b", "b", "a"))) }) test_that("checks both directions of containment", { expect_failure(expect_setequal(letters, letters[-1])) expect_failure(expect_setequal(letters[-1], letters)) }) test_that("truncates long differences", { cnd <- catch_cnd(expect_setequal("a", letters)) expect_match(cnd$message, "...") }) test_that("warns if both inputs are named", { expect_warning(expect_setequal(c(a = 1), c(b = 1)), "ignores names") }) test_that("error for non-vectors", { expect_error(expect_setequal(sum, sum), "be vectors") }) # mapequal ---------------------------------------------------------------- test_that("ignores order", { expect_success(expect_mapequal(list(a = 1, b = 2), list(b = 2, a = 1))) }) test_that("error if any names are duplicated", { expect_error(expect_mapequal(list(a = 1, b = 2, b = 3), list(b = 2, a = 1))) expect_error(expect_mapequal(list(a = 1, b = 2), list(b = 3, b = 2, a = 1))) expect_error(expect_mapequal(list(a = 1, b = 2, b = 3), list(b = 3, b = 2, a = 1))) }) test_that("handling NULLs", { expect_success(expect_mapequal(list(a = 1, b = NULL), list(b = NULL, a = 1))) }) test_that("fail if names don't match", { expect_failure(expect_mapequal(list(a = 1, b = 2), list(a = 1))) expect_failure(expect_mapequal(list(a = 1), list(a = 1, b = 2))) }) test_that("fails if values don't match", { expect_failure(expect_mapequal(list(a = 1, b = 2), list(a = 1, b = 3))) }) test_that("error for non-vectors", { expect_error(expect_mapequal(sum, sum), "be vectors") expect_error(expect_mapequal(NULL, NULL), "be vectors") }) test_that("error if any unnamed values", { expect_error(expect_mapequal(list(1, b = 2), list(1, b = 2))) expect_error(expect_mapequal(list(1, b = 2), list(b = 2, 1))) }) test_that("succeeds if comparing empty named and unnamed vectors", { x1 <- list() x2 <- setNames(list(), character()) expect_warning(expect_success(expect_mapequal(x1, x1))) expect_warning(expect_success(expect_mapequal(x1, x2))) expect_warning(expect_success(expect_mapequal(x2, x1))) expect_warning(expect_success(expect_mapequal(x2, x2))) }) testthat/tests/testthat/one.rds0000644000176200001440000000005413167170015016407 0ustar liggesusersb```b`fdd`b2Ctestthat/tests/testthat/test-expectation.R0000644000176200001440000000321214164710003020531 0ustar liggesuserstest_that("expectation contains failure message even when successful", { e 
<- expect(TRUE, "I failed") expect_equal(e$message, "I failed") }) test_that("expect warns if no `failure_message`", { expect_warning(expect(TRUE), "missing, with no default") }) test_that("info only evaluated on failure", { expect_error(expect(TRUE, "fail", info = stop("!")), NA) }) test_that("can subclass expectation", { exp <- new_expectation("failure", "didn't work", .subclass = "foo", bar = "baz") expect_true(inherits_all(exp, c("foo", "expectation_failure", "expectation", "error", "condition"))) expect_identical(attr(exp, "bar"), "baz") }) test_that("`expect()` and `exp_signal()` signal expectations", { expect_error(expect(TRUE, ""), regexp = NA) expect_error(expect(FALSE, ""), class = "expectation_failure") expect_error(exp_signal(new_expectation("success", "")), regexp = NA) expect_error(exp_signal(new_expectation("failure", "")), class = "expectation_failure") }) test_that("conditionMessage() is called during conversion", { local_bindings( conditionMessage.foobar = function(...) "dispatched", .env = global_env() ) wrn <- warning_cnd("foobar", message = "wrong") expect_identical(as.expectation(wrn)$message, "dispatched") err <- error_cnd("foobar", message = "wrong") expect_match(as.expectation(err)$message, "Error: dispatched") err <- cnd(c("foobar", "skip"), message = "wrong") expect_identical(as.expectation(err)$message, "dispatched") }) test_that("error message includes call", { f <- function() stop("Error!") cnd <- catch_cnd(f()) expect_equal(format(as.expectation(cnd)), "Error in `f()`: Error!") }) testthat/tests/testthat/too-many-failures.R0000644000176200001440000000044314164710003020607 0ustar liggesuserstest_that("SummaryReport gives up if too many errors", { expect_equal(Inf, 1) expect_equal(Inf, 2) expect_equal(Inf, 3) expect_equal(Inf, 4) expect_equal(Inf, 5) expect_equal(Inf, 6) expect_equal(Inf, 7) expect_equal(Inf, 8) expect_equal(Inf, 9) expect_equal(Inf, 10) }) testthat/tests/testthat/test-expect-invisible.R0000644000176200001440000000064614164710003021470 0ustar liggesuserstest_that("basically principles of visibilty hold", { expect_success(expect_invisible(x <- 10)) expect_failure(expect_invisible(x)) expect_success(expect_visible(x)) expect_failure(expect_visible(x <- 1)) }) test_that("invisibly returns evaluated value", { out <- expect_invisible(expect_invisible(x <- 2 + 2)) expect_equal(out, 4) out <- expect_invisible(expect_visible(2 + 2)) expect_equal(out, 4) }) testthat/tests/testthat/test-expect-known.R0000644000176200001440000000712514165635513020654 0ustar liggesuserslocal_edition(2) # expect_known_output ----------------------------------------------------- test_that("uses specified width", { old <- options(width = 20) on.exit(options(old), add = TRUE) x <- 1:100 expect_known_output(print(x), "width-80.txt") }) test_that("creates file on first run", { file <- tempfile() expect_success( expect_warning( expect_known_output(cat("ok!\n"), file), "Creating reference" ) ) expect_true(file.exists(file)) }) test_that("igores incomplete last line", { file <- tempfile() writeLines("Hi!", file) expect_success(expect_known_output(cat("Hi!"), file)) expect_success(expect_known_output(cat("Hi!\n"), file)) expect_failure(expect_known_output(cat("Hi!\n\n"), file)) expect_failure(expect_known_output(cat("oops"), file)) }) test_that("updates by default", { file <- tempfile() writeLines("Hi!", file) expect_failure(expect_known_output(cat("oops"), file, update = FALSE)) expect_equal(readLines(file), "Hi!") expect_failure(expect_known_output(cat("oops"), file, update = 
TRUE)) expect_success(expect_known_output(cat("oops"), file)) }) test_that("works with utf-8 output", { skip_on_cran() skip_on_os("windows") text <- c("\u00fc", "\u2a5d", "\u6211", "\u0438") expect_known_output(cat(text, sep = "\n"), "test-expect-known.txt") }) test_that("Warning for non-UTF-8 reference files", { x <- "\xe9\xe1\xed\xf6\xfc" Encoding(x) <- "latin1" tmp <- tempfile() on.exit(unlink(tmp), add = TRUE) writeBin(x, tmp) suppressWarnings( expect_failure( expect_known_output("foobar", tmp, update = FALSE) ) ) }) # expect_known_value ------------------------------------------------------ test_that("correctly matches to a file", { x <- 1 expect_success(expect_known_value(x, "one.rds")) x <- 2 expect_failure(expect_known_value(x, "one.rds", update = FALSE)) }) test_that("first run is successful", { expect_success( expect_warning( expect_known_value(2, "two.rds"), "Creating reference" ) ) unlink("two.rds") }) test_that("equal_to_ref does not overwrite existing", { tmp_rds <- tempfile(fileext=".rds") on.exit(unlink(tmp_rds)) ref_obj1 <- 1:3 ref_obj2 <- 2:4 saveRDS(ref_obj1, tmp_rds) expect_success(expect_equal_to_reference(ref_obj1, tmp_rds)) # Failure does not update object expect_failure(expect_equal_to_reference(ref_obj2, tmp_rds)) expect_equal(readRDS(tmp_rds), ref_obj1) # Now failure does update object expect_failure(expect_equal_to_reference(ref_obj2, tmp_rds, update=TRUE)) expect_success(expect_equal_to_reference(ref_obj2, tmp_rds)) }) test_that("serializes to version 2 by default", { skip_if(getRversion() < 3.5) tmp_rds <- tempfile(fileext = ".rds") on.exit(unlink(tmp_rds)) expect_warning( expect_known_value("a", tmp_rds), "Creating reference" ) expect_identical(tools:::get_serialization_version(tmp_rds)[[1]], 2L) }) test_that("version 3 is possible", { skip_if(getRversion() < 3.5) tmp_rds <- tempfile(fileext = ".rds") on.exit(unlink(tmp_rds)) expect_warning( expect_known_value("a", tmp_rds, version = 3), "Creating reference" ) expect_identical(tools:::get_serialization_version(tmp_rds)[[1]], 3L) }) # expect_known_hash ------------------------------------------------------- test_that("empty hash succeeds with warning", { expect_success( expect_warning( expect_known_hash(1:10), "No recorded hash" ) ) }) test_that("only succeeds if hash is correct", { expect_success(expect_known_hash(1:10, "c08951d2c2")) expect_failure(expect_known_hash(1:10, "c08951d2c3")) }) testthat/tests/testthat/teardown.R0000644000176200001440000000002413762224743017070 0ustar liggesusersunlink("DELETE-ME") testthat/tests/testthat/test-snapshot-file.R0000644000176200001440000001171614166627056021013 0ustar liggesuserstest_that("expect_snapshot_file works", { skip_if_not(getRversion() >= "3.6.0") expect_snapshot_file( write_tmp_lines(letters), "foo.r", compare = compare_file_text ) path <- tempfile() png(path, width = 300, height = 300, type = "cairo") plot(1:10, xlab = "", ylab = "", pch = 20, cex = 5, axes = FALSE) dev.off() expect_snapshot_file(path, "foo.png") path <- tempfile() mtcars2 <- mtcars # mtcars2$wt[10] <- NA write.csv(mtcars2, path) expect_snapshot_file( path, "foo.csv", compare = compare_file_text ) # Deprecated `binary` argument still works withr::local_options(lifecycle_verbosity = "quiet") expect_snapshot_file( path, "foo.csv", binary = FALSE ) }) test_that("expect_snapshot_file works in a different directory", { skip_if_not(getRversion() >= "3.6.0") path <- withr::local_tempdir() withr::local_dir(path) brio::write_lines("a", "a.txt", eol = "\r\n") # expect no warning expect_warning( 
expect_snapshot_file("a.txt"), regexp = NA ) }) test_that("expect_snapshot_file works with variant", { expect_snapshot_file( write_tmp_lines(version$nickname), "nickname.txt", compare = compare_file_text, variant = r_version() ) }) test_that("basic workflow", { snapper <- local_snapshotter() # warns on first run snapper$start_file("snapshot-6", "test") expect_warning(expect_snapshot_file(write_tmp_lines(letters), "letters.txt"), "Adding new") snapper$end_file() # succeeds if unchanged snapper$start_file("snapshot-6", "test") expect_success(expect_snapshot_file(write_tmp_lines(letters), "letters.txt")) snapper$end_file() # fails if changed snapper$start_file("snapshot-6", "test") expect_failure(expect_snapshot_file(write_tmp_lines(letters[-1]), "letters.txt")) snapper$end_file() }) test_that("can announce snapshot file", { snapper <- local_snapshotter() snapper$start_file("snapshot-announce", "test") announce_snapshot_file(name = "bar.svg") expect_equal(snapper$snap_file_seen, "snapshot-announce/bar.svg") }) test_that("can transform snapshot contents", { path <- local_tempfile1(c("secret", "ssh secret squirrel")) redact <- function(x) gsub("secret", "", x) expect_snapshot_file(path, "secret.txt", transform = redact) }) # snapshot_file_equal ----------------------------------------------------- test_that("warns on first creation", { path <- write_tmp_lines("a") withr::defer(unlink(file.path(tempdir(), "test.txt"))) # Warns on first run expect_warning( expect_true(snapshot_file_equal(tempdir(), "test.txt", path)), "new file snapshot" ) # Errors on non-existing file expect_error( expect_true(snapshot_file_equal(tempdir(), "test.txt", "doesnt-exist.txt")), "`doesnt-exist.txt` not found" ) # Unchanged returns TRUE expect_true(snapshot_file_equal(tempdir(), "test.txt", path)) expect_true(file.exists(file.path(tempdir(), "test.txt"))) expect_false(file.exists(file.path(tempdir(), "test.new.txt"))) # Changed returns FALSE path2 <- write_tmp_lines("b") expect_false(snapshot_file_equal(tempdir(), "test.txt", path2)) expect_true(file.exists(file.path(tempdir(), "test.txt"))) expect_true(file.exists(file.path(tempdir(), "test.new.txt"))) # Changing again overwrites path2 <- write_tmp_lines("c") expect_false(snapshot_file_equal(tempdir(), "test.txt", path2)) expect_equal(brio::read_lines(file.path(tempdir(), "test.new.txt")), "c") # Unchanged cleans up expect_true(snapshot_file_equal(tempdir(), "test.txt", path)) expect_true(file.exists(file.path(tempdir(), "test.txt"))) expect_false(file.exists(file.path(tempdir(), "test.new.txt"))) }) # helpers ----------------------------------------------------------------- test_that("text comparison ignores CR", { path1 <- write_tmp_lines(c("a", "b")) path2 <- write_tmp_lines(c("a", "b"), eol = "\r\n") expect_false(compare_file_binary(path1, path2)) expect_true(compare_file_text(path1, path2)) }) test_that("split_path handles edge cases", { expect_equal(split_path(""), list(dir = "", name = "", ext = "")) expect_equal(split_path("a"), list(dir = "", name = "a", ext = "")) expect_equal(split_path("a"), list(dir = "", name = "a", ext = "")) expect_equal(split_path(".b"), list(dir = "", name = "", ext = "b")) expect_equal(split_path(".b.c"), list(dir = "", name = "", ext = "b.c")) expect_equal(split_path("x/a"), list(dir = "x", name = "a", ext = "")) expect_equal(split_path("x/a"), list(dir = "x", name = "a", ext = "")) expect_equal(split_path("x/.b"), list(dir = "x", name = "", ext = "b")) expect_equal(split_path("x/.b.c"), list(dir = "x", name = "", ext = "b.c")) }) 
test_that("snapshot_hint output differs in R CMD check", { expect_snapshot(cat(snapshot_review_hint("lala", "foo.r", check = FALSE, ci = FALSE))) expect_snapshot(cat(snapshot_review_hint("lala", "foo.r", check = TRUE, ci = FALSE))) expect_snapshot(cat(snapshot_review_hint("lala", "foo.r", check = TRUE, ci = TRUE))) }) testthat/tests/testthat/test-test-env.R0000644000176200001440000000047014164710003017756 0ustar liggesuserstest_that("environment has package name", { expect_equal(methods::getPackageName(test_env("testthat")), "testthat") expect_equal(methods::getPackageName(topenv()), "testthat") }) setClass("MyClass") test_that("Cannot create S4 class without special behaviour", { expect_error(setClass("MyClass2"), NA) }) testthat/tests/testthat/test-reporter-debug.R0000644000176200001440000001155214164710003021142 0ustar liggesuserstest_that("produces consistent output", { withr::local_options(testthat.edition_ignore = TRUE) local_edition(2) local_mock( show_menu = function(choices, title = NULL) { cat(paste0(format(seq_along(choices)), ": ", choices, sep = "\n"), "\n", sep = "") 0L }, sink_number = function() 0L ) withr::local_options(testthat_format_srcrefs = FALSE) expect_snapshot_reporter(DebugReporter$new()) }) get_vars_from_debug_reporter <- function(choice, fun, envir = parent.frame()) { frame <- get_frame_from_debug_reporter(choice, fun, envir) ls(frame) } get_frame_from_debug_reporter <- function(choice, fun, envir = parent.frame()) { local_edition(2) force(choice) test_debug_reporter_parent_frame <- NULL with_mock( show_menu = function(choices, title = NULL) { # if (choice > 0) print(choices) my_choice <- choice choice <<- 0L my_choice }, browse_frame = function(frame, skip) { test_debug_reporter_parent_frame <<- frame }, sink_number = function() 0L, with_reporter( "debug", test_that("debug_reporter_test", { fun() }) ) ) test_debug_reporter_parent_frame } success_fun <- function() { aa <- 1 expect_true(TRUE) } test_that("debug reporter is not called for successes", { expect_null(get_frame_from_debug_reporter(2, success_fun)) }) test_that("browser() is called for the correct frame for failures", { fun_1 <- function() { aa <- 1 expect_true(FALSE) } fun_2 <- function() { f <- function() expect_true(FALSE) f() } fun_3 <- function() { f <- function() { g <- function() expect_true(FALSE) g() } f() } expect_equal(get_vars_from_debug_reporter(1, fun_1), character()) expect_equal(get_vars_from_debug_reporter(2, fun_1), "aa") expect_equal(get_vars_from_debug_reporter(1, fun_2), character()) expect_equal(get_vars_from_debug_reporter(2, fun_2), "f") expect_equal(get_vars_from_debug_reporter(3, fun_2), character()) expect_equal(get_vars_from_debug_reporter(1, fun_3), character()) expect_equal(get_vars_from_debug_reporter(2, fun_3), "f") expect_equal(get_vars_from_debug_reporter(3, fun_3), "g") expect_equal(get_vars_from_debug_reporter(4, fun_3), character()) }) test_that("browser() is called for the correct frame for warnings", { fun_1 <- function() { aa <- 1 warning("warn") } fun_2 <- function() { f <- function() warning("warn") f() } fun_3 <- function() { f <- function() { g <- function() warning("warn") g() } f() } expect_equal(get_vars_from_debug_reporter(1, fun_1), character()) expect_equal(get_vars_from_debug_reporter(2, fun_1), "aa") expect_equal(get_vars_from_debug_reporter(1, fun_2), character()) expect_equal(get_vars_from_debug_reporter(2, fun_2), "f") expect_equal(get_vars_from_debug_reporter(3, fun_2), character()) expect_equal(get_vars_from_debug_reporter(1, fun_3), character()) 
expect_equal(get_vars_from_debug_reporter(2, fun_3), "f") expect_equal(get_vars_from_debug_reporter(3, fun_3), "g") expect_equal(get_vars_from_debug_reporter(4, fun_3), character()) }) test_that("browser() is called for the correct frame for errors", { fun_1 <- function() { aa <- 1 stop("error") } fun_2 <- function() { f <- function() stop("error") f() } fun_3 <- function() { f <- function() { g <- function() stop("error") g() } f() } expect_equal(get_vars_from_debug_reporter(1, fun_1), character()) expect_equal(get_vars_from_debug_reporter(2, fun_1), "aa") expect_equal(get_vars_from_debug_reporter(1, fun_2), character()) expect_equal(get_vars_from_debug_reporter(2, fun_2), "f") expect_equal(get_vars_from_debug_reporter(3, fun_2), character()) expect_equal(get_vars_from_debug_reporter(1, fun_3), character()) expect_equal(get_vars_from_debug_reporter(2, fun_3), "f") expect_equal(get_vars_from_debug_reporter(3, fun_3), "g") expect_equal(get_vars_from_debug_reporter(4, fun_3), character()) }) test_that("browser() is called for the correct frame for skips", { fun_1 <- function() { aa <- 1 skip("skip") } fun_2 <- function() { f <- function() skip("skip") f() } fun_3 <- function() { f <- function() { g <- function() skip("skip") g() } f() } expect_equal(get_vars_from_debug_reporter(1, fun_1), character()) expect_equal(get_vars_from_debug_reporter(2, fun_1), "aa") expect_equal(get_vars_from_debug_reporter(1, fun_2), character()) expect_equal(get_vars_from_debug_reporter(2, fun_2), "f") expect_equal(get_vars_from_debug_reporter(3, fun_2), character()) expect_equal(get_vars_from_debug_reporter(1, fun_3), character()) expect_equal(get_vars_from_debug_reporter(2, fun_3), "f") expect_equal(get_vars_from_debug_reporter(3, fun_3), "g") expect_equal(get_vars_from_debug_reporter(4, fun_3), character()) }) testthat/tests/testthat/test-path-installed/0000755000176200001440000000000013024664701021005 5ustar liggesuserstestthat/tests/testthat/test-path-installed/testthat-tests/0000755000176200001440000000000013024664617024013 5ustar liggesuserstestthat/tests/testthat/test-path-installed/testthat-tests/testthat/0000755000176200001440000000000013024664617025653 5ustar liggesuserstestthat/tests/testthat/test-path-installed/testthat-tests/testthat/empty0000644000176200001440000000000013024664617026722 0ustar liggesuserstestthat/tests/testthat/test-reporter-stop.R0000644000176200001440000000052114164710003021033 0ustar liggesuserstest_that("produces useful output", { expect_snapshot_reporter(StopReporter$new()) }) test_that("stop if needed errors when needed",{ r <- StopReporter$new() expect_error(r$stop_if_needed(), NA) r$n_fail <- 1 expect_error(r$stop_if_needed(), "Test failed") r$stop_reporter <- FALSE expect_error(r$stop_if_needed(), NA) }) testthat/tests/testthat/test-expect-known-hash.R0000644000176200001440000000000014164710003021541 0ustar liggesuserstestthat/tests/testthat/test-reporter-rstudio.R0000644000176200001440000000013214164710003021535 0ustar liggesuserstest_that("reporter basics works", { expect_snapshot_reporter(RStudioReporter$new()) }) testthat/tests/testthat/utf8.R0000644000176200001440000000011713450671736016140 0ustar liggesuserstest_that("sourced with correct encoding", { expect_equal("ä", "\u00e4") }) testthat/tests/testthat/test-try-again.R0000644000176200001440000000046413450671736020127 0ustar liggesuserssucceed_after <- function(i) { function() { i <<- i - 1 if (i > 0) fail(paste0("i is ", i)) } } test_that("tries multiple times", { third_try <- succeed_after(3) 
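# succeed_after(3) fails on its first two calls ("i is 2", then "i is 1") and
# succeeds on the third, so three tries below are just enough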
expect_true(try_again(3, third_try())) third_try <- succeed_after(3) expect_failure(try_again(2, third_try()), "i is 1") }) testthat/tests/testthat/test-path-present/0000755000176200001440000000000013701151360020500 5ustar liggesuserstestthat/tests/testthat/test-path-present/tests/0000755000176200001440000000000013701151360021642 5ustar liggesuserstestthat/tests/testthat/test-path-present/tests/testthat/0000755000176200001440000000000013701151360023502 5ustar liggesuserstestthat/tests/testthat/test-path-present/tests/testthat/empty0000644000176200001440000000000013701151360024551 0ustar liggesuserstestthat/tests/testthat/test-compare.R0000644000176200001440000001405414164710003017642 0ustar liggesuserstest_that("list comparison truncates to max_diffs", { x <- as.list(as.character(1:1e3)) y <- lapply(x, paste0, ".") lines1 <- strsplit(compare(x, y)$message, "\n")[[1]] expect_length(lines1, 10) lines2 <- strsplit(compare(x, y, max_diffs = 99)$message, "\n")[[1]] expect_length(lines2, 100) }) test_that("no diff", { expect_equal(compare(1,1), no_difference()) }) test_that("vector_equal_tol handles infinity", { expect_true(vector_equal_tol(Inf, Inf)) expect_true(vector_equal_tol(-Inf, -Inf)) expect_false(vector_equal_tol(Inf, -Inf)) expect_false(vector_equal_tol(Inf, 0)) }) test_that("vector_equal_tol handles na", { expect_true(vector_equal_tol(NA, NA)) expect_false(vector_equal_tol(NA, 0)) }) # character --------------------------------------------------------------- test_that("types must be the same", { expect_match(compare("a", 1L)$message, "character is not integer") }) test_that("base lengths must be identical", { expect_match(compare("a", letters)$message, "1 is not 26") }) test_that("classes must be identical", { c1 <- "a" c2 <- structure("a", class = "mycharacter") expect_match(compare(c1, c2)$message, "'character' is not 'mycharacter'") }) test_that("attributes must be identical", { x1 <- "a" x2 <- c(a = "a") x3 <- c(b = "a") x4 <- structure("a", a = 1) x5 <- structure("a", b = 1) expect_match(compare(x1, x2)$message, "names for current") expect_match(compare(x2, x3)$message, "Names: 1 string mismatch") expect_match(compare(x1, x4)$message, "target is NULL") expect_match(compare(x4, x5)$message, "Names: 1 string mismatch") }) test_that("two identical vectors are the same", { expect_true(compare(letters, letters)$equal) }) test_that("equal if both missing or both the same (multiple values)", { expect_true(compare(c("ABC", NA), c("ABC", NA))$equal) expect_false(compare(c(NA, NA), c("ABC", NA))$equal) expect_false(compare(c("AB", NA), c("ABC", NA))$equal) expect_false(compare(c("AB", "AB"), c("ABC", "AB"))$equal) }) test_that("computes correct number of mismatches", { x <- mismatch_character(c("a", "b", "c"), c("c", "d", "e")) expect_equal(x$n, 3) }) test_that("only differences are shown", { x <- mismatch_character(letters, c(letters[-26], "a")) lines <- strsplit(format(x), "\n")[[1]] expect_equal(lines[1], "1/26 mismatches") expect_equal(lines[2], 'x[26]: "z"') }) test_that("not all lines are shown", { a <- "1234567890" b <- paste(rep(a, 10), collapse = "") x <- mismatch_character(a, b) lines <- strsplit(format(x, width = 16), "\n")[[1]] expect_equal(lines[1], "1/1 mismatches") expect_equal(length(lines), 8) }) test_that("vectors longer than `max_diffs` (#513)", { comp <- compare(letters[1:2], LETTERS[1:2], max_diffs = 1) expect_s3_class(comp, "comparison") expect_false(comp$equal) expect_equal(comp$message, "2/2 mismatches\nx[1]: \"a\"\ny[1]: \"A\"") }) # numeric 
------------------------------------------------------------------ test_that("numeric types are compatible", { expect_true(compare(1, 1L)$equal) expect_true(compare(1L, 1)$equal) }) test_that("non-numeric types are not compatible", { expect_match(compare(1, "a")$message, "double is not character") }) test_that("base lengths must be identical", { expect_match(compare(1, c(1, 2))$message, "1 is not 2") }) test_that("classes must be identical", { f1 <- factor("a") f2 <- factor("a", ordered = TRUE) expect_match(compare(1L, f1)$message, "'integer' is not 'factor'") expect_match(compare(1L, f2)$message, "'integer' is not 'ordered'/'factor'") }) test_that("attributes must be identical", { x1 <- 1L x2 <- c(a = 1L) x3 <- c(b = 1L) x4 <- structure(1L, a = 1) x5 <- structure(1L, b = 1) expect_match(compare(x1, x2)$message, "names for current") expect_match(compare(x2, x3)$message, "Names: 1 string mismatch") expect_match(compare(x1, x4)$message, "target is NULL") expect_match(compare(x4, x5)$message, "Names: 1 string mismatch") }) test_that("unless check.attributes is FALSE", { x1 <- 1L x2 <- c(a = 1L) x3 <- structure(1L, a = 1) expect_equal(compare(x1, x2, check.attributes = FALSE)$message, "Equal") expect_equal(compare(x1, x3, check.attributes = FALSE)$message, "Equal") expect_equal(compare(x2, x3, check.attributes = FALSE)$message, "Equal") }) test_that("two identical vectors are the same", { expect_true(compare(1:10, 1:10)$equal) }) test_that("named arguments to all.equal passed through", { expect_equal(415, 416, tolerance = 0.01) }) test_that("tolerance used for individual comparisons", { x1 <- 1:3 x2 <- x1 + c(0, 0, 0.1) expect_false(compare(x1, x2)$equal) expect_true(compare(x1, x2, tolerance = 0.1)$equal) }) test_that("mismatch_numeric truncates diffs", { x <- mismatch_numeric(1:11, 11:1) expect_equal(x$n, 11) expect_equal(x$n_diff, 10) lines <- strsplit(format(x, max_diffs = 5), "\n")[[1]] expect_equal(length(lines), 5 + 2) }) # time -------------------------------------------------------------------- test_that("both POSIXt classes are compatible", { x1 <- Sys.time() x2 <- as.POSIXlt(x1) expect_true(compare(x1, x2)$equal) expect_true(compare(x2, x1)$equal) }) test_that("other classes are not", { expect_match(compare(Sys.time(), 1)$message, "'POSIXct'/'POSIXt' is not 'numeric'") }) test_that("base lengths must be identical", { x1 <- Sys.time() x2 <- c(x1, x1 - 3600) expect_match(compare(x1, x2)$message, "1 is not 2") }) test_that("tzones must be identical", { t1 <- ISOdatetime(2016, 2, 29, 12, 13, 14, "EST") t2 <- ISOdatetime(2016, 2, 29, 12, 13, 14, "US/Eastern") expect_match(compare(t1, t2)$message, '"tzone": 1 string mismatch') }) test_that("two identical vectors are the same", { x <- Sys.time() expect_true(compare(x, x)$equal) }) test_that("two different values are not the same", { x1 <- Sys.time() x2 <- x1 + 3600 expect_false(compare(x1, x2)$equal) }) test_that("uses all.equal tolerance", { x1 <- structure(1457284588.83749, class = c("POSIXct", "POSIXt")) x2 <- structure(1457284588.837, class = c("POSIXct", "POSIXt")) expect_true(compare(x1, x2)$equal) }) testthat/tests/testthat/test-reporter-junit.R0000644000176200001440000000036714164710003021207 0ustar liggesuserstest_that("reporter doesn't change without warning", { expect_snapshot_reporter(JunitReporterMock$new()) }) test_that("permit Java-style class names", { class <- "package_name_or_domain.ClassName" expect_equal(classnameOK(class), class) }) 
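# Editorial sketch (not part of the original suite): the JunitReporter
# exercised above is normally pointed at a file so that CI systems can ingest
# the results. Assuming testthat's documented `reporter` argument and the
# reporter's `file` field, usage might look like:
#   test_local(reporter = JunitReporter$new(file = "junit-results.xml"))
# The output file name here is purely illustrative.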
testthat/tests/testthat/test-verify-output.R0000644000176200001440000000412114164710003021050 0ustar liggesuserstest_that("can record all types of output", { verify_output(test_path("test-verify-output.txt"), { "Output" 1 + 2 invisible(1:10) 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 "# Header" "Other output" letters }) }) test_that("can record all types of output", { scoped_bindings( .env = global_env(), conditionMessage.foobar = function(cnd) { paste("Dispatched!", cnd$message) } ) verify_output(test_path("test-verify-conditions.txt"), { message("Message") "With calls" warning("Warning") stop("Error") "Without calls" warning("Warning", call. = FALSE) stop("Error", call. = FALSE) "With `conditionMessage()` method" cnd_signal(message_cnd("foobar", message = "Message")) cnd_signal(warning_cnd("foobar", message = "Warning")) cnd_signal(error_cnd("foobar", message = "Error")) }) }) test_that("can't record plots", { skip_if(interactive()) expect_error(verify_output(tempfile(), plot(1:10)), "Plots") }) test_that("verify_output() splits condition messages on newlines", { verify_output(test_path("test-verify-conditions-lines.txt"), { message("First.\nSecond.") warning("First.\nSecond.") stop("First.\nSecond.") }) }) test_that("can use constructed calls in verify_output() (#945)", { verify_output(test_path("test-verify-constructed-calls.txt"), { expr(foo(!!c("bar", "baz"))) # Can unquote local objects binding <- quote(foo) expr(foo(!!binding)) }) }) test_that("verify_output() doesn't use cli unicode by default", { verify_output( test_path("test-verify-unicode-false.txt"), { cat(cli::symbol$info, cli::symbol$cross, "\n") } ) local_reproducible_output(unicode = TRUE) verify_output( test_path("test-verify-unicode-true.txt"), unicode = TRUE, { cat(cli::symbol$info, cli::symbol$cross, "\n") }) }) test_that("verify_output() handles carriage return", { verify_output(test_path("test-verify-conditions-cr.txt"), { cat("\r\n") }) }) testthat/tests/testthat/test-quasi-label.R0000644000176200001440000000262014164710003020407 0ustar liggesuserstest_that("atomic scalars deparsed to single values", { expect_equal(expr_label(NULL), "NULL") expect_equal(expr_label(TRUE), "TRUE") expect_equal(expr_label(1L), "1L") expect_equal(expr_label(1), "1") expect_equal(expr_label("a"), '"a"') }) test_that("symbols are quoted", { expect_equal(expr_label(quote(a)), "`a`") }) test_that("long vectors get ...", { long <- "123456789_123456789_123456789_123456789_123456789_123456789_" expect_equal( expr_label(c(long, long)), paste0('c("', long, '", ...)') ) }) test_that("produces useful summaries for long calls", { expect_snapshot({ expr_label(quote(foo( a = "this is a long argument", b = "this is a long argument", c = "this is a long argument" ))) expr_label(quote( arg + arg + arg + arg + arg + arg + arg + arg + arg + arg + arg + arg )) expr_label(quote( arg + (arg + arg + arg + arg + arg + arg + arg + arg + arg + arg + arg) )) expr_label(quote(function(a, b, c) { a + b + c})) }) }) test_that("other inlined other objects are deparsed", { expect_equal(expr_label(c(1, 2, 3)), "c(1, 2, 3)") expect_equal(expr_label(list(1, 2, 3)), "list(1, 2, 3)") expect_equal( expr_label(1:100 + 0), "c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, ...)" ) }) test_that("informative error for missing arg", { expect_snapshot(error = TRUE, expect_equal()) }) testthat/tests/testthat/test-verify-conditions.txt0000644000176200001440000000111414172347327022314 0ustar 
liggesusers> message("Message") Message: Message > # With calls > warning("Warning") Warning in eval(expr, envir, enclos): Warning > stop("Error") Error in eval(expr, envir, enclos): Error > # Without calls > warning("Warning", call. = FALSE) Warning: Warning > stop("Error", call. = FALSE) Error: Error > # With `conditionMessage()` method > cnd_signal(message_cnd("foobar", message = "Message")) Message: Dispatched! Message > cnd_signal(warning_cnd("foobar", message = "Warning")) Warning: Dispatched! Warning > cnd_signal(error_cnd("foobar", message = "Error")) Error: Dispatched! Error testthat/tests/testthat/test-colour.R0000644000176200001440000000100214164742334017520 0ustar liggesuserstest_that("can supress colours", { op <- options( crayon.enabled = TRUE, testthat.use_colours = TRUE, cli.num_colors = 8L ) check <- crayon::has_style(colourise("X")) # Must restore original options before expectation is triggered options(op) expect_true(check) }) test_that("We don't have colours if we don't want to", { op <- options( crayon.enabled = TRUE, testthat.use_colours = FALSE ) check <- crayon::has_style(colourise("X")) options(op) expect_false(check) }) testthat/tests/testthat/test-skip.R0000644000176200001440000000504714164737335017205 0ustar liggesuserstest_that("Package checks", { expect_skip(skip_if_not_installed("testthat", "9999.9999.999")) expect_skip(skip_if_not_installed("testthat", "1.0.0"), NA) expect_skip(skip_if_not(FALSE)) expect_skip(skip_if(TRUE)) }) test_that("Skip env vars", { expect_skip_with_env <- function(new, code, regexp = NULL) { withr::with_envvar(new, expect_skip(code, regexp)) } expect_skip_with_env(c("NOT_CRAN" = "false"), skip_on_cran()) expect_skip_with_env(c("NOT_CRAN" = "true"), skip_on_cran(), NA) expect_skip_with_env(c("TRAVIS" = "true"), skip_on_travis()) expect_skip_with_env(c("TRAVIS" = "false"), skip_on_travis(), NA) expect_skip_with_env(c("CI" = "true"), skip_on_ci()) expect_skip_with_env(c("CI" = "false"), skip_on_ci(), NA) expect_skip_with_env(c("APPVEYOR" = "True"), skip_on_appveyor()) expect_skip_with_env(c("APPVEYOR" = "False"), skip_on_appveyor(), NA) expect_skip_with_env(c("R_COVR" = "true"), skip_on_covr()) expect_skip_with_env(c("R_COVR" = "false"), skip_on_covr(), NA) expect_skip_with_env(c("BBS_HOME" = "asdf"), skip_on_bioc()) expect_skip_with_env(c("BBS_HOME" = ""), skip_on_bioc(), NA) }) test_that("skip on os checks os names", { expect_snapshot(skip_on_os("amiga"), error = TRUE) }) test_that("can skip on multiple oses", { mockery::stub(skip_on_os, "system_os", function() "windows") expect_skip(skip_on_os("windows")) expect_skip(skip_on_os(c("windows", "linux"))) expect_skip(skip_on_os("linux"), NA) expect_skip(skip_on_os("mac"), NA) }) test_that("can refine os with arch", { mockery::stub(skip_on_os, "system_os", function() "windows") mockery::stub(skip_on_os, "system_arch", function() "i386") expect_skip(skip_on_os("windows")) expect_skip(skip_on_os("windows", "i386")) expect_skip(skip_on_os("windows", "x86_64"), NA) expect_skip(skip_on_os("linux", "i386"), NA) }) test_that("autogenerated message is always single line", { a_very_long_argument_name <- FALSE cnd <- capture_condition(skip_if_not( a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || 
a_very_long_argument_name || a_very_long_argument_name )) expect_length(cnd$message, 1) # ensure the message is not repeated, #1290 expect_snapshot_output(cat(cnd$message)) }) testthat/tests/testthat/test-expect-equality.R0000644000176200001440000000737214164710003021344 0ustar liggesuserstest_that("basic principles of equality hold", { local_edition(2) expect_success(expect_equal(1, 1)) expect_failure(expect_equal(1, 2)) expect_success(expect_identical(1, 1)) expect_failure(expect_identical(1, 2)) local_edition(3) expect_success(expect_equal(1, 1)) expect_failure(expect_equal(1, 2)) expect_success(expect_identical(1, 1)) expect_failure(expect_identical(1, 2)) }) test_that("expect_equal() ignores numeric type; expect_identical() does not", { local_edition(2) expect_success(expect_equal(1, 1L)) expect_failure(expect_identical(1, 1L)) local_edition(3) expect_success(expect_equal(1, 1L)) expect_failure(expect_identical(1, 1L)) }) test_that("returns value", { one <- 1 local_edition(3) expect_equal(expect_equal(one, one), 1) expect_equal(expect_identical(one, one), 1) local_edition(2) expect_equal(expect_equal(one, one), 1) expect_equal(expect_identical(one, one), 1) }) test_that("can control numeric tolerance", { x1 <- 1 x2 <- x1 + 1e-6 local_edition(2) expect_failure(expect_equal(x1, x2)) expect_success(expect_equal(x1, x2, tolerance = 1e-5)) expect_success(expect_equivalent(x1, x2, tolerance = 1e-5)) # with partial matching # we work around https://github.com/r-lib/testthat/issues/1188 if (getRversion() < "3.6.0" && is.null(getOption("warnPartialMatchArgs"))) { options(warnPartialMatchArgs = FALSE) } withr::local_options(warnPartialMatchArgs = FALSE) expect_success(expect_equal(x1, x2, tol = 1e-5)) local_edition(3) expect_failure(expect_equal(x1, x2)) expect_success(expect_equal(x1, x2, tolerance = 1e-5)) }) test_that("second edition only optionally sets tolerance", { local_edition(2) # all.equal.POSIXct sets default tolerance to 0.001 x <- .POSIXct(1) y <- .POSIXct(1 + 1e-4) expect_success(expect_equal(x, y)) }) test_that("provide useful feedback on failure", { local_output_override() local_edition(3) expect_snapshot_error(expect_identical(1, "a")) expect_snapshot_error(expect_equal(1, "a")) local_edition(2) withr::local_options(testthat.edition_ignore = TRUE) expect_snapshot_error(expect_identical(1, "a")) expect_snapshot_error(expect_equal(1, "a")) }) test_that("default labels use unquoting", { local_edition(2) x <- 2 expect_failure(expect_equal(1, !!
x), "1 not equal to 2", fixed = TRUE) }) test_that("% is not treated as sprintf format specifier (#445)", { expect_failure(expect_equal("+", "%")) expect_failure(expect_equal("%", "+")) expect_equal("%", "%") }) test_that("is_call_infix() handles complex calls (#1472)", { expect_false(is_call_infix(quote( base::any( c(veryyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy_long_name = TRUE), na.rm = TRUE ) ))) withr::local_envvar( "_R_CHECK_LENGTH_1_LOGIC2_" = "TRUE", ) expect_true( base::any( c(veryyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy_long_name = TRUE), na.rm = TRUE ) ) }) # 2nd edition --------------------------------------------------- test_that("useful message if objects equal but not identical", { local_edition(2) f <- function() x g <- function() x environment(g) <- globalenv() expect_failure(expect_identical(f, g)) }) test_that("attributes for object (#452)", { local_edition(2) oops <- structure(0, oops = "oops") expect_equal(oops, oops) expect_failure(expect_equal(oops, 0)) expect_equal(as.numeric(oops), 0) }) test_that("expect_equivalent ignores attributes and numeric differences", { local_edition(2) x <- y <- 1 attr(y, "y") <- y expect_success(expect_equivalent(x, y)) expect_success(expect_equivalent(x, 1L)) }) test_that("expect_equivalent returns value", { local_edition(2) one <- 1 expect_equal(expect_equivalent(one, one), 1) }) testthat/tests/testthat/test-describe.R0000644000176200001440000000235114164710003017771 0ustar liggesuserssomeExternalVariable <- 1 describe("describe", { it("can contain nested describe blocks", { describe("addition", { it("should be able to add two numbers", { expect_equal(2, 1 + 1) }) describe("sub feature", { it("should also work", { expect_equal(2, 1 + 1) }) }) }) }) it("can have not yet implemented specs", { describe("Millennium Prize Problems", { it("can be shown that P != NP") }) }) it("has to have a description for the block", { expect_error(describe({})) expect_error(describe("", {})) expect_error(describe("test", {it()})) expect_error(describe("test", {it("")})) }) it("has to have a description of length 1", { expect_error(describe(c("a", "b"), {})) expect_error(describe("test", {it(c("a", "b"))})) }) someInternalVariable <- 1 it("should be possible to use variables from outer environments", { expect_equal(1, someExternalVariable) expect_equal(1, someInternalVariable) }) it("should not be possible to access variables from other specs (1)", { some_test_var <- 5 }) it("should not be possible to access variables from other specs (2)", { expect_false(exists("some_test_var")) }) }) testthat/tests/testthat/test-teardown/0000755000176200001440000000000014172362302017714 5ustar liggesuserstestthat/tests/testthat/test-teardown/test-teardown.R0000644000176200001440000000026614165635513022654 0ustar liggesuserslocal_edition(2) setup(brio::write_lines("test", "teardown.txt")) teardown(file.remove("teardown.txt")) test_that("file is created", { expect_true(file.exists("teardown.txt")) }) testthat/tests/testthat/test-expect-silent.R0000644000176200001440000000045313450671736021016 0ustar liggesuserstest_that("checks for any type of output", { expect_failure(expect_silent(warning("!"))) expect_failure(expect_silent(message("!"))) expect_failure(expect_silent(print("!"))) expect_success(expect_silent("")) }) test_that("returns first argument", { expect_equal(expect_silent(1), 1) }) testthat/tests/testthat/test-test-path.R0000644000176200001440000000211114164710003020114 0ustar liggesuserstest_that("returns local path when called in 
tests", { expect_equal(test_path("test-test-path.R"), "test-test-path.R") # even if path doesn't (yet) exists expect_equal(test_path("xxxx"), "xxxx") }) test_that("returns local path when called from tools::testInstalledPackages", { old <- setwd("test-path-installed/testthat-tests/testthat") on.exit(setwd(old)) expect_equal(test_path("test-test-path.R"), "test-test-path.R") expect_equal(test_path("xxxx"), "xxxx") }) test_that("returns full path when called outside tests", { withr::local_dir(test_path("test-path-present")) withr::local_envvar("TESTTHAT" = "false") expect_equal(test_path("empty"), "tests/testthat/empty") # even when file doesn't exist expect_equal(test_path("xxx"), "tests/testthat/xxx") }) test_that("throws error if can't find tests/testthat", { withr::local_dir(test_path("test-path-missing")) withr::local_envvar("TESTTHAT" = "false") expect_error(test_path("empty"), "Can't find `tests/testthat/`") }) test_that("test_path() always returns a path",{ expect_equal(test_path(), ".") }) testthat/tests/testthat/context.R0000644000176200001440000000050314164710003016715 0ustar liggesuserscontext("First context.") test_that("Logical equivalence", { x <- TRUE expect_equal(x, TRUE) }) test_that("Numerical equivalence", { x <- 1 expect_equal(x, 1) }) context("Second context.") test_that("A passing test", { expect_equal(TRUE, TRUE) }) test_that("A failing test", { expect_equal(TRUE, FALSE) }) testthat/tests/testthat/width-80.txt0000644000176200001440000000066414172347322017234 0ustar liggesusers [1] 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 [19] 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 [37] 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 [55] 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 [73] 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 [91] 91 92 93 94 95 96 97 98 99 100 testthat/tests/testthat/test-verify-output.txt0000644000176200001440000000052614172347327021511 0ustar liggesusers> # Output > 1 + 2 [1] 3 > invisible(1:10) > 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + + 12345678 + 12345678 + 12345678 + 12345678 [1] 135802458 Header ====== > # Other output > letters [1] "a" "b" "c" "d" "e" "f" "g" "h" "i" "j" "k" "l" "m" "n" "o" "p" "q" "r" "s" [20] "t" "u" "v" "w" "x" "y" "z" testthat/tests/testthat/_snaps/0000755000176200001440000000000014166627056016414 5ustar liggesuserstestthat/tests/testthat/_snaps/reporter-rstudio.md0000644000176200001440000000104214172347324022256 0ustar liggesusers# reporter basics works tests.R:12:3 [failure] Failure:1. FALSE is not TRUE tests.R:17:3 [failure] Failure:2a. FALSE is not TRUE tests.R:23:3 [error] Error:1. Error in `eval(code, test_env)`: stop tests.R:31:3 [error] errors get tracebacks. Error in `h()`: ! tests.R:37:3 [skip] explicit skips are reported. Reason: skip tests.R:40:1 [skip] empty tests are implicitly skipped. Reason: empty test tests.R:49:3 [warning] warnings get backtraces. def tests.R:45:1 [skip] warnings get backtraces. Reason: empty test testthat/tests/testthat/_snaps/reporter-progress.md0000644000176200001440000003332414172347324022441 0ustar liggesusers# captures error before first test v | F W S OK | Context / | 0 | reporters/error-setup - | 1 0 | reporters/error-setup x | 1 0 | reporters/error-setup -------------------------------------------------------------------------------- Error (error-setup.R:6:1): (code run outside of `test_that()`) Error in `h()`: ! Backtrace: 1. testthat::setup(f()) reporters/error-setup.R:6:0 3. f() 4. 
g() reporters/error-setup.R:1:5 5. h() reporters/error-setup.R:2:5 -------------------------------------------------------------------------------- == Results ===================================================================== [ FAIL 1 | WARN 0 | SKIP 0 | PASS 0 ] I believe in you! # gracefully handles multiple contexts v | F W S OK | Context / | 0 | reporters/context / | 0 | my context - | 1 | my context v | 1 | my context == Results ===================================================================== [ FAIL 0 | WARN 0 | SKIP 0 | PASS 1 ] You rock! # fails after max_fail tests v | F W S OK | Context / | 0 | reporters/fail-many - | 1 0 | reporters/fail-many \ | 2 0 | reporters/fail-many | | 3 0 | reporters/fail-many / | 4 0 | reporters/fail-many - | 5 0 | reporters/fail-many \ | 6 0 | reporters/fail-many | | 7 0 | reporters/fail-many / | 8 0 | reporters/fail-many - | 9 0 | reporters/fail-many \ | 10 0 | reporters/fail-many | | 11 0 | reporters/fail-many x | 11 0 | reporters/fail-many -------------------------------------------------------------------------------- Failure (fail-many.R:3:5): Example FALSE is not TRUE `actual`: FALSE `expected`: TRUE Failure (fail-many.R:3:5): Example FALSE is not TRUE `actual`: FALSE `expected`: TRUE Failure (fail-many.R:3:5): Example FALSE is not TRUE `actual`: FALSE `expected`: TRUE Failure (fail-many.R:3:5): Example FALSE is not TRUE `actual`: FALSE `expected`: TRUE Failure (fail-many.R:3:5): Example FALSE is not TRUE `actual`: FALSE `expected`: TRUE Failure (fail-many.R:3:5): Example FALSE is not TRUE `actual`: FALSE `expected`: TRUE Failure (fail-many.R:3:5): Example FALSE is not TRUE `actual`: FALSE `expected`: TRUE Failure (fail-many.R:3:5): Example FALSE is not TRUE `actual`: FALSE `expected`: TRUE Failure (fail-many.R:3:5): Example FALSE is not TRUE `actual`: FALSE `expected`: TRUE Failure (fail-many.R:3:5): Example FALSE is not TRUE `actual`: FALSE `expected`: TRUE Failure (fail-many.R:3:5): Example FALSE is not TRUE `actual`: FALSE `expected`: TRUE -------------------------------------------------------------------------------- Maximum number of failures exceeded; quitting at end of file. Increase this number with (e.g.) `Sys.setenv('TESTTHAT_MAX_FAILS' = Inf)` == Results ===================================================================== [ FAIL 11 | WARN 0 | SKIP 0 | PASS 0 ] == Terminated early ============================================================ I believe in you! # can fully suppress incremental updates v | F W S OK | Context / | 0 | reporters/successes - | 1 | reporters/successes \ | 2 | reporters/successes | | 3 | reporters/successes / | 4 | reporters/successes - | 5 | reporters/successes \ | 6 | reporters/successes | | 7 | reporters/successes v | 7 | reporters/successes == Results ===================================================================== [ FAIL 0 | WARN 0 | SKIP 0 | PASS 7 ] You rock! --- v | F W S OK | Context v | 7 | reporters/successes == Results ===================================================================== [ FAIL 0 | WARN 0 | SKIP 0 | PASS 7 ] You rock! 
# reports backtraces v | F W S OK | Context / | 0 | reporters/backtraces - | 1 0 | reporters/backtraces \ | 2 0 | reporters/backtraces | | 3 0 | reporters/backtraces / | 4 0 | reporters/backtraces - | 5 0 | reporters/backtraces \ | 6 0 | reporters/backtraces | | 6 1 0 | reporters/backtraces / | 6 1 1 | reporters/backtraces - | 7 1 1 | reporters/backtraces \ | 8 1 1 | reporters/backtraces | | 9 1 1 | reporters/backtraces x | 9 1 1 | reporters/backtraces -------------------------------------------------------------------------------- Error (backtraces.R:6:3): errors thrown at block level are entraced Error in `g()`: foo Backtrace: 1. f() reporters/backtraces.R:6:2 2. g() reporters/backtraces.R:4:7 Error (backtraces.R:11:3): errors thrown from a quasi-labelled argument are entraced Error in `foo()`: foo Backtrace: 1. testthat::expect_s3_class(foo(), "foo") reporters/backtraces.R:11:2 4. foo() Error (backtraces.R:18:3): errors thrown from a quasi-labelled argument are entraced (deep case) Error in `foo()`: foo Backtrace: 1. testthat::expect_s3_class(f(), "foo") reporters/backtraces.R:18:2 4. f() 5. g() reporters/backtraces.R:16:7 9. foo() Error (backtraces.R:28:3): errors thrown from a quasi-labelled argument are entraced (deep deep case) Error in `bar()`: foobar Backtrace: 1. f() reporters/backtraces.R:28:2 2. g() reporters/backtraces.R:25:7 6. foo() 7. bar() reporters/backtraces.R:22:9 Error (backtraces.R:35:3): failed expect_error() prints a backtrace Error in `signaller()`: bar Backtrace: 1. testthat::expect_error(f(), "foo") reporters/backtraces.R:35:2 7. f() 8. signaller() reporters/backtraces.R:32:7 Error (backtraces.R:43:3): Errors are inspected with `conditionMessage()` Error: dispatched Warning (backtraces.R:50:3): also get backtraces for warnings foobar Backtrace: 1. foo() reporters/backtraces.R:50:2 2. bar() reporters/backtraces.R:47:9 Error (backtraces.R:58:3): deep stacks are trimmed Error in `f(x - 1)`: This is deep Backtrace: 1. f(25) reporters/backtraces.R:58:2 2. f(x - 1) reporters/backtraces.R:56:4 3. f(x - 1) reporters/backtraces.R:56:4 4. f(x - 1) reporters/backtraces.R:56:4 5. f(x - 1) reporters/backtraces.R:56:4 6. f(x - 1) reporters/backtraces.R:56:4 7. f(x - 1) reporters/backtraces.R:56:4 8. f(x - 1) reporters/backtraces.R:56:4 9. f(x - 1) reporters/backtraces.R:56:4 10. f(x - 1) reporters/backtraces.R:56:4 ... 17. f(x - 1) reporters/backtraces.R:56:4 18. f(x - 1) reporters/backtraces.R:56:4 19. f(x - 1) reporters/backtraces.R:56:4 20. f(x - 1) reporters/backtraces.R:56:4 21. f(x - 1) reporters/backtraces.R:56:4 22. f(x - 1) reporters/backtraces.R:56:4 23. f(x - 1) reporters/backtraces.R:56:4 24. f(x - 1) reporters/backtraces.R:56:4 25. f(x - 1) reporters/backtraces.R:56:4 26. f(x - 1) reporters/backtraces.R:56:4 Failure (backtraces.R:66:1): (code run outside of `test_that()`) FALSE is not TRUE `actual`: FALSE `expected`: TRUE Backtrace: 1. f() reporters/backtraces.R:66:0 2. g() reporters/backtraces.R:62:5 3. h() reporters/backtraces.R:63:5 4. testthat::expect_true(FALSE) reporters/backtraces.R:64:5 Failure (backtraces.R:69:3): nested expectations get backtraces FALSE is not TRUE `actual`: FALSE `expected`: TRUE Backtrace: 1. f() reporters/backtraces.R:69:2 2. g() reporters/backtraces.R:62:5 3. h() reporters/backtraces.R:63:5 4. 
testthat::expect_true(FALSE) reporters/backtraces.R:64:5 -------------------------------------------------------------------------------- == Results ===================================================================== [ FAIL 9 | WARN 1 | SKIP 0 | PASS 1 ] I believe in you! # records skips v | F W S OK | Context / | 0 | reporters/skips - | 1 0 | reporters/skips \ | 2 0 | reporters/skips v | 2 0 | reporters/skips -------------------------------------------------------------------------------- Skip (skips.R:2:3): regular skip Reason: regular skip Skip (skips.R:6:3): skip with details Reason: longer skip: this is what happened -------------------------------------------------------------------------------- == Results ===================================================================== -- Skipped tests -------------------------------------------------------------- * longer skip (1) * regular skip (1) [ FAIL 0 | WARN 0 | SKIP 2 | PASS 0 ] You rock! # compact display is informative == Testing reporters/tests.R =================================================== [ FAIL 0 | WARN 0 | SKIP 0 | PASS 0 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 0 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 1 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 1 ] [ FAIL 1 | WARN 0 | SKIP 0 | PASS 1 ] [ FAIL 2 | WARN 0 | SKIP 0 | PASS 1 ] -- Failure (tests.R:12:3): Failure:1 ------------------------------------------- FALSE is not TRUE `actual`: FALSE `expected`: TRUE -- Failure (tests.R:17:3): Failure:2a ------------------------------------------ FALSE is not TRUE `actual`: FALSE `expected`: TRUE Backtrace: 1. f() 2. testthat::expect_true(FALSE) [ FAIL 2 | WARN 0 | SKIP 0 | PASS 1 ] [ FAIL 3 | WARN 0 | SKIP 0 | PASS 1 ] [ FAIL 4 | WARN 0 | SKIP 0 | PASS 1 ] -- Error (tests.R:23:3): Error:1 ----------------------------------------------- Error in `eval(code, test_env)`: stop -- Error (tests.R:31:3): errors get tracebacks --------------------------------- Error in `h()`: ! Backtrace: 1. f() 2. g() 3. h() [ FAIL 4 | WARN 0 | SKIP 0 | PASS 1 ] [ FAIL 4 | WARN 0 | SKIP 1 | PASS 1 ] [ FAIL 4 | WARN 0 | SKIP 2 | PASS 1 ] -- Skip (tests.R:37:3): explicit skips are reported ---------------------------- Reason: skip -- Skip (tests.R:40:1): empty tests are implicitly skipped --------------------- Reason: empty test [ FAIL 4 | WARN 0 | SKIP 2 | PASS 1 ] [ FAIL 4 | WARN 1 | SKIP 2 | PASS 1 ] [ FAIL 4 | WARN 1 | SKIP 3 | PASS 1 ] -- Warning (tests.R:49:3): warnings get backtraces ----------------------------- def Backtrace: 1. f() reporters/tests.R:49:2 -- Skip (tests.R:45:1): warnings get backtraces -------------------------------- Reason: empty test [ FAIL 4 | WARN 1 | SKIP 3 | PASS 1 ] # display of successes only is compact == Testing reporters/successes.R =============================================== [ FAIL 0 | WARN 0 | SKIP 0 | PASS 0 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 1 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 2 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 3 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 4 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 5 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 6 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 7 ] Done! 
--- [ FAIL 0 | WARN 0 | SKIP 0 | PASS 0 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 1 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 2 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 3 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 4 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 5 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 6 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 7 ] testthat/tests/testthat/_snaps/snapshot-file.md0000644000176200001440000000137714172347326021516 0ustar liggesusers# snapshot_hint output differs in R CMD check Code cat(snapshot_review_hint("lala", "foo.r", check = FALSE, ci = FALSE)) Output Run `testthat::snapshot_review('lala/')` to review changes --- Code cat(snapshot_review_hint("lala", "foo.r", check = TRUE, ci = FALSE)) Output * Locate check directory * Copy 'tests/testthat/_snaps/lala/foo.new.r' to local test directory * Run `testthat::snapshot_review('lala/')` to review changes --- Code cat(snapshot_review_hint("lala", "foo.r", check = TRUE, ci = TRUE)) Output * Download and unzip run artifact * Copy 'tests/testthat/_snaps/lala/foo.new.r' to local test directory * Run `testthat::snapshot_review('lala/')` to review changes testthat/tests/testthat/_snaps/reporter-stop.md0000644000176200001440000000223714172347324021561 0ustar liggesusers# produces useful output Test passed -- Failure (tests.R:12:3): Failure:1 ------------------------------------------- FALSE is not TRUE `actual`: FALSE `expected`: TRUE -- Failure (tests.R:17:3): Failure:2a ------------------------------------------ FALSE is not TRUE `actual`: FALSE `expected`: TRUE Backtrace: 1. f() 2. testthat::expect_true(FALSE) -- Error (tests.R:23:3): Error:1 ----------------------------------------------- Error in `eval(code, test_env)`: stop -- Error (tests.R:31:3): errors get tracebacks --------------------------------- Error in `h()`: ! Backtrace: 1. f() 2. g() 3. h() -- Skip (tests.R:37:3): explicit skips are reported ---------------------------- Reason: skip -- Skip (tests.R:40:1): empty tests are implicitly skipped --------------------- Reason: empty test -- Warning (tests.R:49:3): warnings get backtraces ----------------------------- def Backtrace: 1. f() -- Skip (tests.R:45:1): warnings get backtraces -------------------------------- Reason: empty test testthat/tests/testthat/_snaps/snapshot-cleanup.md0000644000176200001440000000033214172347325022213 0ustar liggesusers# snapshot cleanup makes nice message if needed Code snapshot_cleanup(dir) Message Deleting unused snapshots: * a.md * b.md Code snapshot_cleanup(dir, c("a", "b")) testthat/tests/testthat/_snaps/reporter-check.md0000644000176200001440000000623014172347323021645 0ustar liggesusers# basic report works [ FAIL 4 | WARN 1 | SKIP 3 | PASS 1 ] == Skipped tests =============================================================== * empty test (2) * skip (1) == Warnings ==================================================================== -- Warning (tests.R:49:3): warnings get backtraces ----------------------------- def Backtrace: 1. f() reporters/tests.R:49:2 == Failed tests ================================================================ -- Failure (tests.R:12:3): Failure:1 ------------------------------------------- FALSE is not TRUE `actual`: FALSE `expected`: TRUE -- Failure (tests.R:17:3): Failure:2a ------------------------------------------ FALSE is not TRUE `actual`: FALSE `expected`: TRUE Backtrace: x 1. \-f() reporters/tests.R:17:2 2. 
\-testthat::expect_true(FALSE) reporters/tests.R:16:7 -- Error (tests.R:23:3): Error:1 ----------------------------------------------- Error in `eval(code, test_env)`: stop -- Error (tests.R:31:3): errors get tracebacks --------------------------------- Error in `h()`: ! Backtrace: x 1. \-f() reporters/tests.R:31:2 2. \-g() reporters/tests.R:27:7 3. \-h() reporters/tests.R:28:7 [ FAIL 4 | WARN 1 | SKIP 3 | PASS 1 ] # doesn't truncate long lines [ FAIL 1 | WARN 0 | SKIP 0 | PASS 0 ] == Failed tests ================================================================ -- Failure (long-test.R:2:3): That very long test messages are not truncated because they contain useful information that you probably want to read -- Failure has been forced [ FAIL 1 | WARN 0 | SKIP 0 | PASS 0 ] # always shows summary [ FAIL 0 | WARN 0 | SKIP 0 | PASS 7 ] # shows warnings when not on CRAN [ FAIL 4 | WARN 1 | SKIP 3 | PASS 1 ] == Skipped tests =============================================================== * empty test (2) * skip (1) == Warnings ==================================================================== -- Warning (tests.R:49:3): warnings get backtraces ----------------------------- def Backtrace: 1. f() reporters/tests.R:49:2 == Failed tests ================================================================ -- Failure (tests.R:12:3): Failure:1 ------------------------------------------- FALSE is not TRUE `actual`: FALSE `expected`: TRUE -- Failure (tests.R:17:3): Failure:2a ------------------------------------------ FALSE is not TRUE `actual`: FALSE `expected`: TRUE Backtrace: x 1. \-f() reporters/tests.R:17:2 2. \-testthat::expect_true(FALSE) reporters/tests.R:16:7 -- Error (tests.R:23:3): Error:1 ----------------------------------------------- Error in `eval(code, test_env)`: stop -- Error (tests.R:31:3): errors get tracebacks --------------------------------- Error in `h()`: ! Backtrace: x 1. \-f() reporters/tests.R:31:2 2. \-g() reporters/tests.R:27:7 3. \-h() reporters/tests.R:28:7 [ FAIL 4 | WARN 1 | SKIP 3 | PASS 1 ] testthat/tests/testthat/_snaps/snapshot-file/0000755000176200001440000000000014172362302021153 5ustar liggesuserstestthat/tests/testthat/_snaps/snapshot-file/foo.r0000644000176200001440000000006414164710003022115 0ustar liggesusersa b c d e f g h i j k l m n o p q r s t u v w x y z testthat/tests/testthat/_snaps/snapshot-file/secret.txt0000644000176200001440000000004314165635513023207 0ustar liggesusers ssh squirrel testthat/tests/testthat/_snaps/snapshot-file/a.txt0000644000176200001440000000000314165635513022136 0ustar liggesusersa testthat/tests/testthat/_snaps/snapshot-file/foo.png0000644000176200001440000000542714164710003022450 0ustar liggesusersPNG  IHDR,,N~GPLTE888999:::;;;<<<===>>>???@@@AAABBBCCCDDDEEEFFFGGGHHHIIIJJJKKKLLLMMMNNNOOOPPPQQQRRRSSSTTTUUUVVVWWWXXXYYY[[[\\\]]]^^^___bbbcccdddeeefffggghhhjjjkkklllmmmnnnooopppqqqrrrssstttuuuvvvxxxzzz{{{|||}}}~~~^IDATxEM"" vQQQ@E,Q,XPؐ4{EkCT eg>df|⺷74XD, b X@"D, b X@"D, b X@"D, b X@"D, b X@"D, b X@"D, b X@"j"1zm䟮7?^o\mEIL!V6+{? 
h,X`Z8bXk䟮9VPuQ~VX6ck>9`w_FX]Xڶ_౅=Q#ii!jyƱ|y1j5:ڝc`O?.X m|`qVױJ븋u>JM\Qj>H]nwkmuqAոڗc`uV6X#)r QUV-uN?@5nGAu~m闏1vOXO:3\Z[ }jĚba#5r>wجzXK+Uxu%X=-zHja`}!5tZV7\A^j91e:u~Ag?K=Du>ѹ"JYl C߹>_-c7I’˹rk5bycP1cb}U1cp:aXϻX9?XXoXߕ:aXږ3Vcu[Ej3+yNE=]\X݇{R[)}N|ƤmqsTskGkW?޷e_:[4XNSO!]Hz UD>HX)Z$X&P7+Ǎ ˈzDaq`KVXE$X8X*"i"_,XG:iƃ͗YCXޑHŝ',DTgw~i31RbHoQ(hai'^PaΨ^Pap^`a;9{asi/i&9Xb8.8V8K g%9{5|+4XpX[,~.8,K{Ix˲ZjtVk/'UX}t R:W/d\{gᾭh-iX4'd7w ַ.> a>J딋k>s#i2t~NXXL+;oVU:_oϽ&i݊ZN)z?2N~Y)~ܰ)T0 }쳝\YXUYXU?wĚ%@HlU&UWSpYV)b X@"D, b X@"D, b X@"D, b X@"D, b X@"D, b X@"D, b X@"D, b X@"D, b X@"D,ccL^IENDB`testthat/tests/testthat/_snaps/snapshot-file/foo.csv0000644000176200001440000000336714164710003022460 0ustar liggesusers"","mpg","cyl","disp","hp","drat","wt","qsec","vs","am","gear","carb" "Mazda RX4",21,6,160,110,3.9,2.62,16.46,0,1,4,4 "Mazda RX4 Wag",21,6,160,110,3.9,2.875,17.02,0,1,4,4 "Datsun 710",22.8,4,108,93,3.85,2.32,18.61,1,1,4,1 "Hornet 4 Drive",21.4,6,258,110,3.08,3.215,19.44,1,0,3,1 "Hornet Sportabout",18.7,8,360,175,3.15,3.44,17.02,0,0,3,2 "Valiant",18.1,6,225,105,2.76,3.46,20.22,1,0,3,1 "Duster 360",14.3,8,360,245,3.21,3.57,15.84,0,0,3,4 "Merc 240D",24.4,4,146.7,62,3.69,3.19,20,1,0,4,2 "Merc 230",22.8,4,140.8,95,3.92,3.15,22.9,1,0,4,2 "Merc 280",19.2,6,167.6,123,3.92,3.44,18.3,1,0,4,4 "Merc 280C",17.8,6,167.6,123,3.92,3.44,18.9,1,0,4,4 "Merc 450SE",16.4,8,275.8,180,3.07,4.07,17.4,0,0,3,3 "Merc 450SL",17.3,8,275.8,180,3.07,3.73,17.6,0,0,3,3 "Merc 450SLC",15.2,8,275.8,180,3.07,3.78,18,0,0,3,3 "Cadillac Fleetwood",10.4,8,472,205,2.93,5.25,17.98,0,0,3,4 "Lincoln Continental",10.4,8,460,215,3,5.424,17.82,0,0,3,4 "Chrysler Imperial",14.7,8,440,230,3.23,5.345,17.42,0,0,3,4 "Fiat 128",32.4,4,78.7,66,4.08,2.2,19.47,1,1,4,1 "Honda Civic",30.4,4,75.7,52,4.93,1.615,18.52,1,1,4,2 "Toyota Corolla",33.9,4,71.1,65,4.22,1.835,19.9,1,1,4,1 "Toyota Corona",21.5,4,120.1,97,3.7,2.465,20.01,1,0,3,1 "Dodge Challenger",15.5,8,318,150,2.76,3.52,16.87,0,0,3,2 "AMC Javelin",15.2,8,304,150,3.15,3.435,17.3,0,0,3,2 "Camaro Z28",13.3,8,350,245,3.73,3.84,15.41,0,0,3,4 "Pontiac Firebird",19.2,8,400,175,3.08,3.845,17.05,0,0,3,2 "Fiat X1-9",27.3,4,79,66,4.08,1.935,18.9,1,1,4,1 "Porsche 914-2",26,4,120.3,91,4.43,2.14,16.7,0,1,5,2 "Lotus Europa",30.4,4,95.1,113,3.77,1.513,16.9,1,1,5,2 "Ford Pantera L",15.8,8,351,264,4.22,3.17,14.5,0,1,5,4 "Ferrari Dino",19.7,6,145,175,3.62,2.77,15.5,0,1,5,6 "Maserati Bora",15,8,301,335,3.54,3.57,14.6,0,1,5,8 "Volvo 142E",21.4,4,121,109,4.11,2.78,18.6,1,1,4,2 testthat/tests/testthat/_snaps/rlang-1.0/0000755000176200001440000000000014165427540020006 5ustar liggesuserstestthat/tests/testthat/_snaps/rlang-1.0/snapshot.md0000644000176200001440000000126514165427540022173 0ustar liggesusers# full condition message is printed with rlang Code foo <- error_cnd("foo", message = "Title parent.") abort("Title.", parent = foo) Condition Error: Title. Caused by error: Title parent. 
# can print with and without condition classes Code f() Message foo Condition Warning in `f()`: bar Condition Error in `f()`: baz --- Code f() Message foo Condition Warning in `f()`: bar Error in `f()`: baz # errors and warnings are folded Code f() Condition Warning in `f()`: foo Error in `f()`: bar testthat/tests/testthat/_snaps/snapshot.md0000644000176200001440000000704614172347327020601 0ustar liggesusers# can snapshot output y --- y --- y --- y # can snapshot everything Code f() Output [1] "1" Message 2 Warning 3 Error 4 # empty lines are preserved Code f() Output 1 Message 2 Warning 3 Error 4 # multiple outputs of same type are collapsed Code x <- 1 y <- 1 { message("a") message("b") } Message a b Code { warning("a") warning("b") } Warning a b # can scrub output/messages/warnings/errors Code secret() Output [1] "" Message Warning Error --- Code print("secret") Output [1] "****" # can capture error/warning messages This is an error --- This is a warning # can check error/warning classes Code expect_snapshot_error(1) Error 1 did not generate error --- Code expect_snapshot_error(1, class = "myerror") Error 1 did not generate error with class 'myerror' --- Code expect_snapshot_warning(1) Error 1 did not generate warning --- Code expect_snapshot_warning(1, class = "mywarning") Error 1 did not generate warning with class 'mywarning' # snapshot handles multi-line input Code 1 + 2 Output [1] 3 Code 3 + 4 Output [1] 7 Code # this is a comment # snapshot captures output if visible Code f_visible() Output [1] "x" --- Code f_invisible() # captures custom classes Code f() Message Hello Warning Goodbye Error Eeek! # even with multiple lines a b c --- a b c # can snapshot values [ "a", 1.5, 1, true ] --- { "type": "list", "attributes": {}, "value": [ { "type": "character", "attributes": {}, "value": ["a"] }, { "type": "double", "attributes": {}, "value": [1.5] }, { "type": "integer", "attributes": {}, "value": [1] }, { "type": "logical", "attributes": {}, "value": [true] } ] } --- list("a", 1.5, 1L, TRUE) --- WAoAAAACAAMGAwACAwAAAAATAAAABAAAABAAAAABAAQACQAAAAFhAAAADgAAAAE/+AAAAAAA AAAAAA0AAAABAAAAAQAAAAoAAAABAAAAAQ== # can control snapshot value details 1.1 # tolerance passed to check_roundtrip 0.9 # `expect_snapshot()` does not inject Code x <- quote(!!foo) expect_equal(x, call("!", call("!", quote(foo)))) # hint is informative Code cat(snapshot_accept_hint("_default", "bar.R")) Output * Run `snapshot_accept('bar.R')` to accept the change * Run `snapshot_review('bar.R')` to interactively review the change Code cat(snapshot_accept_hint("foo", "bar.R")) Output * Run `snapshot_accept('foo/bar.R')` to accept the change * Run `snapshot_review('foo/bar.R')` to interactively review the change testthat/tests/testthat/_snaps/test-files.md0000644000176200001440000000400614172347327021012 0ustar liggesusers# runs all tests and records output file context test nb failed skipped error warning passed 1 test-basic.R logical tests act as expected 2 0 FALSE FALSE 0 2 2 test-basic.R logical tests ignore attributes 2 0 FALSE FALSE 0 2 3 test-basic.R equality holds 2 0 FALSE FALSE 0 2 4 test-basic.R can't access variables from other tests 2 1 0 TRUE FALSE 0 0 5 test-basic.R can't access variables from other tests 1 1 0 FALSE FALSE 0 1 6 test-empty.R empty test 1 0 TRUE FALSE 0 0 7 test-empty.R empty test with error 0 0 FALSE TRUE 0 0 8 test-errors.R simple 0 0 FALSE TRUE 0 0 9 test-errors.R after one success 1 0 FALSE TRUE 0 1 10 test-errors.R after one failure 1 1 FALSE TRUE 0 0 11 test-errors.R in the test 0 0 FALSE 
TRUE 0 0 12 test-errors.R in expect_error 1 0 FALSE FALSE 0 1 13 test-failures.R just one failure 1 1 FALSE FALSE 0 0 14 test-failures.R one failure on two 2 1 FALSE FALSE 0 1 15 test-failures.R no failure 2 0 FALSE FALSE 0 2 16 test-helper.R helper test 1 0 FALSE FALSE 0 1 17 test-skip.R Skips skip 1 0 TRUE FALSE 0 0 testthat/tests/testthat/_snaps/reporter-debug.md0000644000176200001440000000100714172347323021653 0ustar liggesusers# produces consistent output 1: expect_true(FALSE) 2: expect_waldo_constant(act, TRUE, info = info) 3: expect(identical(act$val, constant), sprintf("%s is not %s\n\n%s", act$lab, 1: f() 2: expect_true(FALSE) 3: expect_waldo_constant(act, TRUE, info = info) 4: expect(identical(act$val, constant), sprintf("%s is not %s\n\n%s", act$lab, 1: stop("stop") 1: f() 2: g() 3: h() 4: stop("!") 1: skip("skip") 1: f() 2: warning("def") testthat/tests/testthat/_snaps/expect-condition.md0000644000176200001440000000053014172347321022177 0ustar liggesusers# regexp = NULL checks for presence of error `null()` did not throw the expected error. # regexp = string matches for error message "OK" did not throw the expected error. # message method is called when expecting error `fb()` threw an unexpected error. Message: dispatched! Class: foobar/rlang_error/error/condition testthat/tests/testthat/_snaps/skip.md0000644000176200001440000000125514172347325017702 0ustar liggesusers# skip on os checks os names Code skip_on_os("amiga") Error 'arg' should be one of "windows", "mac", "linux", "solaris" # autogenerated message is always single line Reason: a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name is not TRUE testthat/tests/testthat/_snaps/edition.md0000644000176200001440000000025114172347321020356 0ustar liggesusers# deprecation only fired for newer edition Code edition_deprecate(3, "old stuff") Warning `old stuff` was deprecated in the 3rd edition. testthat/tests/testthat/_snaps/reporter-silent.md0000644000176200001440000000010714172347324022064 0ustar liggesusers# captures expectations; doesn't produce any output --- 9 testthat/tests/testthat/_snaps/reporter-teamcity.md0000644000176200001440000000552414172347324022415 0ustar liggesusers# reporter basics work ##teamcity[testSuiteStarted name='Successes'] ##teamcity[testSuiteStarted name='Success'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFinished name='expectation 1'] ##teamcity[testSuiteFinished name='Success'] ##teamcity[testSuiteFinished name='Successes'] ##teamcity[testSuiteStarted name='Failures'] ##teamcity[testSuiteStarted name='Failure:1'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFailed name='expectation 1' message='FALSE is not TRUE' details='|n`actual`: FALSE|n`expected`: TRUE '] ##teamcity[testFinished name='expectation 1'] ##teamcity[testSuiteFinished name='Failure:1'] ##teamcity[testSuiteStarted name='Failure:2a'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFailed name='expectation 1' message='FALSE is not TRUE' details='|n`actual`: FALSE|n`expected`: TRUE |nBacktrace:|n 1. f()|n 2. 
testthat::expect_true(FALSE)'] ##teamcity[testFinished name='expectation 1'] ##teamcity[testSuiteFinished name='Failure:2a'] ##teamcity[testSuiteFinished name='Failures'] ##teamcity[testSuiteStarted name='Errors'] ##teamcity[testSuiteStarted name='Error:1'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFailed name='expectation 1' message='Error in `eval(code, test_env)`: stop' details=''] ##teamcity[testFinished name='expectation 1'] ##teamcity[testSuiteFinished name='Error:1'] ##teamcity[testSuiteStarted name='errors get tracebacks'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFailed name='expectation 1' message='Error in `h()`: !' details='Backtrace:|n 1. f()|n 2. g()|n 3. h()'] ##teamcity[testFinished name='expectation 1'] ##teamcity[testSuiteFinished name='errors get tracebacks'] ##teamcity[testSuiteFinished name='Errors'] ##teamcity[testSuiteStarted name='Skips'] ##teamcity[testSuiteStarted name='explicit skips are reported'] ##teamcity[testIgnored name='expectation 1' message='Reason: skip'] ##teamcity[testSuiteFinished name='explicit skips are reported'] ##teamcity[testSuiteStarted name='empty tests are implicitly skipped'] ##teamcity[testIgnored name='expectation 1' message='Reason: empty test'] ##teamcity[testSuiteFinished name='empty tests are implicitly skipped'] ##teamcity[testSuiteFinished name='Skips'] ##teamcity[testSuiteStarted name='Warnings'] ##teamcity[testSuiteStarted name='warnings get backtraces'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFinished name='expectation 1'] ##teamcity[testIgnored name='expectation 2' message='Reason: empty test'] ##teamcity[testSuiteFinished name='warnings get backtraces'] ##teamcity[testSuiteFinished name='Warnings'] testthat/tests/testthat/_snaps/reporter.md0000644000176200001440000000013214172347325020567 0ustar liggesusers# can control output with file arg/option [1] ".FFEESSWS" --- [1] ".FFEESSWS" testthat/tests/testthat/_snaps/R4.1/0000755000176200001440000000000014165673170017035 5ustar liggesuserstestthat/tests/testthat/_snaps/R4.1/snapshot-file/0000755000176200001440000000000014164710003021573 5ustar liggesuserstestthat/tests/testthat/_snaps/R4.1/snapshot-file/nickname.txt0000644000176200001440000000001414164710003024114 0ustar liggesusersBird Hippie testthat/tests/testthat/_snaps/R4.1/snapshot.md0000644000176200001440000000013314172347327021213 0ustar liggesusers# can access nickname Code version$nickname Output [1] "Bird Hippie" testthat/tests/testthat/_snaps/rlang-pre-1.0/0000755000176200001440000000000014165405045020566 5ustar liggesuserstestthat/tests/testthat/_snaps/rlang-pre-1.0/snapshot.md0000644000176200001440000000116314172347327022756 0ustar liggesusers# full condition message is printed with rlang Code foo <- error_cnd("foo", message = "Title parent.") abort("Title.", parent = foo) Error Title. # can print with and without condition classes Code f() Message foo Warning bar Error baz --- Code f() Message foo Warning bar Error baz # errors and warnings are folded Code f() Warning foo Error bar testthat/tests/testthat/_snaps/expect-equality.md0000644000176200001440000000100514172347322022045 0ustar liggesusers# provide useful feedback on failure 1 (`actual`) not identical to "a" (`expected`). `actual` is a double vector (1) `expected` is a character vector ('a') --- 1 (`actual`) not equal to "a" (`expected`). `actual` is a double vector (1) `expected` is a character vector ('a') --- 1 not identical to "a". Types not compatible: double is not character --- 1 not equal to "a". 
Types not compatible: double is not character testthat/tests/testthat/_snaps/reporter-minimal.md0000644000176200001440000000004714172347323022216 0ustar liggesusers# reporter as expected .FFEESSWS testthat/tests/testthat/_snaps/R4.0/0000755000176200001440000000000014164710003017016 5ustar liggesuserstestthat/tests/testthat/_snaps/R4.0/snapshot-file/0000755000176200001440000000000014164710003021572 5ustar liggesuserstestthat/tests/testthat/_snaps/R4.0/snapshot-file/nickname.txt0000644000176200001440000000002014164710003024110 0ustar liggesusersShake and Throw testthat/tests/testthat/_snaps/R4.0/snapshot.md0000644000176200001440000000013714164710003021200 0ustar liggesusers# can access nickname Code version$nickname Output [1] "Shake and Throw" testthat/tests/testthat/_snaps/reporter-location.md0000644000176200001440000000151414172347323022400 0ustar liggesusers# reporter as expected Start test: Success tests.R:6:3 [success] End test: Success Start test: Failure:1 tests.R:12:3 [failure] End test: Failure:1 Start test: Failure:2a tests.R:17:3 [failure] End test: Failure:2a Start test: Error:1 tests.R:23:3 [error] End test: Error:1 Start test: errors get tracebacks tests.R:31:3 [error] End test: errors get tracebacks Start test: explicit skips are reported tests.R:37:3 [skip] End test: explicit skips are reported Start test: empty tests are implicitly skipped tests.R:40:1 [skip] End test: empty tests are implicitly skipped Start test: warnings get backtraces tests.R:49:3 [warning] tests.R:45:1 [skip] End test: warnings get backtraces testthat/tests/testthat/_snaps/quasi-label.md0000644000176200001440000000141014172347322021121 0ustar liggesusers# produces useful summaries for long calls Code expr_label(quote(foo(a = "this is a long argument", b = "this is a long argument", c = "this is a long argument"))) Output [1] "foo(...)" Code expr_label(quote(arg + arg + arg + arg + arg + arg + arg + arg + arg + arg + arg + arg)) Output [1] "... + arg" Code expr_label(quote(arg + (arg + arg + arg + arg + arg + arg + arg + arg + arg + arg + arg))) Output [1] "arg + ..." Code expr_label(quote(function(a, b, c) { a + b + c })) Output [1] "function(a, b, c) ..." # informative error for missing arg Code expect_equal() Error argument `object` is missing, with no default. testthat/tests/testthat/_snaps/snapshot-manage.md0000644000176200001440000000162314172347326022021 0ustar liggesusers# informs about files being accepted Code snapshot_accept(path = path) Message Updating snapshots: * a.md * b.md --- Code snapshot_accept(path = path) Message No snapshots to update # can accept specific files Code snapshot_accept("a", path = path) Message Updating snapshots: * a.md --- Code snapshot_accept("test/a.txt", path = path) Message Updating snapshots: * test/a.txt --- Code snapshot_accept("test/", path = path) Message Updating snapshots: * test/a.txt # can work with variants Code snapshot_accept(path = path) Message Updating snapshots: * foo/a.md --- Code snapshot_accept("foo/a", path = path) Message Updating snapshots: * foo/a.md testthat/tests/testthat/_snaps/reporter-junit.md0000644000176200001440000000501714172347323021723 0ustar liggesusers# reporter doesn't change without warning FALSE is not TRUE `actual`: FALSE `expected`: TRUE FALSE is not TRUE `actual`: FALSE `expected`: TRUE Backtrace: 1. f() 2. testthat::expect_true(FALSE) Error in `eval(code, test_env)`: stop Error in `h()`: ! Backtrace: 1. f() 2. g() 3. 
h() testthat/tests/testthat/_snaps/reporter-tap.md0000644000176200001440000000160514172347324021356 0ustar liggesusers# reporter works 1..9 # Context Successes ok 1 Success # Context Failures not ok 2 Failure:1 FALSE is not TRUE `actual`: FALSE `expected`: TRUE not ok 3 Failure:2a FALSE is not TRUE `actual`: FALSE `expected`: TRUE Backtrace: 1. f() reporters/tests.R:17:2 2. testthat::expect_true(FALSE) reporters/tests.R:16:7 # Context Errors not ok 4 Error:1 Error in `eval(code, test_env)`: stop not ok 5 errors get tracebacks Error in `h()`: ! Backtrace: 1. f() reporters/tests.R:31:2 2. g() reporters/tests.R:27:7 3. h() reporters/tests.R:28:7 # Context Skips ok 6 # SKIP Reason: skip ok 7 # SKIP Reason: empty test # Context Warnings ok 8 # WARNING def Backtrace: 1. f() reporters/tests.R:49:2 ok 9 # SKIP Reason: empty test testthat/tests/testthat/_snaps/reporter-summary.md0000644000176200001440000001044614172347324022272 0ustar liggesusers# can control appearance of dots reporters/tests: Successes: . Failures: 12 Errors: 34 Skips: SS Warnings: WS == Skipped ===================================================================== 1. explicit skips are reported (tests.R:37:3) - Reason: skip 2. empty tests are implicitly skipped (tests.R:40:1) - Reason: empty test 3. warnings get backtraces (tests.R:45:1) - Reason: empty test == Warnings ==================================================================== 1. warnings get backtraces (tests.R:49:3) - def == Failed ====================================================================== -- 1. Failure (tests.R:12:3): Failure:1 ---------------------------------------- FALSE is not TRUE `actual`: FALSE `expected`: TRUE -- 2. Failure (tests.R:17:3): Failure:2a --------------------------------------- FALSE is not TRUE `actual`: FALSE `expected`: TRUE Backtrace: 1. f() reporters/tests.R:17:2 2. testthat::expect_true(FALSE) reporters/tests.R:16:7 -- 3. Error (tests.R:23:3): Error:1 -------------------------------------------- Error in `eval(code, test_env)`: stop -- 4. Error (tests.R:31:3): errors get tracebacks ------------------------------ Error in `h()`: ! Backtrace: 1. f() reporters/tests.R:31:2 2. g() reporters/tests.R:27:7 3. h() reporters/tests.R:28:7 == DONE ======================================================================== --- reporters/tests: Successes: Failures: 12 Errors: 34 Skips: SS Warnings: WS == Skipped ===================================================================== 1. explicit skips are reported (tests.R:37:3) - Reason: skip 2. empty tests are implicitly skipped (tests.R:40:1) - Reason: empty test 3. warnings get backtraces (tests.R:45:1) - Reason: empty test == Warnings ==================================================================== 1. warnings get backtraces (tests.R:49:3) - def == Failed ====================================================================== -- 1. Failure (tests.R:12:3): Failure:1 ---------------------------------------- FALSE is not TRUE `actual`: FALSE `expected`: TRUE -- 2. Failure (tests.R:17:3): Failure:2a --------------------------------------- FALSE is not TRUE `actual`: FALSE `expected`: TRUE Backtrace: 1. f() reporters/tests.R:17:2 2. testthat::expect_true(FALSE) reporters/tests.R:16:7 -- 3. Error (tests.R:23:3): Error:1 -------------------------------------------- Error in `eval(code, test_env)`: stop -- 4. Error (tests.R:31:3): errors get tracebacks ------------------------------ Error in `h()`: ! Backtrace: 1. f() reporters/tests.R:31:2 2. g() reporters/tests.R:27:7 3. 
h() reporters/tests.R:28:7 == DONE ======================================================================== # can control maximum reports reporters/tests: Successes: . Failures: 12 Errors: 34 Skips: SS Warnings: WS == Skipped ===================================================================== 1. explicit skips are reported (tests.R:37:3) - Reason: skip 2. empty tests are implicitly skipped (tests.R:40:1) - Reason: empty test 3. warnings get backtraces (tests.R:45:1) - Reason: empty test == Warnings ==================================================================== 1. warnings get backtraces (tests.R:49:3) - def == Failed ====================================================================== -- 1. Failure (tests.R:12:3): Failure:1 ---------------------------------------- FALSE is not TRUE `actual`: FALSE `expected`: TRUE -- 2. Failure (tests.R:17:3): Failure:2a --------------------------------------- FALSE is not TRUE `actual`: FALSE `expected`: TRUE Backtrace: 1. f() reporters/tests.R:17:2 2. testthat::expect_true(FALSE) reporters/tests.R:16:7 ... and 2 more Maximum number of 2 failures reached, some test results may be missing. == DONE ======================================================================== testthat/tests/testthat/_snaps/expect-inheritance.md0000644000176200001440000000051714172347322022510 0ustar liggesusers# expect_s[34]_class can check not S3/S4 factor() is an S3 object --- A() is an S4 object # test_s4_class respects class hierarchy C() inherits from 'C'/'A'/'B'/'list'/'vector' not 'D'. # test_s3_class respects class hierarchy `x` inherits from 'a'/'b' not 'c'. --- `x` inherits from 'a'/'b' not 'c'/'d'. testthat/tests/testthat/_snaps/expect-constant.md0000644000176200001440000000072014172347321022043 0ustar liggesusers# logical tests act as expected FALSE is not TRUE `actual`: FALSE `expected`: TRUE --- TRUE is not FALSE `actual`: TRUE `expected`: FALSE # expect_null works 1L is not NULL `actual` is an integer vector (1) `expected` is NULL --- environment() is not NULL `actual` is an environment `expected` is NULL testthat/tests/testthat/test-expect-output.R0000644000176200001440000000234714164710003021044 0ustar liggesusersf <- function() NULL g <- function() cat("!") test_that("expect = NA checks for no output", { expect_success(expect_output(f(), NA)) expect_failure(expect_output(g(), NA), "produced output") }) test_that("expect = NULL checks for some output", { expect_failure(expect_output(f(), NULL), "produced no output") expect_success(expect_output(g(), NULL)) }) test_that("expect = string checks for match", { expect_success(expect_output(g(), "!")) expect_failure(expect_output(g(), "x"), 'does not match "x"') expect_failure(expect_output("a", "x"), "produced no output") }) test_that("multiline outputs captures and matches", { expect_success(expect_output(cat("1\n2"), "1\n2")) }) test_that("expect_output sets width", { x <- expect_output(getOption("width"), NA) expect_equal(x, 80) x <- expect_output(getOption("width"), NA, width = 20) expect_equal(x, 20) }) test_that("... 
passed on to grepl", { expect_success(expect_output(print("X"), "x", ignore.case = TRUE)) }) test_that("returns first argument", { expect_equal(expect_output(1, NA), 1) }) test_that("uses unicode characters in output where available", { skip_if_not(l10n_info()$`UTF-8`) bar <- "\u2551" expect_success(expect_output(cat(bar), "\u2551")) }) testthat/tests/testthat/test-snapshot-manage.R0000644000176200001440000000314714166627056021323 0ustar liggesuserstest_that("informs about files being accepted", { path <- local_snapshot_dir(c("a.md", "a.new.md", "b.md", "b.new.md")) expect_snapshot(snapshot_accept(path = path)) expect_equal(dir(file.path(path, "_snaps")), c("a.md", "b.md")) expect_snapshot(snapshot_accept(path = path)) }) test_that("can accept specific files", { path <- local_snapshot_dir(c("a.md", "a.new.md", "b.md", "b.new.md")) expect_snapshot(snapshot_accept("a", path = path)) expect_equal(dir(file.path(path, "_snaps")), c("a.md", "b.md", "b.new.md")) path <- local_snapshot_dir(c("test/a.txt", "test/a.new.txt")) expect_snapshot(snapshot_accept("test/a.txt", path = path)) expect_equal(dir(file.path(path, "_snaps"), recursive = TRUE), "test/a.txt") # or whole directory path <- local_snapshot_dir(c("test/a.txt", "test/a.new.txt")) expect_snapshot(snapshot_accept("test/", path = path)) expect_equal(dir(file.path(path, "_snaps"), recursive = TRUE), "test/a.txt") }) test_that("can work with variants", { # Can accept all path <- local_snapshot_dir(c("foo/a.md", "foo/a.new.md")) expect_snapshot(snapshot_accept(path = path)) expect_equal(dir(file.path(path, "_snaps", "foo")), "a.md") # Can accept specified path <- local_snapshot_dir(c("foo/a.md", "foo/a.new.md")) expect_snapshot(snapshot_accept("foo/a", path = path)) expect_equal(dir(file.path(path, "_snaps", "foo")), "a.md") }) # snapshot_meta ----------------------------------------------------------- test_that("returns empty data frame for empty directory", { path <- tempfile() dir.create(path) expect_equal(nrow(snapshot_meta(path = path)), 0) }) testthat/tests/testthat/test-reporter-check.R0000644000176200001440000000162514165427540021145 0ustar liggesuserstest_that("basic report works", { withr::defer(file.remove(test_path("testthat-problems.rds"))) expect_snapshot_reporter(CheckReporter$new()) rds <- test_path("testthat-problems.rds") expect_true(file.exists(rds)) }) test_that("doesn't truncate long lines", { on.exit(unlink(test_path("testthat-problems.rds"))) expect_snapshot_reporter(CheckReporter$new(), test_path("reporters/long-test.R")) }) test_that("always shows summary", { file.create(test_path("testthat-problems.rds")) expect_snapshot_reporter(CheckReporter$new(), test_path("reporters/successes.R")) # and cleans up testthat-problems expect_false(file.exists(test_path("testthat-problems.rds"))) }) test_that("shows warnings when not on CRAN", { on.exit(unlink(test_path("testthat-problems.rds"))) withr::local_options("NOT_CRAN" = "true") expect_snapshot_reporter(CheckReporter$new(), test_path("reporters/tests.R")) }) testthat/tests/testthat/test-expect-known.txt0000644000176200001440000000001614172347322021256 0ustar liggesusersü ⩝ 我 и testthat/tests/testthat/test-verify-unicode-false.txt0000644000176200001440000000006714172347327022667 0ustar liggesusers> cat(cli::symbol$info, cli::symbol$cross, "\n") i x testthat/tests/testthat/test-expect-constant.R0000644000176200001440000000134614165743661021354 0ustar liggesuserstest_that("logical tests act as expected", { local_output_override() expect_success(expect_true(TRUE)) 
expect_success(expect_false(FALSE)) expect_snapshot_failure(expect_true(FALSE)) expect_snapshot_failure(expect_false(TRUE)) }) test_that("logical tests ignore attributes", { expect_success(expect_true(c(a = TRUE))) expect_success(expect_false(c(a = FALSE))) }) test_that("additional info returned in message", { expect_failure(expect_true(FALSE, "NOPE"), "\nNOPE") expect_failure(expect_false(TRUE, "YUP"), "\nYUP") }) test_that("expect_null works", { local_output_override() expect_success(expect_null(NULL)) expect_snapshot_failure(expect_null(1L)) expect_snapshot_failure(expect_null(environment())) }) testthat/tests/testthat/test-expect-inheritance.R0000644000176200001440000000402714164710003021772 0ustar liggesuserstest_that("expect_type checks typeof", { expect_success(expect_type(factor("a"), "integer")) expect_failure(expect_type(factor("a"), "double")) }) test_that("expect_is checks class", { local_edition(2) expect_success(expect_is(factor("a"), "factor")) expect_failure(expect_is(factor("a"), "integer")) }) test_that("expect_s3/s4_class fails if appropriate type", { A <- methods::setClass("A", contains = "list") expect_failure(expect_s3_class(1, "double"), "not an S3 object") expect_failure(expect_s3_class(A(), "double"), "not an S3 object") expect_failure(expect_s4_class(factor(), "double"), "not an S4 object") }) test_that("expect_s[34]_class can check not S3/S4", { expect_success(expect_s3_class(1, NA)) expect_snapshot_failure(expect_s3_class(factor(), NA)) A <- methods::setClass("A", contains = "list") expect_success(expect_s4_class(1, NA)) expect_snapshot_failure(expect_s4_class(A(), NA)) }) test_that("test_s4_class respects class hierarchy", { A <- methods::setClass("A", contains = "list") B <- methods::setClass("B", contains = "list") C <- methods::setClass("C", contains = c("A", "B")) on.exit({ methods::removeClass("A") methods::removeClass("B") methods::removeClass("C") }) expect_success(expect_s4_class(C(), "A")) expect_success(expect_s4_class(C(), "B")) expect_snapshot_failure(expect_s4_class(C(), "D")) }) test_that("test_s3_class respects class hierarchy", { x <- structure(list(), class = c("a", "b")) expect_success(expect_s3_class(x, "a")) expect_success(expect_s3_class(x, "b")) expect_snapshot_failure(expect_s3_class(x, "c")) expect_snapshot_failure(expect_s3_class(x, c("c", "d"))) }) test_that("test_s3_class can request exact match", { x <- structure(list(), class = c("a", "b")) expect_failure(expect_s3_class(x, "a", exact = TRUE)) expect_success(expect_s3_class(x, c("a", "b"), exact = TRUE)) }) test_that("expect_s3_class allows unquoting of first argument", { f <- factor("a") expect_success(expect_s3_class(!! 
rlang::quo(f), "factor")) }) testthat/tests/testthat/test-snapshot-serialize.R0000644000176200001440000000126414165635513022054 0ustar liggesuserstest_that("single test case can roundtrip", { x <- list(test = '[1] "x"') x_snap <- snap_to_md(x) x_lines <- strsplit(x_snap, "\n")[[1]] y <- snap_from_md(x_lines) expect_equal(x, y) }) test_that("multiple tests can roundtrip", { x <- list(foo = c("a","b"), bar = "d", baz = letters[1:3]) x_snap <- snap_to_md(x) x_lines <- strsplit(x_snap, "\n")[[1]] y <- snap_from_md(x_lines) expect_equal(x, y) }) test_that("snapshots always use \n", { path <- withr::local_tempfile() x <- list(foo = c("a","b"), bar = "d", baz = letters[1:3]) write_snaps(x, path) snap <- brio::read_file(path) has_cr <- grepl("\r", snap, fixed = TRUE) expect_equal(has_cr, FALSE) }) testthat/tests/testthat/test-test-files.R0000644000176200001440000000514714164710003020276 0ustar liggesusers# test_dir() -------------------------------------------------------------- test_that("stops on failure", { withr::local_envvar(TESTTHAT_PARALLEL = "FALSE") expect_error( test_dir(test_path("test_dir"), reporter = "silent") ) }) test_that("runs all tests and records output", { withr::local_envvar(TESTTHAT_PARALLEL = "FALSE") res <- test_dir(test_path("test_dir"), reporter = "silent", stop_on_failure = FALSE) df <- as.data.frame(res) df$user <- df$system <- df$real <- df$result <- NULL local_reproducible_output(width = 200) local_edition(3) # set to 2 in ./test_dir expect_snapshot_output(print(df)) }) test_that("complains if no files", { withr::local_envvar(TESTTHAT_PARALLEL = "FALSE") path <- tempfile() dir.create(path) expect_error(test_dir(path), "test files") }) test_that("can control if failures generate errors", { withr::local_envvar(TESTTHAT_PARALLEL = "FALSE") test_error <- function(...) { test_dir(test_path("test-error"), reporter = "silent", ...) } expect_error(test_error(stop_on_failure = TRUE), "Test failures") expect_error(test_error(stop_on_failure = FALSE), NA) }) test_that("can control if warnings errors", { withr::local_envvar(TESTTHAT_PARALLEL = "FALSE") test_warning <- function(...) { test_dir(test_path("test-warning"), reporter = "silent", ...) 
} expect_error(test_warning(stop_on_warning = TRUE), "Tests generated warnings") expect_error(test_warning(stop_on_warning = FALSE), NA) }) # test_file --------------------------------------------------------------- test_that("can test single file", { out <- test_file(test_path("test_dir/test-basic.R"), reporter = "silent") expect_length(out, 5) }) test_that("complains if file doesn't exist", { expect_error(test_file("DOESNTEXIST"), "does not exist") }) # setup-teardown ---------------------------------------------------------- test_that("files created by setup still exist", { # These files should be created/deleted by package-wide setup/teardown # We check that they exist here to make sure that they're not cleaned up # too early expect_true(file.exists("DELETE-ME")) expect_true(file.exists("DELETE-ME-2")) }) # helpers ----------------------------------------------------------------- test_that("can filter test scripts", { x <- c("test-a.R", "test-b.R", "test-c.R") expect_equal(filter_test_scripts(x), x) expect_equal(filter_test_scripts(x, "a"), x[1]) expect_equal(filter_test_scripts(x, "a", invert = TRUE), x[-1]) # Strips prefix/suffix expect_equal(filter_test_scripts(x, "test"), character()) expect_equal(filter_test_scripts(x, ".R"), character()) }) testthat/tests/testthat/reporters/0000755000176200001440000000000014164710003017135 5ustar liggesuserstestthat/tests/testthat/reporters/backtraces.R0000644000176200001440000000307514165743507021377 0ustar liggesusers test_that("errors thrown at block level are entraced", { f <- function() g() g <- function() stop("foo") f() }) test_that("errors thrown from a quasi-labelled argument are entraced", { foo <- function() stop("foo") expect_s3_class(foo(), "foo") }) test_that("errors thrown from a quasi-labelled argument are entraced (deep case)", { foo <- function() stop("foo") f <- function() g() g <- function() expect_s3_class(foo(), "foo") expect_s3_class(f(), "foo") }) test_that("errors thrown from a quasi-labelled argument are entraced (deep deep case)", { foo <- function() bar() bar <- function() stop("foobar") f <- function() g() g <- function() expect_s3_class(foo(), "foo") f() }) test_that("failed expect_error() prints a backtrace", { f <- function() signaller() signaller <- function() stop("bar") expect_error(f(), "foo") }) test_that("Errors are inspected with `conditionMessage()`", { rlang::scoped_bindings( .env = globalenv(), conditionMessage.foobar = function(...) "dispatched" ) rlang::abort("Wrong message", "foobar") }) test_that("also get backtraces for warnings", { foo <- function() bar() bar <- function() warning("foobar", call.
= FALSE) foo() expect_true(TRUE) }) test_that("deep stacks are trimmed", { f <- function(x) { if (x > 0) f(x - 1) else stop("This is deep") } f(25) }) # Expectations ---------------------------------------------------------------- f <- function() g() g <- function() h() h <- function() expect_true(FALSE) f() test_that("nested expectations get backtraces", { f() }) testthat/tests/testthat/reporters/error-setup.R0000644000176200001440000000013714164710003021550 0ustar liggesusersf <- function() g() g <- function() h() h <- function() stop("!") local_edition(2) setup(f()) testthat/tests/testthat/reporters/tests.R0000644000176200001440000000121214164710003020416 0ustar liggesuserslocal_edition(2) context("Successes") test_that("Success", { succeed() }) context("Failures") test_that("Failure:1", { expect_true(FALSE) }) test_that("Failure:2a", { f <- function() expect_true(FALSE) f() }) context("Errors") test_that("Error:1", { stop("stop") }) test_that("errors get tracebacks", { f <- function() g() g <- function() h() h <- function() stop("!") f() }) context("Skips") test_that("explicit skips are reported", { skip("skip") }) test_that("empty tests are implicitly skipped", { }) context("Warnings") test_that("warnings get backtraces", { f <- function() { warning("def") } f() }) testthat/tests/testthat/reporters/successes.R0000644000176200001440000000023414164710003021257 0ustar liggesuserstest_that("two successes", { expect_true(TRUE) expect_true(TRUE) }) test_that("five more successes", { for (i in 1:5) { expect_true(TRUE) } }) testthat/tests/testthat/reporters/long-test.R0000644000176200001440000000022114164710003021167 0ustar liggesuserstest_that("That very long test messages are not truncated because they contain useful information that you probably want to read", { fail() }) testthat/tests/testthat/reporters/context.R0000644000176200001440000000014014164710003020737 0ustar liggesuserstestthat:::local_edition(2) context("my context") test_that("a test", { expect_true(TRUE) }) testthat/tests/testthat/reporters/fail.R0000644000176200001440000000020314164710003020166 0ustar liggesusers test_that("two failures", { expect_true(FALSE) expect_false(TRUE) }) test_that("another failure", { expect_true(FALSE) }) testthat/tests/testthat/reporters/skips.R0000644000176200001440000000021114164710003020403 0ustar liggesuserstest_that("regular skip", { skip("regular skip") }) test_that("skip with details", { skip("longer skip:\nthis is what happened") }) testthat/tests/testthat/reporters/fail-many.R0000644000176200001440000000011114164710003021126 0ustar liggesuserstest_that("Example", { for (i in 1:11) { expect_true(FALSE) } }) testthat/tests/testthat/test-verify-unicode-true.txt0000644000176200001440000000007314172347327022551 0ustar liggesusers> cat(cli::symbol$info, cli::symbol$cross, "\n") ℹ ✖ testthat/tests/testthat/test-snapshot/0000755000176200001440000000000014172362302017730 5ustar liggesuserstestthat/tests/testthat/test-snapshot/test-snapshot.R0000644000176200001440000000055114164710003022664 0ustar liggesuserstest_that("errors reset snapshots", { if (nzchar(Sys.getenv("TESTTHAT_REGENERATE_SNAPS"))) { expect_snapshot(print(1)) } else { expect_snapshot(stop("failing")) } }) test_that("skips reset snapshots", { if (nzchar(Sys.getenv("TESTTHAT_REGENERATE_SNAPS"))) { expect_snapshot(print(1)) } else { expect_snapshot(skip("skipping")) } }) testthat/tests/testthat/test-snapshot/_snaps/0000755000176200001440000000000014164710003021207 5ustar 
liggesuserstestthat/tests/testthat/test-snapshot/_snaps/snapshot.md0000644000176200001440000000022314172347326023402 0ustar liggesusers# errors reset snapshots Code print(1) Output [1] 1 # skips reset snapshots Code print(1) Output [1] 1 testthat/tests/testthat/test-snapshot/test-expect-condition.R0000644000176200001440000000021414164710003024275 0ustar liggesuserstest_that("can use failing condition expectation inside `expect_snapshot()`", { local_edition(3) expect_snapshot(expect_error(NULL)) }) testthat/tests/testthat/test-reporter-location.R0000644000176200001440000000013214164710003021654 0ustar liggesuserstest_that("reporter as expected", { expect_snapshot_reporter(LocationReporter$new()) }) testthat/tests/testthat/test_dir/0000755000176200001440000000000014172347717016746 5ustar liggesuserstestthat/tests/testthat/test_dir/test-helper.R0000644000176200001440000000020414164710003021300 0ustar liggesusers# test that the companion helper script is sourced by test_dir test_that("helper test", { expect_equal(hello(), "Hello World") }) testthat/tests/testthat/test_dir/test-failures.R0000644000176200001440000000032114164710003021633 0ustar liggesuserstest_that("just one failure", { expect_true(FALSE) }) test_that("one failure on two", { expect_false(FALSE) expect_true(FALSE) }) test_that("no failure", { expect_false(FALSE) expect_true(TRUE) }) testthat/tests/testthat/test_dir/test-bare-expectations.R0000644000176200001440000000002314164710003023435 0ustar liggesusersexpect_equal(2, 2) testthat/tests/testthat/test_dir/test-skip.R0000644000176200001440000000013614164710003020773 0ustar liggesuserstest_that("Skips skip", { skip("Skipping to avoid certain failure") expect_true(FALSE) }) testthat/tests/testthat/test_dir/test-errors.R0000644000176200001440000000051014164710003021335 0ustar liggesuserstest_that("simple", { stop("argh") }) test_that("after one success", { expect_true(TRUE) stop("argh") expect_true(TRUE) }) test_that("after one failure", { expect_true(FALSE) stop("argh") }) test_that("in the test", { expect_true(stop("Argh")) }) test_that("in expect_error", { expect_error(stop("Argh")) }) testthat/tests/testthat/test_dir/helper_hello.R0000644000176200001440000000004213514346760021523 0ustar liggesusershello <- function() "Hello World" testthat/tests/testthat/test_dir/test-empty.R0000644000176200001440000000012014164710003021154 0ustar liggesuserstest_that("empty test", NULL) test_that("empty test with error", stop("Argh")) testthat/tests/testthat/test_dir/test-basic.R0000644000176200001440000000066714164710003021117 0ustar liggesuserstest_that("logical tests act as expected", { expect_true(TRUE) expect_false(FALSE) }) test_that("logical tests ignore attributes", { expect_true(c(a = TRUE)) expect_false(c(a = FALSE)) }) test_that("equality holds", { expect_equal(5, 5) expect_identical(10, 10) }) test_that("can't access variables from other tests 2", { a <- 10 }) test_that("can't access variables from other tests 1", { expect_false(exists("a")) }) testthat/tests/testthat/test-reporter-summary.R0000644000176200001440000000054514164710003021551 0ustar liggesuserstest_that("can control appearance of dots", { expect_snapshot_reporter(SummaryReporter$new(show_praise = FALSE, omit_dots = FALSE)) expect_snapshot_reporter(SummaryReporter$new(show_praise = FALSE, omit_dots = TRUE)) }) test_that("can control maximum reports", { expect_snapshot_reporter(SummaryReporter$new(show_praise = FALSE, max_reports = 2)) }) 
testthat/tests/testthat/test-expect-condition.R0000644000176200001440000001641514165427540021507 0ustar liggesuserstest_that("returns condition or value", { expect_equal(expect_error(1, NA), 1) expect_s3_class(expect_error(stop("!")), "simpleError") }) test_that("regexp = NULL checks for presence of error", { expect_success(expect_error(stop())) expect_snapshot_failure(expect_error(null())) }) test_that("regexp = NA checks for absence of error", { expect_success(expect_error(null(), NA)) expect_failure(expect_error(stop("Yes"), NA)) }) test_that("regexp = string matches for error message", { expect_success(expect_error(stop("Yes"), "Yes")) expect_error(expect_error(stop("Yes"), "No")) expect_snapshot_failure(expect_error("OK", "No")) }) test_that("class = string matches class of error", { blah <- function() { abort("hi", class = c("blah", "error", "condition")) } expect_success(expect_error(blah(), class = "blah")) expect_error(expect_error(blah(), class = "blech"), class = "blah") }) test_that("check type of class and pattern", { expect_error(expect_error(stop("!"), regexp = 1), "single string") expect_error(expect_error(stop("!"), class = 1), "single string") }) test_that("... passed on to grepl", { expect_success(expect_error(stop("X"), "x", ignore.case = TRUE)) }) test_that("message method is called when expecting error", { local_bindings( conditionMessage.foobar = function(err) "dispatched!", .env = globalenv() ) fb <- function() abort("foobar", "foobar") expect_error(fb(), "dispatched!", class = "foobar") expect_snapshot_failure(expect_error(fb(), NA)) }) test_that("rlang backtrace reminders are not included in error message", { f <- function() g() g <- function() h() h <- function() abort("foo") expect_error(f(), "foo$") }) test_that("can capture Throwable conditions from rJava", { local_bindings( conditionMessage.Throwable = function(c, ...) unclass(c)$message, conditionCall.Throwable = function(c, ...) unclass(c)$call, `$.Throwable` = function(...) stop("forbidden"), `$<-.Throwable` = function(...) 
stop("forbidden"), .env = globalenv() ) throw <- function(msg) stop(error_cnd("Throwable", message = msg)) expect_error(throw("foo"), "foo", class = "Throwable") }) # expect_warning() ---------------------------------------------------------- test_that("warnings are converted to errors when options('warn') >= 2", { withr::with_options(c(warn = 2), { expect_warning(warning("foo")) expect_error(warning("foo")) }) }) test_that("can silence warnings", { expect_warning(suppressWarnings(warning("foo")), NA) # Can't test with `expect_warning()` because the warning is still # signalled, it's just not printed # https://github.com/wch/r-source/blob/886ab4a0/src/main/errors.c#L388-L484 withr::with_options(c(warn = -1), warning("foo")) }) # expect_message ---------------------------------------------------------- test_that("regexp = NA checks for absence of message", { expect_success(expect_message(null(), NA)) expect_failure(expect_message(message("!"), NA)) }) # expect_condition -------------------------------------------------------- test_that("continues evaluation", { expect_condition({ message("Hi") new_variable <- 1 }) expect_equal(exists("new_variable"), TRUE) }) test_that("but not after error", { expect_condition({ stop("Hi") new_variable <- 1 }) expect_equal(exists("new_variable"), FALSE) }) test_that("captured condition is muffled", { expect_message(expect_condition(message("Hi")), NA) expect_warning(expect_condition(warning("Hi")), NA) expect_error(expect_condition(stop("Hi")), NA) }) test_that("only matching condition is captured, others bubble up", { f1 <- function() { message("Hi") message("Bye") } expect_condition(expect_condition(f1(), "Hi"), "Bye") expect_condition(expect_condition(f1(), "Bye"), "Hi") f2 <- function() { message("Hi") stop("Bye") } expect_error(expect_condition(f2(), "Hi"), "Bye") }) test_that("cnd expectations consistently return condition (#1371)", { f <- function(out, action) { action out } expect_s3_class(expect_message(f(NULL, message(""))), "simpleMessage") expect_s3_class(expect_warning(f(NULL, warning(""))), "simpleWarning") expect_s3_class(expect_error(f(NULL, stop(""))), "simpleError") # Used to behave differently with non-`NULL` values expect_s3_class(expect_message(f("return value", message(""))), "simpleMessage") expect_s3_class(expect_warning(f("return value", warning(""))), "simpleWarning") expect_s3_class(expect_error(f("return value", stop(""))), "simpleError") # If there is no condition expected we return the value expect_equal(expect_message(f("return value", NULL), regexp = NA), "return value") expect_equal(expect_warning(f("return value", NULL), regexp = NA), "return value") expect_equal(expect_error(f("return value", NULL), regexp = NA), "return value") }) test_that("cli width wrapping doesn't affect text matching", { skip_if_not_installed("cli", "3.0.2") skip_if_not_installed("rlang", "1.0.0") local_use_cli() expect_error( abort("foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz"), "foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz" ) }) test_that("can match parent conditions (#1493)", { parent <- error_cnd("foo", message = "Parent message.") f <- function() abort("Tilt.", parent = parent) expect_error(f(), class = "foo") expect_error(f(), "^Parent message.$") # Pattern and class must match the same condition expect_error(expect_error(f(), "Tilt.", class = "foo")) # Can disable parent matching expect_error(expect_error(f(), 
class = "foo", inherit = FALSE)) expect_error(expect_error(f(), "Parent message.", inherit = FALSE)) }) # second edition ---------------------------------------------------------- test_that("other conditions are swallowed", { f <- function(...) { conds <- c(...) for (cond in conds) { switch(cond, message = message("message"), warning = warning("warning"), error = stop("error"), condition = signal("signal", class = "signal") ) } } local_edition(2) # if condition text doesn't match, expectation fails (not errors) expect_failure(expect_error(f("error"), "not a match")) expect_failure(expect_warning(f("warning"), "not a match")) expect_failure(expect_message(f("message"), "not a match")) expect_failure(expect_condition(f("condition"), "not a match")) # if error/condition class doesn't match, expectation fails expect_failure(expect_error(f("error"), class = "not a match")) expect_failure(expect_condition(f("message"), class = "not a match")) # expect_message() and expect_warning() swallow all messages/warnings expect_message(expect_message(f("message", "message")), NA) expect_warning(expect_warning(f("warning", "warning")), NA) }) test_that("can match parent conditions (edition 2, #1493)", { local_edition(2) parent <- error_cnd("foo", message = "Parent message.") f <- function() abort("Tilt.", parent = parent) expect_error(f(), class = "foo") expect_error(f(), "^Parent message.$") # Can disable parent matching expect_error(expect_error(f(), class = "foo", inherit = FALSE)) expect_error(expect_error(f(), "Parent message.", inherit = FALSE)) }) testthat/tests/testthat/test-reporter-multi.R0000644000176200001440000000055514164710003021207 0ustar liggesuserstest_that("MultiReporter", { reports <- lapply(seq_len(3), function(x) ListReporter$new()) reporter <- MultiReporter$new(reporters = reports) with_reporter(reporter, test_one_file("context.R")) dfs <- lapply(reports, function(x) as.data.frame(x$get_results())) expect_equal(dfs[[2]][1:7], dfs[[1]][1:7]) expect_equal(dfs[[3]][1:7], dfs[[1]][1:7]) }) testthat/tests/testthat/test-parallel-teardown.R0000644000176200001440000000062614164710003021631 0ustar liggesusers test_that("teardown error", { skip("teardown errors are ignored") withr::local_envvar(TESTTHAT_PARALLEL = "TRUE") err <- tryCatch( suppressMessages(testthat::test_local( test_path("test-parallel", "teardown"), reporter = "silent" )), error = function(e) e ) expect_s3_class(err, "testthat_process_error") expect_match(err$message, "Error in teardown", fixed = TRUE) }) testthat/tests/testthat/test-context.R0000644000176200001440000000262114164710003017675 0ustar liggesusersCountReporter <- R6::R6Class("CountReporter", inherit = Reporter, public = list( context_i = 0, context_count = 0, test_i = 0, test_count = 0, start_context = function(context) { self$context_count <- self$context_count + 1 self$context_i <- self$context_i + 1 }, end_context = function(context) { self$context_i <- self$context_i - 1 stopifnot(self$context_i >= 0) }, start_test = function(context, test) { self$test_count <- self$test_count + 1 self$test_i <- self$test_i + 1 }, end_test = function(context, test) { self$test_i <- self$test_i - 1 stopifnot(self$test_i >= 0) } ) ) test_that("contexts are opened, then closed", { local_edition(2) report <- CountReporter$new() with_reporter(report, test_one_file("context.R")) expect_equal(report$context_count, 2) expect_equal(report$context_i, 0) expect_equal(report$test_count, 4) expect_equal(report$test_i, 0) }) test_that("context_name strips prefix and extensions correctly", { 
expect_equal(context_name("test-metrics.R"), "metrics") # uppercase expect_equal(context_name("test-metrics.r"), "metrics") # lowercase expect_equal(context_name("test-check.Rfile.R"), "check.Rfile") # suffix only expect_equal(context_name("test-test-test.R"), "test-test") # 1st prefix only expect_equal(context_name("test_metrics.R"), "metrics") }) testthat/tests/testthat/test-deprec-condition.R0000644000176200001440000000122714172347251021452 0ustar liggesuserstest_that("is_informative_error returns TRUE for basic errors", { withr::local_options(lifecycle_verbosity = "quiet") is_informative <- function(x) is_informative_error(catch_cnd(x)) expect_false(is_informative(stop("!"))) expect_false(is_informative(abort("!"))) expect_false(is_informative(abort("!", class = "Rcpp::eval_error"))) expect_false(is_informative(abort("!", class = "Rcpp::exception"))) expect_true(is_informative(abort("!", class = "error_custom"))) with_bindings( .env = global_env(), is_informative_error.error_custom = function(...) FALSE, expect_false(is_informative(abort("!", class = "error_custom"))) ) }) testthat/tests/testthat/test-parallel-startup.R0000644000176200001440000000057414164710003021512 0ustar liggesusers test_that("startup error", { skip_on_covr() withr::local_envvar(TESTTHAT_PARALLEL = "TRUE") err <- tryCatch( suppressMessages(testthat::test_local( test_path("test-parallel", "startup"), reporter = "silent" )), error = function(e) e ) expect_s3_class(err, "testthat_process_error") expect_match(err$message, "This will fail", fixed = TRUE) }) testthat/tests/testthat/test-parallel-setup.R0000644000176200001440000000061114164710003021140 0ustar liggesusers test_that("error in parallel setup code", { skip_on_covr() withr::local_envvar(TESTTHAT_PARALLEL = "TRUE") err <- tryCatch( suppressMessages(testthat::test_local( test_path("test-parallel", "setup"), reporter = "silent" )), error = function(e) e ) expect_s3_class(err, "testthat_process_error") expect_match(err$message, "Error in setup", fixed = TRUE) }) testthat/tests/testthat/test-edition.R0000644000176200001440000000212314164710003017641 0ustar liggesuserstest_that("can locally override edition", { local_edition(3) expect_equal(edition_get(), 3) local_edition(2) expect_equal(edition_get(), 2) }) test_that("deprecation only fired for newer edition", { local_edition(2) expect_warning(edition_deprecate(3, "old stuff"), NA) local_edition(3) expect_snapshot(edition_deprecate(3, "old stuff")) }) test_that("required only fired for older edition", { withr::local_options(testthat.edition_ignore = FALSE) local_edition(2) expect_error(edition_require(3, "new stuff")) withr::local_options(testthat.edition_ignore = FALSE) local_edition(3) expect_error(edition_require(3, "new stuff"), NA) }) test_that("edition for testthat is 3", { expect_equal(find_edition(package = "testthat"), 3) }) test_that("edition for non-package dir is 2", { expect_equal(find_edition(tempdir()), 2) }) test_that("can set the edition via an environment variable", { local_edition(zap()) withr::local_envvar(TESTTHAT_EDITION = 2) expect_equal(edition_get(), 2) withr::local_envvar(TESTTHAT_EDITION = 3) expect_equal(edition_get(), 3) }) testthat/tests/testthat/helper-assign.R0000644000176200001440000000002014011070223017756 0ustar liggesusersabcdefghi <- 10 testthat/tests/testthat/test-reporter.R0000644000176200001440000000121214166627056020067 0ustar liggesuserstest_that("can control output with file arg/option", { # powered through Reporter base class so we only test one reporter path <- 
tempfile() withr::defer(unlink(path)) with_reporter( MinimalReporter$new(file = path), test_one_file(test_path("reporters/tests.R")) ) expect_snapshot_output(readLines(path)) withr::local_options(testthat.output_file = path) with_reporter( MinimalReporter$new(), test_one_file(test_path("reporters/tests.R")) ) expect_snapshot_output(readLines(path)) }) test_that("should not automatically skip in non-utf-8 locales", { withr::local_locale(LC_CTYPE = "C") expect_true(TRUE) }) testthat/tests/testthat/test-expect-vector.R0000644000176200001440000000027314164710003021002 0ustar liggesuserstest_that("basic properties upheld", { skip_if_not_installed("vctrs", "0.1.0.9002") expect_success(expect_vector(1:10, size = 10)) expect_failure(expect_vector(1:10, size = 5)) }) testthat/tests/testthat/test-expect-comparison.R0000644000176200001440000000310614164710003021650 0ustar liggesuserstest_that("basic comparisons work", { expect_success(expect_lt(10, 11)) expect_failure(expect_lt(10, 10)) expect_success(expect_lte(10, 10)) expect_success(expect_gt(11, 10)) expect_failure(expect_gt(10, 10)) expect_success(expect_gte(10, 10)) }) test_that("comparison result object invisibly", { out <- expect_invisible(expect_lt(1, 10)) expect_equal(out, 1) }) test_that("comparisons with Inf work", { expect_success(expect_lt(10, Inf)) expect_failure(expect_lt(Inf, Inf)) expect_success(expect_lte(Inf, Inf)) expect_success(expect_gt(Inf, 10)) expect_failure(expect_gt(Inf, Inf)) expect_success(expect_gte(Inf, Inf)) }) test_that("comparisons with NA work", { expect_failure(expect_lt(10, NA_real_)) expect_failure(expect_lt(NA_real_, 10)) expect_failure(expect_lt(NA_real_, NA_real_)) expect_failure(expect_lte(NA_real_, NA_real_)) expect_failure(expect_gt(10, NA_real_)) expect_failure(expect_gt(NA_real_, 10)) expect_failure(expect_gt(NA_real_, NA_real_)) expect_failure(expect_gte(NA_real_, NA_real_)) }) test_that("comparisons with more complicated objects work", { time <- Sys.time() time2 <- time + 1 expect_success(expect_lt(time, time2)) expect_success(expect_lte(time, time2)) expect_success(expect_gt(time2, time)) expect_success(expect_gte(time2, time)) }) test_that("comparison must yield a single logical", { expect_error(expect_lt(1:10, 5), "single logical") }) test_that("wordly versions are deprecated", { expect_warning(expect_less_than(1, 2), "Deprecated") expect_warning(expect_more_than(2, 1), "Deprecated") }) testthat/tests/testthat/helper-testthat.R0000644000176200001440000000045514164710003020354 0ustar liggesusersrlang_version <- function() { if (use_rlang_1_0()) "rlang-1.0" else "rlang-pre-1.0" } local_use_rlang_1_0 <- function(frame = caller_env()) { if (is_installed("rlang") && utils::packageVersion("rlang") >= "0.99.0.9001") { local_options("testthat:::rlang_dep" = "1.0.0", .frame = frame) } } testthat/tests/testthat/test-reporter-teamcity.R0000644000176200001440000000013214164710003021663 0ustar liggesuserstest_that("reporter basics work", { expect_snapshot_reporter(TeamcityReporter$new()) }) testthat/tests/testthat/test-local.R0000644000176200001440000000254414165635513017324 0ustar liggesuserstest_that("local context is 'as promised' inside test_that()", { # high-level expectations or expectations that preceded testthat 3e expect_true(is_testing()) expect_equal(testing_package(), "testthat") expect_false(is_interactive()) expect_equal(Sys.getenv("R_TESTS"), "") # set in local_test_context() expect_equal(Sys.getenv("TESTTHAT"), "true") # testthat 3e, set in local_reproducible_output() expect_equal(edition_get(), 
3L) expect_equal(getOption("width"), 80) expect_false(getOption("crayon.enabled")) expect_false(getOption("cli.dynamic")) expect_false(getOption("cli.unicode")) expect_equal(getOption("lifecycle_verbosity"), "warning") expect_equal(getOption("OutDec"), ".") expect_false(getOption("rlang_interactive")) expect_false(getOption("useFancyQuotes")) expect_equal(getOption("max.print"), 99999) expect_equal(Sys.getenv("RSTUDIO"), "") expect_equal(Sys.getenv("LANGUAGE"), "en") expect_equal(Sys.getlocale("LC_COLLATE"), "C") }) test_that("can override usual options", { local_test_directory(tempdir(), "methods") expect_equal(testing_package(), "methods") }) test_that("can override translation of error messages", { skip_on_cran() local_reproducible_output(lang = "fr") expect_error(mean[[1]], "objet de type") local_reproducible_output(lang = "es") expect_error(mean[[1]], "objeto de tipo") }) testthat/tests/testthat/test-expect-reference.R0000644000176200001440000000042114164710003021431 0ustar liggesuserstest_that("succeeds only when same object", { local_edition(2) x <- y <- 1 expect_success(expect_reference(x, y)) expect_failure(expect_reference(x, 1)) }) test_that("returns value", { local_edition(2) one <- 1 expect_equal(expect_reference(one, one), 1) }) testthat/tests/testthat/test-test-example.R0000644000176200001440000000060614164710003020622 0ustar liggesuserstest_that("can test documentation from path or Rd object", { rd_path <- test_path("../../man/expect_length.Rd") skip_if_not(file.exists(rd_path)) test_example(rd_path) test_rd(tools::parse_Rd(rd_path)) }) test_that("returns false if no examples", { rd_path <- test_path("../../man/test_examples.Rd") skip_if_not(file.exists(rd_path)) expect_false(test_example(rd_path)) }) testthat/tests/testthat/test-mock.R0000644000176200001440000000730214164710003017143 0ustar liggesuserstest_that("deprecated in 3rd edition", { expect_warning(local_mock(), "deprecated") expect_warning(with_mock(is_testing = function() FALSE), "deprecated") }) test_that("can change value of internal function", { local_edition(2) with_mock( test_mock2 = function() 5, expect_equal(test_mock1(), 5) ) # and value is restored on error expect_error( with_mock( test_mock2 = function() 5, stop("!") ) ) expect_equal(test_mock1(), 10) }) test_that("mocks can access local variables", { local_edition(2) x <- 5 with_mock( test_mock2 = function() x, expect_equal(test_mock1(), 5) ) }) test_that("non-empty mock with return value", { local_edition(2) expect_true(with_mock( compare = function(x, y, ...) list(equal = TRUE, message = "TRUE"), TRUE )) }) test_that("nested mock", { local_edition(2) with_mock( all.equal = function(x, y, ...) TRUE, { with_mock( expect_warning = expect_error, { expect_warning(stopifnot(!compare(3, "a")$equal)) } ) }, .env = asNamespace("base") ) expect_false(isTRUE(all.equal(3, 5))) expect_warning(warning("test")) }) test_that("can't mock non-existing", { local_edition(2) expect_error(with_mock(..bogus.. = identity, TRUE), "Function [.][.]bogus[.][.] not found in environment testthat") }) test_that("can't mock non-function", { local_edition(2) expect_error(with_mock(pkg_and_name_rx = FALSE, TRUE), "Function pkg_and_name_rx not found in environment testthat") }) test_that("empty or no-op mock", { local_edition(2) expect_warning( expect_null(with_mock()), "Not mocking anything. Please use named parameters to specify the functions you want to mock.", fixed = TRUE ) expect_warning( expect_true(with_mock(TRUE)), "Not mocking anything. 
Please use named parameters to specify the functions you want to mock.", fixed = TRUE ) }) test_that("visibility", { local_edition(2) expect_warning(expect_false(withVisible(with_mock())$visible)) expect_true(withVisible(with_mock(compare = function() {}, TRUE))$visible) expect_false(withVisible(with_mock(compare = function() {}, invisible(5)))$visible) }) test_that("multiple return values", { local_edition(2) expect_true(with_mock(FALSE, TRUE, compare = function() {})) expect_equal(with_mock(3, compare = function() {}, 5), 5) }) test_that("can access variables defined in function", { local_edition(2) x <- 5 expect_equal(with_mock(x, compare = function() {}), 5) }) test_that("can mock if package is not loaded", { local_edition(2) if ("package:curl" %in% search()) { skip("curl is loaded") } skip_if_not_installed("curl") with_mock(`curl::curl` = identity, expect_identical(curl::curl, identity)) }) test_that("changes to variables are preserved between calls and visible outside", { local_edition(2) x <- 1 with_mock( show_menu = function() {}, x <- 3, expect_equal(x, 3) ) expect_equal(x, 3) }) test_that("mock extraction", { local_edition(2) expect_identical( extract_mocks(list(compare = compare), .env = asNamespace("testthat"))$compare$name, as.name("compare") ) expect_error( extract_mocks(list(..bogus.. = identity), "testthat"), "Function [.][.]bogus[.][.] not found in environment testthat" ) expect_equal( length(extract_mocks(list(not = identity, show_menu = identity), "testthat")), 2 ) }) # local_mock -------------------------------------------------------------- test_that("local_mock operates locally", { local_edition(2) f <- function() { local_mock(compare = function(x, y) FALSE) compare(1, 1) } expect_false(f()) expect_equal(compare(1, 1), no_difference()) }) testthat/tests/testthat/test-snapshot-cleanup.R0000644000176200001440000000363414165635513021517 0ustar liggesuserstest_that("snapshot cleanup makes nice message if needed", { dir <- local_snap_dir(c("a.md", "b.md")) expect_snapshot({ snapshot_cleanup(dir) snapshot_cleanup(dir, c("a", "b")) }) }) test_that("deletes empty dirs", { dir <- local_snap_dir(character()) dir.create(file.path(dir, "a", "b", "c"), recursive = TRUE) dir.create(file.path(dir, "b"), recursive = TRUE) dir.create(file.path(dir, "c"), recursive = TRUE) snapshot_cleanup(dir) expect_equal(dir(dir), character()) }) test_that("detects outdated snapshots", { dir <- local_snap_dir(c("a.md", "b.md", "b.new.md")) expect_equal(snapshot_outdated(dir, c("a", "b")), character()) expect_equal(snapshot_outdated(dir, "a"), c("b.md", "b.new.md")) expect_equal(snapshot_outdated(dir, "b"), "a.md") expect_equal(snapshot_outdated(dir), c("a.md", "b.md", "b.new.md")) }) test_that("preserves variants", { dir <- local_snap_dir(c("a.md", "windows/a.md", "windows/b.md")) expect_equal(snapshot_outdated(dir, "a"), "windows/b.md") # Doesn't delete new files in variants dir <- local_snap_dir(c("a.md", "windows/a.md", "windows/a.new.md")) expect_equal(snapshot_outdated(dir, "a"), character()) }) test_that("detects outdated snapshot files", { dir <- local_snap_dir(c("a/foo.txt", "b/foo.txt", "b/foo.new.txt")) expect_equal( snapshot_outdated(dir, character(), character()), c("a/foo.txt", "b/foo.new.txt", "b/foo.txt") ) expect_equal( snapshot_outdated(dir, character(), "a/foo.txt"), c("b/foo.new.txt", "b/foo.txt") ) expect_equal( snapshot_outdated(dir, character(), "b/foo.txt"), "a/foo.txt" ) expect_equal( snapshot_outdated(dir, character(), c("a/foo.txt", "b/foo.txt")), character() ) }) 
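# local_snap_dir() is a helper defined alongside these tests; it appears to create a temporary snapshot directory pre-populated with empty placeholder files at the given paths, and to clean it up when the calling test exits.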
test_that("detects individual snapshots files to remove", { dir <- local_snap_dir(c("a/a1", "a/a2", "b/b1")) expect_equal( snapshot_outdated(dir, c("a", "b"), "a/a1"), c("a/a2", "b/b1") ) }) testthat/tests/testthat/test-reporter-minimal.R0000644000176200001440000000013114164710003021471 0ustar liggesuserstest_that("reporter as expected", { expect_snapshot_reporter(MinimalReporter$new()) }) testthat/tests/testthat.R0000644000176200001440000000005114164710003015227 0ustar liggesuserslibrary(testthat) test_check("testthat") testthat/src/0000755000176200001440000000000014172347717012716 5ustar liggesuserstestthat/src/reassign.c0000644000176200001440000000122613450426460014665 0ustar liggesusers#define USE_RINTERNALS #include #include #include SEXP reassign_function(SEXP name, SEXP env, SEXP old_fun, SEXP new_fun) { if (TYPEOF(name) != SYMSXP) error("name must be a symbol"); if (TYPEOF(env) != ENVSXP) error("env must be an environment"); if (TYPEOF(old_fun) != CLOSXP) error("old_fun must be a function"); if (TYPEOF(new_fun) != CLOSXP) error("new_fun must be a function"); SET_FORMALS(old_fun, FORMALS(new_fun)); SET_BODY(old_fun, BODY(new_fun)); SET_CLOENV(old_fun, CLOENV(new_fun)); DUPLICATE_ATTRIB(old_fun, new_fun); return R_NilValue; } SEXP duplicate_(SEXP x) { return duplicate(x); } testthat/src/test-example.cpp0000644000176200001440000000216012661230133016011 0ustar liggesusers/* * This file uses the Catch unit testing library, alongside * testthat's simple bindings, to test a C++ function. * * For your own packages, ensure that your test files are * placed within the `src/` folder, and that you include * `LinkingTo: testthat` within your DESCRIPTION file. */ // All test files should include the // header file. #include // Normally this would be a function from your package's // compiled library -- you might instead just include a header // file providing the definition, and let R CMD INSTALL // handle building and linking. int twoPlusTwo() { return 2 + 2; } // Initialize a unit test context. This is similar to how you // might begin an R test file with 'context()', expect the // associated context should be wrapped in braced. context("Sample unit tests") { // The format for specifying tests is similar to that of // testthat's R functions. Use 'test_that()' to define a // unit test, and use 'expect_true()' and 'expect_false()' // to test the desired conditions. 
test_that("two plus two equals four") { expect_true(twoPlusTwo() == 4); } } testthat/src/init.c0000644000176200001440000000117314164710003014006 0ustar liggesusers#include #include #include // for NULL #include /* .Call calls */ extern SEXP duplicate_(SEXP); extern SEXP reassign_function(SEXP, SEXP, SEXP, SEXP); extern SEXP run_testthat_tests(SEXP); static const R_CallMethodDef CallEntries[] = { {"duplicate_", (DL_FUNC) &duplicate_, 1}, {"reassign_function", (DL_FUNC) &reassign_function, 4}, {"run_testthat_tests", (DL_FUNC) &run_testthat_tests, 1}, {NULL, NULL, 0} }; void R_init_testthat(DllInfo *dll) { R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } testthat/src/Makevars0000644000176200001440000000006412661230133014372 0ustar liggesusersPKG_CPPFLAGS=-I../inst/include -DCOMPILING_TESTTHAT testthat/src/Makevars.win0000644000176200001440000000006412661230133015166 0ustar liggesusersPKG_CPPFLAGS=-I../inst/include -DCOMPILING_TESTTHAT testthat/src/test-runner.cpp0000644000176200001440000000036712661230133015676 0ustar liggesusers/* * Please do not edit this file -- it ensures that your package will export a * 'run_testthat_tests()' C routine that can be used to run the Catch unit tests * available in your package. */ #define TESTTHAT_TEST_RUNNER #include testthat/src/test-catch.cpp0000644000176200001440000000173614164710003015447 0ustar liggesusers#include #include #include #include namespace { void ouch() { std::string message = "logic"; throw std::logic_error(message); } } // anonymous namespace context("Catch: Example Unit Test") { test_that("4 + 4 == 8") { expect_true((4 + 4) == 8); } } context("Catch: A second context") { test_that("2 - 2 == 0") { expect_true((2 - 2) == 0); } test_that("-1 is negative") { expect_true((-1 < 0)); } } context("Catch: Respect 'src/Makevars'") { bool compiling_testthat; #ifdef COMPILING_TESTTHAT compiling_testthat = true; #else compiling_testthat = false; #endif test_that("COMPILING_TESTTHAT is inherited from 'src/Makevars'") { expect_true(compiling_testthat); } } context("Catch: Exception handling") { test_that("we can use Catch to test for exceptions") { expect_error(ouch()); expect_error_as(ouch(), std::exception); expect_error_as(ouch(), std::logic_error); } } testthat/vignettes/0000755000176200001440000000000014172347717014137 5ustar liggesuserstestthat/vignettes/snapshotting.Rmd0000644000176200001440000002434214164723442017323 0ustar liggesusers--- title: "Snapshot tests" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Snapshot tests} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) set.seed(1014) ``` The goal of a unit test is to record the expected output of a function using code. This is a powerful technique because not only does it ensure that code doesn't change unexpectedly, it also expresses the desired behaviour in a way that a human can understand. However, it's not always convenient to record the expected behaviour with code. Some challenges include: - Text output that includes many characters like quotes and newlines that require special handling in a string. - Output that is large, making it painful to define the reference output, and bloating the size of the test file and making it hard to navigate. - Binary formats like plots or images, which are very difficult to describe in code: i.e. 
the plot looks right, the error message is useful to a human, the print method uses colour effectively. For these situations, testthat provides an alternative mechanism: snapshot tests. Instead of using code to describe expected output, snapshot tests (also known as [golden tests](https://ro-che.info/articles/2017-12-04-golden-tests)) record results in a separate human-readable file. Snapshot tests in testthat are inspired primarily by [Jest](https://jestjs.io/docs/en/snapshot-testing), thanks to a number of very useful discussions with Joe Cheng. ```{r setup} library(testthat) ``` ```{r include = FALSE} snapper <- local_snapshotter() snapper$start_file("snapshotting.Rmd", "test") ``` ## Basic workflow We'll illustrate the basic workflow with a simple function that generates an HTML bulleted list. It can optionally include an `id` attribute, which allows you to construct a link directly to that list. ```{r} bullets <- function(text, id = NULL) { paste0( "<ul", if (!is.null(id)) paste0(" id=\"", id, "\""), ">\n", paste0("  <li>", text, "</li>\n", collapse = ""), "</ul>\n" ) } cat(bullets("a", id = "x")) ``` Testing this simple function is relatively painful. To write the test you have to carefully escape the newlines and quotes. And then when you re-read the test in the future, all that escaping makes it hard to tell exactly what it's supposed to return. ```{r} test_that("bullets", { expect_equal(bullets("a"), "<ul>\n  <li>a</li>\n</ul>\n") expect_equal(bullets("a", id = "x"), "<ul id=\"x\">\n  <li>a</li>\n</ul>\n") }) ``` This is a great place to use snapshot testing. To do this we make two changes to our code: - We use `expect_snapshot()` instead of `expect_equal()`. - We wrap the call in `cat()` (to avoid `[1]` in the output, like in my first interactive example). This yields the following test: ```{r} test_that("bullets", { expect_snapshot(cat(bullets("a"))) expect_snapshot(cat(bullets("a", "b"))) }) ``` ```{r, include = FALSE} # Reset snapshot test snapper$end_file() snapper$start_file("snapshotting.Rmd", "test") ``` When we run the test for the first time, it automatically generates reference output, and prints it, so that you can visually confirm that it's correct. The output is automatically saved in `_snaps/{name}.md`. The name of the snapshot matches your test file name --- e.g. if your test is `test-pizza.R` then your snapshot will be saved in `tests/testthat/_snaps/pizza.md`. As the file name suggests, this is a markdown file, which I'll explain shortly. If you run the test again, it'll succeed: ```{r} test_that("bullets", { expect_snapshot(cat(bullets("a"))) expect_snapshot(cat(bullets("a", "b"))) }) ``` ```{r, include = FALSE} # Reset snapshot test snapper$end_file() snapper$start_file("snapshotting.Rmd", "test") ``` But if you change the underlying code, say to tweak the indenting, the test will fail: ```{r, error = TRUE} bullets <- function(text, id = NULL) { paste0( "<ul", if (!is.null(id)) paste0(" id=\"", id, "\""), ">\n", paste0("<li>", text, "</li>\n", collapse = ""), "</ul>\n" ) } test_that("bullets", { expect_snapshot(cat(bullets("a"))) expect_snapshot(cat(bullets("a", "b"))) }) ``` If this is a deliberate change, you can follow the advice in the message and update the snapshots for that file by running `snapshot_accept("pizza")`; otherwise you can fix the bug and your tests will pass once more. (You can also accept snapshots for all files with `snapshot_accept()`.) ### Snapshot format Snapshots are recorded using a subset of markdown. You might wonder why we use markdown: it's important that snapshots be readable by humans, because humans have to look at them during code reviews. Reviewers often don't run your code but still want to understand the changes. Here's the snapshot file generated by the test above: ``` md # bullets
    Code
      cat(bullets("a"))
    Output
      <ul>
        <li>a</li>
      </ul>

---

    Code
      cat(bullets("a", "b"))
    Output
      <ul id="b">
        <li>a</li>
      </ul>
``` Each test starts with `# {test name}`, a level 1 heading. Within a test, each snapshot expectation is indented by four spaces, i.e. as code, and expectations are separated by `---`, a horizontal rule. ### Interactive usage Because the snapshot output uses the name of the current test file and the current test, snapshot expectations don't really work when run interactively at the console. Since they can't automatically find the reference output, they instead just print the current value for manual inspection. ## Other types of output So far we've focussed on snapshot tests for output printed to the console. But `expect_snapshot()` also captures messages, errors, and warnings. The following function generates some output, a message, and a warning: ```{r} f <- function() { print("Hello") message("Hi!") warning("How are you?") } ``` And `expect_snapshot()` captures them all: ```{r} test_that("f() makes lots of noise", { expect_snapshot(f()) }) ``` Capturing errors is *slightly* more difficult because `expect_snapshot()` will fail when there's an error: ```{r, error = TRUE} test_that("you can't add a number and a letter", { expect_snapshot(1 + "a") }) ``` This is a safety valve that ensures that you don't accidentally write broken code. To deliberately snapshot an error, you'll have to specifically request it with `error = TRUE`: ```{r} test_that("you can't add a number and a letter", { expect_snapshot(1 + "a", error = TRUE) }) ``` When the code gets longer, I like to put `error = TRUE` up front so it's a little more obvious: ```{r} test_that("you can't add weird things", { expect_snapshot(error = TRUE, { 1 + "a" mtcars + iris mean + sum }) }) ``` ## Other types of snapshot `expect_snapshot()` is the most used snapshot function because it records everything: the code you run, printed output, messages, warnings, and errors. But sometimes you just want to capture the output or errors, in which case you might want to use `expect_snapshot_output()` or `expect_snapshot_error()`. Or rather than caring about side-effects, you may want to check that the value of an R object stays the same. In this case, you can use `expect_snapshot_value()`, which offers a number of serialisation approaches that provide a tradeoff between accuracy and human readability. ## Whole file snapshotting `expect_snapshot()`, `expect_snapshot_output()`, `expect_snapshot_error()`, and `expect_snapshot_value()` all store their snapshots in a single file per test. But that doesn't work for all file types --- for example, what happens if you want to snapshot an image? `expect_snapshot_file()` provides an alternative workflow that generates one snapshot per expectation, rather than one file per test. Assuming you're in `test-burger.R`, the snapshot created by `expect_snapshot_file(code_that_returns_path_to_file(), "toppings.png")` would be saved in `tests/testthat/_snaps/burger/toppings.png`. If a future change in the code creates a different file, it will be saved in `tests/testthat/_snaps/burger/toppings.new.png`. Unlike `expect_snapshot()` and friends, `expect_snapshot_file()` can't provide an automatic diff when the test fails. Instead you'll need to call `snapshot_review()`. This launches a Shiny app that allows you to visually review each change and approve it if it's deliberate: ![](review-image.png) ![](review-text.png) The display varies based on the file type (currently text files, common image files, and csv files are supported).
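To make this concrete, here's a minimal sketch of what a whole-file snapshot test might look like. `save_png()` is a hypothetical helper (it is not part of testthat) that runs plotting code against a temporary `.png` device and returns the file's path:

```{r, eval = FALSE}
# Hypothetical helper: record plotting code into a png file, return its path
save_png <- function(code, width = 400, height = 400) {
  path <- tempfile(fileext = ".png")
  grDevices::png(path, width = width, height = height)
  on.exit(grDevices::dev.off())
  code  # lazily evaluated here, while the png device is active
  path
}

test_that("histogram looks the same", {
  expect_snapshot_file(save_png(hist(mtcars$mpg)), "histogram.png")
})
```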
Sometimes the failure occurs in a non-interactive environment where you can't run `snapshot_review()`, e.g. in `R CMD check`. In this case, the easiest fix is to retrieve the `.new` file, copy it into the appropriate directory, then run `snapshot_review()` locally. If your code was run on a CI platform, you'll need to start by downloading the run "artifact", which contains the check folder.

In most cases, we don't expect you to use `expect_snapshot_file()` directly. Instead, you'll use it via a wrapper that does its best to gracefully skip tests when differences in platform or package versions make it unlikely to generate perfectly reproducible output.

## Previous work

This is not the first time that testthat has attempted to provide snapshot testing (although it's the first time I knew what other languages called them). This section describes some of the previous attempts and why we believe the new approach is better.

-   `verify_output()` has three main drawbacks:

    -   You have to supply a path where the output will be saved. This seems like a small issue, but thinking of a good name and managing the difference between interactive and test-time paths introduces a surprising amount of friction.

    -   It always overwrites the previous result, automatically assuming that the changes are correct. That means you have to use it with git, and it's easy to accidentally accept unwanted changes.

    -   It's relatively coarse grained, which means tests that use it tend to keep growing and growing.

-   `expect_known_output()` is a finer-grained version of `verify_output()` that captures output from a single function. The requirement to produce a path for each individual expectation makes it even more painful to use.

-   `expect_known_value()` and `expect_known_hash()` have all the disadvantages of `expect_known_output()`, but also produce binary output, meaning that you can't easily review test differences in pull requests.
testthat/vignettes/parallel.Rmd0000644000176200001440000002135514164710003016364 0ustar liggesusers---
title: "Running tests in parallel"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Running tests in parallel}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
editor_options:
  markdown:
    wrap: sentence
---

```{r setup, include = FALSE}
library(testthat)
knitr::opts_chunk$set(collapse = TRUE, comment = "#>")
```

To take advantage of parallel tests, add the following line to the `DESCRIPTION`:

    Config/testthat/parallel: true

You'll also need to be using the 3rd edition:

    Config/testthat/edition: 3

## Basic operation

Starting a new R process is relatively expensive, so testthat begins by creating a pool of workers. The size of the pool is determined by `getOption("Ncpus")`, then the `TESTTHAT_CPUS` environment variable. If neither is set, then two processes are started. In any case, testthat will never start more subprocesses than test files.

Each worker begins by loading testthat and the package being tested. It then runs any setup files (so if you have existing setup files, you'll need to make sure they work when executed in parallel).

testthat runs test *files* in parallel. Once the worker pool is initialized, testthat starts sending test files to workers, by default in alphabetical order: as soon as a subprocess has finished, it receives another file, until all files are done. This means that state is persisted across test files: options are *not* reset, loaded packages are *not* unloaded, the global environment is *not* cleared, etc.
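For example, here's a sketch of keeping a state change scoped to a single test so the file leaves no trace behind (this example is illustrative, not from the vignette; it assumes withr is available):

```{r, eval = FALSE}
test_that("wide objects print on one line", {
  # local_options() undoes the change when the test ends, so the next
  # test (and the next file on this worker) starts from a clean state
  withr::local_options(width = 120)
  expect_output(print(1:50), "50")
})
```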
You are responsible for making sure each file leaves the world as it finds it. Because files are run in alphabetical order, you may want to rename your slowest test files so that they start first, e.g. `test-1-slowest.R`, `test-2-next-slowest.R`, etc.

## Common problems

-   If tests fail stochastically (i.e. they sometimes work and sometimes fail), you may have accidentally introduced a dependency between your test files. This sort of dependency is hard to track down due to the random nature, and you'll need to check all tests to make sure that they're not accidentally changing global state.

-   If you use [package scope test fixtures](https://testthat.r-lib.org/articles/test-fixtures.html#package), you'll need to review them to make sure that they work in parallel. For example, if you were previously creating a temporary database in the test directory, you'd need to instead create it in the session temporary directory so that each process gets its own independent version.

## Performance

There is some overhead associated with running tests in parallel:

-   Startup cost is linear in the number of subprocesses, because we need to create them in a loop; this is about 50ms per subprocess on my laptop. Each subprocess also needs to load testthat and the tested package; this happens in parallel, and we cannot do much about it.

-   Clean up time is again linear in the number of subprocesses, and is about 80ms per subprocess on my laptop.

-   It seems that sending a message (i.e. a passing or failing expectation) currently takes about 2ms. This is the total cost that includes sending the message, receiving it, and relaying it to a non-parallel reporter.

This overhead generally means that if you have many test files that take a short amount of time, you're unlikely to see a huge benefit from using parallel tests. For example, testthat itself takes about 10s to run tests in serial, and 8s to run the tests in parallel.

### Changing the order of the test files

By default testthat starts the test files in alphabetical order. If you have a few test files that take much longer than the rest, this might not be the best order. Ideally the slow files would start first, as the whole test suite will take at least as much time as its slowest test file. You can change the order with the `Config/testthat/start-first` option in `DESCRIPTION`. For example, testthat currently has:

    Config/testthat/start-first: watcher, parallel*

The format is a comma-separated list of glob patterns, see `?utils::glob2rx`. The matching test files will start first. (The `test-` prefix is ignored.)

## Reporters

### Default reporters

See `default_reporter()` for how testthat selects the default reporter for `devtools::test()` and `testthat::test_local()`. In short, testthat selects `ProgressReporter` for non-parallel and `ParallelProgressReporter` for parallel tests by default. (Other testthat test functions, like `test_check()`, `test_file()`, etc. select different reporters by default.)

### Parallel support

Most reporters support parallel tests. If a reporter is passed to `devtools::test()`, `testthat::test_dir()`, etc. directly, and it does not support parallel tests, then testthat runs the test files sequentially. Currently the following reporters *don't* support parallel tests:

-   `DebugReporter`, because it is not currently possible to debug subprocesses.

-   `JunitReporter`, because this reporter records timing information for each test block, and this is currently only available for reporters that support multiple active test files.
    (See "Writing parallel reporters" below.)

-   `LocationReporter`, because testthat currently does not include location information for successful tests when running in parallel, to minimize messaging between the processes.

-   `StopReporter`, as this is a reporter that testthat uses for interactive `expect_that()` calls.

The other built-in reporters all support parallel tests, with some subtle differences:

-   Reporters that stop after a certain number of failures can only stop at the end of a test file.

-   Reporters report all information about a file at once, unless they support *parallel updates*. E.g. `ProgressReporter` does not update its display until a test file is complete.

-   The standard output and standard error from the test files (i.e. output from `print()`, `cat()`, `message()`, etc.) are currently lost. If you want to use `cat()` or `message()` for print-debugging test cases, the best approach is to temporarily run tests sequentially, by changing the `Config` entry in `DESCRIPTION` or by selecting a non-parallel reporter, e.g. the `CheckReporter`:

``` {.r}
devtools::test(filter = "badtest", reporter = "check")
```

### Writing parallel reporters

To support parallel tests, a reporter must be able to function when the test files run in a subprocess. For example, `DebugReporter` does not support parallel tests, because it requires direct interaction with the frames in the subprocess. When running in parallel, testthat does not provide location information (source references) for test successes.

To support parallel tests, a reporter must set `self$capabilities$parallel_support` to `TRUE` in its `initialize()` method:

``` {.r}
...
initialize = function(...) {
  super$initialize(...)
  self$capabilities$parallel_support <- TRUE
  ...
}
...
```

When running in parallel, testthat runs the reporter in the main process, and relays information between the reporter and the test code transparently. (Currently the reporter does not even know that the tests are running in parallel.)

If a reporter does not support parallel updates (see below), then testthat internally caches all calls to the reporter methods from subprocesses, until a test file is complete. This is because these reporters are not prepared for running multiple test files concurrently. Once a test file is complete, testthat calls the reporter's `$start_file()` method, relays all `$start_test()`, `$end_test()`, `$add_result()`, etc. calls in the order they came in from the subprocess, and calls `$end_file()`.

### Parallel updates

The `ParallelProgressReporter` supports parallel updates. This means that once a message from a subprocess comes in, the reporter is updated immediately. For this to work, a reporter must be able to handle multiple test files concurrently. A reporter declares parallel update support by setting `self$capabilities$parallel_updates` to `TRUE`:

``` {.r}
...
initialize = function(...) {
  super$initialize(...)
  self$capabilities$parallel_support <- TRUE
  self$capabilities$parallel_updates <- TRUE
  ...
}
...
```

For these reporters, testthat does not cache the messages from the subprocesses. Instead, when a message comes in:

-   It calls the `$start_file()` method, letting the reporter know which file the following calls apply to. This means that the reporter can receive multiple `$start_file()` calls for the same file.

-   It then relays the message from the subprocess, calling the appropriate `$start_test()`, `$add_result()`, etc. method.
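Putting the two capability flags together, a skeletal reporter supporting parallel updates might look like the sketch below (the class name and method bodies are made up for illustration; the `$update()` method is described next):

``` {.r}
MySpinnerReporter <- R6::R6Class("MySpinnerReporter",
  inherit = testthat::Reporter,
  public = list(
    initialize = function(...) {
      super$initialize(...)
      self$capabilities$parallel_support <- TRUE
      self$capabilities$parallel_updates <- TRUE
    },
    start_file = function(name) {
      # In parallel mode this may be called repeatedly for the same file,
      # once per batch of messages relayed from a subprocess
      cat("\n", name, ": ", sep = "")
    },
    add_result = function(context, test, result) {
      cat(".")
    },
    update = function() {
      # Called regularly even without new messages; a real implementation
      # could advance a spinner here
    }
  )
)
```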
testthat also calls the reporter's new `$update()` method regularly, even if it does not receive any messages from the subprocesses. (It currently aims to do this every 100ms, but there are no guarantees.) The `$update()` method may implement a spinner to let the user know that the tests are running.
testthat/vignettes/review-image.png0000644000176200001440000023026614164710003017216 0ustar liggesusers[binary PNG image data omitted]
testthat/vignettes/skipping.Rmd0000644000176200001440000001116114164710003016406 0ustar liggesusers---
title: "Skipping tests"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Skipping tests}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

Sometimes you have tests that you don't want to run in certain circumstances. This vignette describes how to **skip** tests to avoid execution in undesired environments.

Skipping is a relatively advanced topic because in most cases you want all your tests to run everywhere. The most common exceptions are:

-   You're testing a web service that occasionally fails, and you don't want to run the tests on CRAN. Or maybe the API requires authentication, and you can only run the tests when you've [securely distributed](https://gargle.r-lib.org/articles/articles/managing-tokens-securely.html) some secrets.

-   You're relying on features that not all operating systems possess, and want to make sure your code doesn't run on a platform where it doesn't work. This platform tends to be Windows, since, amongst other things, it lacks full UTF-8 support.

-   You're writing your tests for multiple versions of R or multiple versions of a dependency, and you want to skip when a feature isn't available.

You generally don't need to skip tests if a suggested package is not installed. This is only needed in exceptional circumstances, e.g. when a package is not available on some operating system.

```{r setup}
library(testthat)
```

## Basics

testthat comes with a variety of helpers for the most common situations:

-   `skip_on_cran()` skips tests on CRAN. This is useful for slow tests and tests that occasionally fail for reasons outside of your control.

-   `skip_on_os()` allows you to skip tests on a specific operating system. Generally, you should strive to avoid this as much as possible (so your code works the same on all platforms), but sometimes it's just not possible.

-   `skip_on_ci()` skips tests on most continuous integration platforms (e.g. GitHub Actions, Travis, Appveyor).
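For example, a test that talks to a web service might combine several of these helpers (a sketch only; `fetch_data()` stands in for a function from your own package):

```{r, eval = FALSE}
test_that("fetch_data() returns a data frame", {
  skip_on_cran()                # flaky network access shouldn't fail CRAN checks
  skip_if_offline()             # pointless without a connection
  skip_if_not_installed("curl") # only run when the suggested backend is present

  expect_s3_class(fetch_data("https://example.com/data.csv"), "data.frame")
})
```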
You can also easily implement your own using either `skip_if()` or `skip_if_not()`, which both take an expression that should yield a single `TRUE` or `FALSE`.

All reporters show which tests are skipped. As of testthat 3.0.0, ProgressReporter (used interactively) and CheckReporter (used inside of `R CMD check`) also display a summary of skips across all tests. It looks something like this:

    ── Skipped tests ──────────────────────────────────────────────────────
    ● No token (3)
    ● On CRAN (1)

You should keep an eye on this when developing interactively to make sure that you're not accidentally skipping the wrong things.

## Helpers

If you find yourself using the same `skip_if()`/`skip_if_not()` expression across multiple tests, it's a good idea to create a helper function. This function should start with `skip_` and live somewhere in your `R/` directory.

```{r}
skip_if_Tuesday <- function() {
  if (as.POSIXlt(Sys.Date())$wday != 2) {
    return(invisible(TRUE))
  }

  skip("Not run on Tuesday")
}
```

It's important to test your skip helpers because it's easy to miss that you're skipping more often than desired, and then the test code is never run. This is unlikely to happen locally (since you'll see the skipped tests in the summary), but is quite possible in continuous integration. For that reason, it's a good idea to add a test that checks your skip is activated when you expect it to be.

Skips are a special type of condition, so you can test for their presence/absence with `expect_condition()`. For example, imagine that you've defined a custom skipper that skips tests whenever an environment variable `DANGER` is set:

```{r}
skip_if_dangerous <- function() {
  if (identical(Sys.getenv("DANGER"), "")) {
    return(invisible(TRUE))
  }

  skip("Not run in dangerous environments")
}
```

Then you can use `expect_condition()` to test that it skips tests when it should, and doesn't skip when it shouldn't:

```{r}
test_that("skip_if_dangerous works", {
  # Test that a skip happens
  withr::local_envvar(DANGER = "yes")
  expect_condition(skip_if_dangerous(), class = "skip")

  # Test that a skip doesn't happen
  withr::local_envvar(DANGER = "")
  expect_condition(skip_if_dangerous(), NA, class = "skip")
})
```

Testing `skip_if_Tuesday()` is harder because there's no way to control the skipping from the outside. That means you'd need to "mock" its behaviour in a test, using the [mockery](https://github.com/r-lib/mockery) or [mockr](https://krlmlr.github.io/mockr/) packages.
testthat/vignettes/test-fixtures.Rmd0000644000176200001440000003575414164710003017426 0ustar liggesusers---
title: "Test fixtures"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Test fixtures}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",
  # Since `after` is not available in on.exit() prior to R 3.5
  eval = getRversion() >= "3.5"
)
```

## Test hygiene

> Take nothing but memories, leave nothing but footprints.
>
> ― Chief Si'ahl

Ideally, a test should leave the world exactly as it found it. But you often need to make some changes in order to exercise every part of your code:

-   Create a file or directory
-   Create a resource on an external system
-   Set an R option
-   Set an environment variable
-   Change working directory
-   Change an aspect of the tested package's state

How can you clean up these changes to get back to a clean slate? Scrupulous attention to cleanup is more than just courtesy or fastidiousness. It is also self-serving.
The state of the world after test `i` is the starting state for test `i + 1`. Tests that change state willy-nilly eventually end up interfering with each other in ways that can be very difficult to debug. Most tests are written with an implicit assumption about the starting state, usually whatever *tabula rasa* means for the target domain of your package. If you accumulate enough sloppy tests, you will eventually find yourself asking the programming equivalent of questions like "Who forgot to turn off the oven?" and "Who didn't clean up after the dog?".

It's also important that your setup and cleanup are easy to use when working interactively. When a test fails, you want to be able to quickly recreate the exact environment in which the test is run so you can interactively experiment to figure out what went wrong.

This article introduces a powerful technique that allows you to solve both problems: **test fixtures**. We'll begin with an introduction to the tools that make fixtures possible, then talk about exactly what a test fixture is, and show a few examples. Much of this vignette is derived from a blog post; if this is your first encounter with `on.exit()` or `withr::defer()`, I'd recommend starting there, as it gives a gentler introduction. This vignette moves a little faster since it's designed as more of a reference doc.

```{r}
library(testthat)
```

## Foundations

Before we can talk about test fixtures, we need to lay some foundations to help you understand how they work. We'll motivate the discussion with a `sloppy()` function that prints a number with a specific number of significant digits by adjusting an R option:

```{r include = FALSE}
op <- options()
```

```{r}
sloppy <- function(x, sig_digits) {
  options(digits = sig_digits)
  print(x)
}

pi
sloppy(pi, 2)
pi
```

```{r include = FALSE}
options(op)
```

Notice how `pi` prints differently before and after the call to `sloppy()`. Calling `sloppy()` has a side effect: it changes the `digits` option globally, not just within its own scope of operations. This is what we want to avoid[^1].

[^1]: Don't worry, I'm restoring global state (specifically, the `digits` option) behind the scenes here.

### `on.exit()`

The first function you need to know about is base R's `on.exit()`. `on.exit()` calls the code supplied to its first argument when the current function exits, regardless of whether it returns a value or errors. You can use `on.exit()` to clean up after yourself by ensuring that every mess-making function call is paired with an `on.exit()` call that cleans up. We can use this idea to turn `sloppy()` into `neat()`:

```{r}
neat <- function(x, sig_digits) {
  op <- options(digits = sig_digits)
  on.exit(options(op), add = TRUE, after = FALSE)

  print(x)
}

pi
neat(pi, 2)
pi
```

Here we make use of a useful pattern `options()` implements: when you call `options(digits = sig_digits)`, it both sets the `digits` option *and* (invisibly) returns the previous value of `digits`. We can then use that value to restore the previous options.

`on.exit()` also works in tests:

```{r}
test_that("can print one digit of pi", {
  op <- options(digits = 1)
  on.exit(options(op), add = TRUE, after = FALSE)

  expect_output(print(pi), "3")
})
pi
```

There are three main drawbacks to `on.exit()`:

-   You should always call it with `add = TRUE` and `after = FALSE`. These ensure that the call is **added** to the list of deferred tasks (instead of replacing it) and is added to the **front** of the stack (not the back, so that cleanup occurs in reverse order to setup).
    These arguments only matter if you're using multiple `on.exit()` calls, but it's a good habit to always use them to avoid potential problems down the road.

-   It doesn't work outside a function or test. If you run the following code in the global environment, you won't get an error, but the cleanup code will never be run:

    ```{r, eval = FALSE}
    op <- options(digits = 1)
    on.exit(options(op), add = TRUE, after = FALSE)
    ```

    This is annoying when you are running tests interactively.

-   You can't program with it; `on.exit()` always works inside the *current* function, so you can't wrap up repeated `on.exit()` code in a helper function.

To resolve these drawbacks, we use `withr::defer()`.

### `withr::defer()`

`withr::defer()` resolves the main drawbacks of `on.exit()`. First, it has the behaviour we want by default; no extra arguments needed:

```{r}
neat <- function(x, sig_digits) {
  op <- options(digits = sig_digits)
  withr::defer(options(op))

  print(x)
}
```

Second, it works when called in the global environment. Since the global environment isn't perishable, like a test environment is, you have to call `deferred_run()` explicitly to execute the deferred events. You can also clear them, without running, with `deferred_clear()`.

```{r}
withr::defer(print("hi"))
#> Setting deferred event(s) on global environment.
#> * Execute (and clear) with `deferred_run()`.
#> * Clear (without executing) with `deferred_clear()`.

withr::deferred_run()
#> [1] "hi"
```

Finally, `withr::defer()` lets you pick which function to bind the cleanup behaviour to. This makes it possible to create helper functions.

### "Local" helpers

Imagine we have many functions where we want to temporarily set the digits option. Wouldn't it be nice if we could write a helper function to automate this? Unfortunately we can't write such a helper with `on.exit()`:

```{r}
local_digits <- function(sig_digits) {
  op <- options(digits = sig_digits)
  on.exit(options(op), add = TRUE, after = FALSE)
}

neater <- function(x, sig_digits) {
  local_digits(1)
  print(x)
}

neater(pi)
```

This code doesn't work because the cleanup happens too soon, when `local_digits()` exits, not when `neater()` finishes. Fortunately, `withr::defer()` allows us to solve this problem by providing an `envir` argument that allows you to control when cleanup occurs. The exact details of how this works are rather complicated, but fortunately there's a common pattern you can use without understanding all the details. Your helper function should always have an `env` argument that defaults to `parent.frame()`, which you pass to the second argument of `defer()`:

```{r}
local_digits <- function(sig_digits, env = parent.frame()) {
  op <- options(digits = sig_digits)
  withr::defer(options(op), env)
}

neater(pi)
```

Just like `on.exit()` and `defer()`, our helper also works within tests:

```{r}
test_that("withr lets us write custom helpers for local state manipulation", {
  local_digits(1)
  expect_output(print(exp(1)), "3")

  local_digits(3)
  expect_output(print(exp(1)), "2.72")
})
print(exp(1))
```

We always call these helper functions `local_*()`; "local" here refers to the fact that the state change persists only locally, for the lifetime of the associated function or test.
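The same pattern extends to any paired change-and-restore operation. Here's a sketch of a helper for environment variables (`local_envvar_demo()` is a made-up name for illustration; as the next section shows, withr already ships a real `local_envvar()`):

```{r, eval = FALSE}
local_envvar_demo <- function(var, value, env = parent.frame()) {
  old <- Sys.getenv(var, unset = NA)
  do.call(Sys.setenv, stats::setNames(list(value), var))

  withr::defer({
    # Restore the old value, or unset the variable if it didn't exist before
    if (is.na(old)) {
      Sys.unsetenv(var)
    } else {
      do.call(Sys.setenv, stats::setNames(list(old), var))
    }
  }, env)
}
```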
### Pre-existing helpers

But before you write your own helper function, make sure to check out the wide range of local functions already provided by withr:

| Do / undo this              | withr function    |
|-----------------------------|-------------------|
| Create a file               | `local_file()`    |
| Set an R option             | `local_options()` |
| Set an environment variable | `local_envvar()`  |
| Change working directory    | `local_dir()`     |

We can use `withr::local_options()` to write yet another version of `neater()`:

```{r}
neatest <- function(x, sig_digits) {
  withr::local_options(list(digits = sig_digits))
  print(x)
}

neatest(pi, 3)
```

Each `local_*()` function has a companion `with_*()` function, which is a nod to `with()`, and the inspiration for withr's name. We won't use the `with_*()` functions here, but you can learn more about them at [withr.r-lib.org](https://withr.r-lib.org).

## Test fixtures

Testing is often demonstrated with cute little tests and functions where all the inputs and expected results can be inlined. But in real packages, things aren't always so simple, and functions often depend on other global state. For example, take this variant on `message()` that only shows a message if the `verbose` option is `TRUE`. How would you test that setting the option does indeed silence the message?

```{r}
message2 <- function(...) {
  if (!isTRUE(getOption("verbose"))) {
    return()
  }

  message(...)
}
```

In some cases, it's possible to make the global state an explicit argument to the function. For example, we could refactor `message2()` to make the verbosity an explicit argument:

```{r}
message3 <- function(..., verbose = getOption("verbose")) {
  if (!isTRUE(verbose)) {
    return()
  }

  message(...)
}
```

Making external state explicit is often worthwhile, because it makes it clearer exactly what inputs determine the outputs of your function. But it's simply not possible in many cases. That's where test fixtures come in: they allow you to temporarily change global state in order to test your function.

"Test fixture" is a pre-existing term in the software engineering world (and beyond):

> A test fixture is something used to consistently test some item, device, or piece of software.
>
> --- [Wikipedia](https://en.wikipedia.org/wiki/Test_fixture)

A **test fixture** is just a `local_*()` function that you use to change state in such a way that you can reach inside and test parts of your code that would otherwise be challenging. For example, here's how you could use `withr::local_options()` as a test fixture to test `message2()`:

```{r}
test_that("message2() output depends on verbose option", {
  withr::local_options(verbose = TRUE)
  expect_message(message2("Hi!"))

  withr::local_options(verbose = FALSE)
  expect_message(message2("Hi!"), NA)
})
```

### Case study: usethis

One place that we use test fixtures extensively is in the usethis package ([usethis.r-lib.org](https://usethis.r-lib.org)), which provides functions for looking after the files and folders in R projects, especially packages. Many of these functions only make sense in the context of a package, which means to test them, we also have to be working inside an R package. We need a way to quickly spin up a minimal package in a temporary directory, then test some functions against it, then destroy it.
To solve this problem we create a test fixture, which we place in `R/test-helpers.R` so that it's available for both testing and interactive experimentation:

```{r, eval = FALSE}
local_create_package <- function(dir = file_temp(), env = parent.frame()) {
  old_project <- proj_get_()

  # create new folder and package
  create_package(dir, open = FALSE) # A
  withr::defer(fs::dir_delete(dir), envir = env) # -A

  # change working directory
  setwd(dir) # B
  withr::defer(setwd(old_project), envir = env) # -B

  # switch to new usethis project
  proj_set(dir) # C
  withr::defer(proj_set(old_project, force = TRUE), envir = env) # -C

  dir
}
```

Note that the cleanup automatically unfolds in the opposite order from the setup. Setup is `A`, then `B`, then `C`; cleanup is `-C`, then `-B`, then `-A`. This is important because we must create the directory `dir` before we can make it the working directory, and we must restore the original working directory before we can delete `dir`: we can't delete `dir` while it's still the working directory!

`local_create_package()` is used in over 170 tests. Here's one example that checks that `usethis::use_roxygen_md()` does the setup necessary to use roxygen2 in a package, with markdown support turned on. All 3 expectations consult the DESCRIPTION file, directly or indirectly. So it's very convenient that `local_create_package()` creates a minimal package, with a valid `DESCRIPTION` file, for us to test against. And when the test is done --- poof! --- the package is gone.

```{r eval = FALSE}
test_that("use_roxygen_md() adds DESCRIPTION fields", {
  pkg <- local_create_package()
  use_roxygen_md()

  expect_true(uses_roxygen_md())
  expect_equal(desc::desc_get("Roxygen", pkg)[[1]], "list(markdown = TRUE)")
  expect_true(desc::desc_has_fields("RoxygenNote", pkg))
})
```

## Scope

So far we have applied our test fixture to individual tests, but it's also possible to apply one to a file or package.

### File

If you move the `local_*()` call outside of a `test_that()` block, it will affect all tests that come after it. This means that by calling the test fixture at the top of the file you can change the behaviour for all tests. This has both advantages and disadvantages:

-   If you would otherwise have called the fixture in every test, you've saved yourself a bunch of work and duplicate code.

-   But on the downside, if a test fails and you want to recreate the failure in an interactive environment so you can debug, you need to remember to run all the setup code at the top of the file first.

Generally, I think it's better to copy and paste test fixtures across many tests --- sure, it adds some duplication to your code, but it makes debugging test failures so much easier.

### Package

To run code before any test is run, you can create a file called `tests/testthat/setup.R`. If the code in this file needs cleanup, you can use the special `teardown_env()`:

```{r, eval = FALSE}
# Run before any test
write.csv(mtcars, "mtcars.csv")

# Run after all tests
withr::defer(unlink("mtcars.csv"), teardown_env())
```

Setup code is typically best used to create external resources that are needed by many tests. It's best kept to a minimum because you will have to manually run it before interactively debugging tests.

## Other challenges

A collection of miscellaneous problems that I don't know where else to describe:

-   There are a few base functions that are hard to test because they depend on state that you can't control.
    One such example is `interactive()`: there's no way to write a test fixture that allows you to pretend that interactive is either `TRUE` or `FALSE`. So we now usually use `rlang::is_interactive()`, which can be controlled by the `rlang_interactive` option.

-   If you're using a test fixture in a function, be careful about what you return. For example, if you write a function that does `dir <- local_create_package()`, you shouldn't return `dir`, because after the function returns, the directory will no longer exist.
testthat/vignettes/custom-expectation.Rmd0000644000176200001440000000674614164710003020430 0ustar liggesusers---
title: "Custom expectations"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Custom expectations}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r setup, include = FALSE}
library(testthat)
knitr::opts_chunk$set(collapse = TRUE, comment = "#>")
```

This vignette shows you how to create custom expectations that work identically to the built-in `expect_` functions. Since these functions will need to be loaded when your package is loaded for testing, it is recommended that `expect_` functions be defined in `test-helpers.R` in your package's `R/` directory.

## Creating an expectation

There are three main parts to writing an expectation, as illustrated by `expect_length()`:

```{r}
expect_length <- function(object, n) {
  # 1. Capture object and label
  act <- quasi_label(rlang::enquo(object), arg = "object")

  # 2. Call expect()
  act$n <- length(act$val)
  expect(
    act$n == n,
    sprintf("%s has length %i, not length %i.", act$lab, act$n, n)
  )

  # 3. Invisibly return the value
  invisible(act$val)
}
```

## Quasi-labelling

The first step in any expectation is to capture the actual object and generate a label for it to use if a failure occurs. All testthat expectations support quasiquotation so that you can unquote variables. This makes it easier to generate good labels when the expectation is called from a function or within a for loop.

By convention, the first argument to every `expect_` function is called `object`, and you capture its value (`val`) and label (`lab`) with `act <- quasi_label(enquo(object))`, where `act` is short for actual.

### Verify the expectation

Next, you should verify the expectation. This often involves a little computation (here just figuring out the `length`), and you should typically store the results back into the `act` object.

Next you call `expect()`. This has two arguments:

1.  `ok`: was the expectation successful? This is usually easy to write.

2.  `failure_message`: what informative error message should be reported to the user so that they can diagnose the problem? This is often hard to write! For historical reasons, most built-in expectations generate these with `sprintf()`, but today I'd recommend using the [glue](https://glue.tidyverse.org) package.

### Invisibly return the input

Expectation functions are called primarily for their side-effects (triggering a failure), so they should invisibly return their input, `act$val`. This allows expectations to be chained:

```{r}
mtcars %>%
  expect_type("list") %>%
  expect_s3_class("data.frame") %>%
  expect_length(11)
```

## `succeed()` and `fail()`

For expectations with more complex logic governing when success or failure occurs, you can use `succeed()` and `fail()`.
These are simple wrappers around `expect()` that allow you to write code that looks like this:

```{r}
expect_length <- function(object, n) {
  act <- quasi_label(rlang::enquo(object), arg = "object")

  act$n <- length(act$val)
  if (act$n == n) {
    succeed()
    return(invisible(act$val))
  }

  message <- sprintf("%s has length %i, not length %i.", act$lab, act$n, n)
  fail(message)
}
```

## Testing your expectations

Use the expectations `expect_success()` and `expect_failure()` to test your expectation:

```{r}
test_that("length computed correctly", {
  expect_success(expect_length(1, 1))
  expect_failure(expect_length(1, 2), "has length 1, not length 2.")
  expect_success(expect_length(1:10, 10))
  expect_success(expect_length(letters[1:5], 5))
})
```
testthat/vignettes/third-edition.Rmd0000644000176200001440000002245714164710003017337 0ustar liggesusers---
title: "testthat 3e"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{testthat 3e}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

testthat 3.0.0 introduces the idea of an "edition" of testthat. An edition is a bundle of behaviours that you have to explicitly choose to use, allowing us to make otherwise backward-incompatible changes. This is particularly important for testthat since it has a very large number of packages that use it (almost 5,000 at last count). Choosing to use the 3rd edition allows you to use our latest recommendations for ongoing and new work, while historical packages continue to use the old behaviour. (We don't anticipate creating new editions very often, and they'll always be matched with a major version, i.e. if there's another edition, it'll be the fourth edition and will come with testthat 4.0.0.)

This vignette shows you how to activate the 3rd edition, introduces the main features, and discusses common challenges when upgrading a package. If you have a problem that this vignette doesn't cover, please let me know, as it's likely that the problem also affects others.

```{r, message = FALSE}
library(testthat)
local_edition(3)
```

## Activating

The usual way to activate the 3rd edition is to add a line to your `DESCRIPTION`:

    Config/testthat/edition: 3

This will activate the 3rd edition for every test in your package. You can also control the edition used for individual tests with `testthat::local_edition()`:

```{r}
test_that("I can use the 3rd edition", {
  local_edition(3)
  expect_true(TRUE)
})
```

This is also useful if you've switched to the 3rd edition and have a couple of tests that fail. You can use `local_edition(2)` to revert to the old behaviour, giving you some breathing room to figure out the underlying issue.

```{r}
test_that("I want to use the 2nd edition", {
  local_edition(2)
  expect_true(TRUE)
})
```

## Changes

There are three major changes in the 3rd edition:

-   A number of outdated functions are now **deprecated**, so you'll be warned about them every time you run your tests (but they won't cause `R CMD check` to fail).

-   testthat no longer silently swallows **messages**; you now need to deliberately handle them.

-   `expect_equal()` and `expect_identical()` now use the [**waldo**](https://waldo.r-lib.org/) package instead of `identical()` and `all.equal()`. This makes them more consistent and provides an enhanced display of differences when a test fails.

### Deprecations

A number of outdated functions have been deprecated.
## Changes

There are three major changes in the 3rd edition:

-   A number of outdated functions are now **deprecated**, so you'll be warned about them every time you run your tests (but they won't cause `R CMD check` to fail).

-   testthat no longer silently swallows **messages**; you now need to deliberately handle them.

-   `expect_equal()` and `expect_identical()` now use the [**waldo**](https://waldo.r-lib.org/) package instead of `identical()` and `all.equal()`. This makes them more consistent and provides an enhanced display of differences when a test fails.

### Deprecations

A number of outdated functions have been deprecated. Most of these functions have not been recommended for a number of years, but before the introduction of the edition idea, I didn't have a good way of preventing people from using them without breaking a lot of code on CRAN.

-   `context()` is formally deprecated. testthat has been moving away from `context()` in favour of file names for quite some time, and now you'll be strongly encouraged to remove these calls from your tests.

-   `expect_is()` is deprecated in favour of the more specific `expect_type()`, `expect_s3_class()`, and `expect_s4_class()`. This ensures that you check the expected class along with the expected OO system.

-   The very old `expect_that()` syntax is now deprecated. This was an overly clever API that I regretted even before the release of testthat 1.0.0.

-   `expect_equivalent()` has been deprecated since it is now equivalent (HA HA) to `expect_equal(ignore_attr = TRUE)`. The main difference is that it won't ignore names; you'll need an explicit `unname()` if you deliberately want to ignore them.

-   `setup()` and `teardown()` are deprecated in favour of test fixtures. See `vignette("test-fixtures")` for details.

-   `expect_known_output()`, `expect_known_value()`, `expect_known_hash()`, and `expect_equal_to_reference()` are all deprecated in favour of `expect_snapshot_output()` and `expect_snapshot_value()`.

-   `with_mock()` and `local_mock()` are deprecated; please use the [mockr](https://krlmlr.github.io/mockr/) or [mockery](https://github.com/r-lib/mockery#mockery) packages instead.

Fixing these deprecation warnings should be straightforward.

### Warnings

In the second edition, `expect_warning()` swallows all warnings regardless of whether or not they match the `regexp` or `class`:

```{r}
f <- function() {
  warning("First warning")
  warning("Second warning")
  warning("Third warning")
}

local_edition(2)
expect_warning(f(), "First")
```

In the third edition, `expect_warning()` captures at most one warning, so the others will bubble up:

```{r}
local_edition(3)
expect_warning(f(), "First")
```

You can either add additional expectations to catch these warnings, or silence them all with `suppressWarnings()`:

```{r}
f() %>%
  expect_warning("First") %>%
  expect_warning("Second") %>%
  expect_warning("Third")

f() %>%
  expect_warning("First") %>%
  suppressWarnings()
```

Alternatively, you might want to capture them all in a snapshot test:

```{r}
test_that("f() produces expected outputs/messages/warnings", {
  expect_snapshot(f())
})
```

The same principle also applies to `expect_message()`, but message handling has changed in a more radical way, as described next.

### Messages

For reasons that I can no longer remember, the second edition silently ignores all messages. This is inconsistent with other types of output, so as of the 3rd edition, they now bubble up to your test results. You'll have to explicitly ignore them with `suppressMessages()`, or, if they're important, test for their presence with `expect_message()`.
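
For example, here is a sketch with an invented `g()` that signals a message:

```{r}
g <- function() {
  message("Loading data")
  42
}

# Either assert that the message is emitted...
expect_message(g(), "Loading")

# ...or deliberately silence it
expect_equal(suppressMessages(g()), 42)
```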
### waldo

Probably the biggest day-to-day difference (and the biggest reason to upgrade!) is the use of [`waldo::compare()`](https://waldo.r-lib.org/reference/compare.html) inside of `expect_equal()` and `expect_identical()`. The goal of waldo is to find and concisely describe the difference between a pair of R objects, and it's designed specifically to help you figure out what's gone wrong in your unit tests.

```{r, error = TRUE}
f1 <- factor(letters[1:3])
f2 <- ordered(letters[1:3], levels = letters[1:4])

local_edition(2)
expect_equal(f1, f2)

local_edition(3)
expect_equal(f1, f2)
```

waldo looks even better in your console because it carefully uses colours to help highlight the differences.

The use of waldo also makes precise the difference between `expect_equal()` and `expect_identical()`: `expect_equal()` sets `tolerance` so that waldo will ignore small numerical differences arising from floating point computation. Otherwise the functions are identical (HA HA).

This change is likely to result in the most work during an upgrade, because waldo can give slightly different results to both `identical()` and `all.equal()` in moderately common situations. I believe that on the whole the differences are meaningful and useful, so you'll need to handle them by tweaking your tests. The following changes are most likely to affect you:

-   `expect_equal()` previously ignored the environments of formulas and functions. This is most likely to arise if you are testing models. It's worth thinking about what the correct values should be, but if that is too annoying you can opt out of the comparison with `ignore_function_env` or `ignore_formula_env`.

-   `expect_equal()` used a combination of `all.equal()` and a home-grown `testthat::compare()`, which unfortunately used a slightly different definition of tolerance. Now `expect_equal()` always uses the same definition of tolerance everywhere, which may require tweaks to your existing tolerance values.

-   `expect_equal()` previously ignored timezone differences when one object had the current timezone set implicitly (with `""`) and the other had it set explicitly:

```{r, error = TRUE}
dt1 <- dt2 <- ISOdatetime(2020, 1, 2, 3, 4, 0)
attr(dt1, "tzone") <- ""
attr(dt2, "tzone") <- Sys.timezone()

local_edition(2)
expect_equal(dt1, dt2)

local_edition(3)
expect_equal(dt1, dt2)
```

### Reproducible output

In the third edition, `test_that()` automatically calls `local_reproducible_output()`, which sets a number of options and environment variables to make output as reproducible as possible across systems. This includes setting:

-   `options(crayon.enabled = FALSE)` and `options(cli.unicode = FALSE)` so that the crayon and cli packages produce raw ASCII output.

-   `Sys.setlocale("LC_COLLATE", "C")` so that sorting a character vector returns the same order regardless of the system language.

-   `options(width = 80)` so print methods always generate the same output regardless of your actual console width.

See the documentation for more details.
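
If a particular test needs different settings, you can call `local_reproducible_output()` yourself inside the test to override them. A minimal sketch (the width of 120 is an arbitrary choice):

```{r}
test_that("output can use a wider console", {
  local_reproducible_output(width = 120)
  expect_equal(getOption("width"), 120)
})
```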
## Alternatives

You might wonder why we came up with the idea of an "edition", rather than creating a new package like testthat3. We decided against making a new package because the 2nd and 3rd editions share a very large amount of code, so making a new package would have substantially increased the maintenance burden: the majority of bugs would've needed to be fixed in two places.

If you're a programmer in other languages, you might wonder why we can't rely on [semantic versioning](https://semver.org). The main reason is that CRAN checks all packages that use testthat with the latest version of testthat, so simply incrementing the major version number doesn't actually help with reducing R CMD check failures on CRAN.
testthat/vignettes/review-text.png0000644000176200001440000022726614164710003017126 0ustar liggesusers
[binary PNG image data omitted: 1384 x 964 screenshot "review-text.png"]
testthat/R/0000755000176200001440000000000014172347710012321 5ustar liggesusers
testthat/R/verify-output.R0000644000176200001440000001176214164710003015303 0ustar liggesusers
#' Verify output
#'
#' @description
#' `r lifecycle::badge("superseded")`
#'
#' This function is superseded in favour of `expect_snapshot()` and friends.
#'
#' This is a regression test that records interwoven code and output into a
#' file, in a similar way to knitting an `.Rmd` file (but see caveats below).
#'
#' `verify_output()` is designed particularly for testing print methods and
#' error messages, where the primary goal is to ensure that the output is
#' helpful to a human. Obviously, you can't test that with code, so the best
#' you can do is make the results explicit by saving them to a text file. This
#' makes the output easy to verify in code reviews, and ensures that you don't
#' change the output by accident.
#'
#' `verify_output()` is designed to be used with git: to see what has changed
#' from the previous run, you'll need to use `git diff` or similar.
#'
#' @section Syntax:
#' `verify_output()` can only capture the abstract syntax tree, losing all
#' whitespace and comments. To mildly offset this limitation:
#'
#' - Strings are converted to R comments in the output.
#' - Strings starting with `# ` are converted to headers in the output.
#'
#' @section CRAN:
#' On CRAN, `verify_output()` will never fail, even if the output changes.
#' This avoids false positives because tests of print methods and error
#' messages are often fragile due to implicit dependencies on other packages,
#' and failure does not imply incorrect computation, just a change in
#' presentation.
#'
#' @param path Path to record results.
#'
#'   This should usually be a call to [test_path()] in order to ensure that
#'   the same path is used when run interactively (when the working directory
#'   is typically the project root), and when run as an automated test (when
#'   the working directory will be `tests/testthat`).
#' @param code Code to execute. This will usually be a multiline expression
#'   contained within `{}` (similarly to `test_that()` calls).
#' @param width Width of console output
#' @param crayon Enable crayon package colouring?
#' @param unicode Enable cli package UTF-8 symbols? If you set this to
#'   `TRUE`, call `skip_if(!cli::is_utf8_output())` to disable the
#'   test on your CI platforms that don't support UTF-8 (e.g. Windows).
#' @param env The environment to evaluate `code` in.
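#' @examples
#' # A minimal sketch (not part of the original docs): record how an object
#' # prints. The first run writes the reference file; later runs fail if the
#' # recorded output changes.
#' \dontrun{
#' verify_output(test_path("test-print-mtcars.txt"), {
#'   "# mtcars"
#'   head(mtcars)
#' })
#' }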
#' @export
#' @keywords internal
verify_output <- function(path, code,
                          width = 80, crayon = FALSE, unicode = FALSE,
                          env = caller_env()) {
  local_reproducible_output(width = width, crayon = crayon, unicode = unicode)

  expr <- substitute(code)
  output <- verify_exec(expr, env = env)

  if (!interactive() && on_cran()) {
    skip("On CRAN")
  }
  compare_file(path, output, update = TRUE)
  invisible()
}

verify_exec <- function(expr, env = caller_env(), replay = output_replay) {
  if (is_call(expr, "{")) {
    exprs <- as.list(expr[-1])
  } else {
    exprs <- list(expr)
  }

  withr::local_pdf(tempfile())
  grDevices::dev.control(displaylist = "enable")

  exprs <- lapply(exprs, function(x) {
    if (is.character(x)) paste0("# ", x) else expr_deparse(x)
  })
  source <- unlist(exprs, recursive = FALSE)

  handler <- evaluate::new_output_handler(value = testthat_print)
  results <- evaluate::evaluate(
    source,
    envir = env,
    new_device = FALSE,
    output_handler = handler
  )

  output <- unlist(lapply(results, replay))
  output <- gsub("\r", "", output, fixed = TRUE)
  output
}

output_replay <- function(x) {
  UseMethod("output_replay", x)
}

#' @export
output_replay.character <- function(x) {
  c(split_lines(x), "")
}

#' @export
output_replay.source <- function(x) {
  lines <- split_lines(x$src)

  # Remove the header line so it doesn't get prefixed
  first <- lines[[1]]
  if (grepl("^# # ", first)) {
    header <- gsub("^# # ", "", first)
    lines <- lines[-1]
  } else {
    header <- NULL
  }

  n <- length(lines)
  if (n > 0) {
    lines[1] <- paste0("> ", lines[1])
    if (n > 1) {
      lines[2:n] <- paste0("+ ", lines[2:n])
    }
  }

  if (!is.null(header)) {
    underline <- strrep("=", nchar(header))
    lines <- c("", header, underline, "", lines)
  }

  lines
}

#' @export
output_replay.error <- function(x) {
  msg <- cnd_message(x)
  if (is.null(x$call)) {
    msg <- paste0("Error: ", msg)
  } else {
    call <- deparse(x$call)
    msg <- paste0("Error in ", call, ": ", msg)
  }
  c(split_lines(msg), "")
}

#' @export
output_replay.warning <- function(x) {
  msg <- cnd_message(x)
  if (is.null(x$call)) {
    msg <- paste0("Warning: ", msg)
  } else {
    call <- deparse(x$call)
    msg <- paste0("Warning in ", call, ": ", msg)
  }
  c(split_lines(msg), "")
}

#' @export
output_replay.message <- function(x) {
  # Messages are the only conditions where a new line is appended automatically
  msg <- paste0("Message: ", sub("\n$", "", cnd_message(x)))
  c(split_lines(msg), "")
}

#' @export
output_replay.recordedplot <- function(x) {
  abort("Plots are not supported")
}

# Helpers ------------------------------------------------------------

split_lines <- function(x) {
  strsplit(x, "\n")[[1]]
}
testthat/R/reporter-debug.R0000644000176200001440000000472214164710002015364 0ustar liggesusers
#' Test reporter: start recovery.
#'
#' This reporter will call a modified version of [recover()] on all
#' broken expectations.
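#'
#' @examples
#' # A sketch (not part of the original docs); the test file path here is
#' # hypothetical:
#' \dontrun{
#' test_file("tests/testthat/test-failing.R", reporter = DebugReporter$new())
#' }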
#'
#' @export
#' @family reporters
DebugReporter <- R6::R6Class("DebugReporter",
  inherit = Reporter,
  public = list(
    add_result = function(context, test, result) {
      if (!expectation_success(result) && !is.null(result$start_frame)) {
        if (sink_number() > 0) {
          sink(self$out)
          on.exit(sink(), add = TRUE)
        }

        recover2(
          start_frame = result$start_frame,
          end_frame = result$end_frame
        )
      }
    }
  )
)

sink_number <- function() {
  sink.number(type = "output")
}

# recover2 ----------------------------------------------------------------

# Modeled after utils::recover(), which is
# part of the R package, https://www.R-project.org
#
# Copyright (C) 1995-2016 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# https://www.R-project.org/Licenses/
recover2 <- function(start_frame = 1L, end_frame = sys.nframe()) {
  calls <- sys.calls()

  if (.isMethodsDispatchOn()) {
    tState <- tracingState(FALSE)
    on.exit(tracingState(tState))
  }

  from <- min(end_frame, length(calls))
  calls <- calls[start_frame:from]

  if (rlang::is_false(peek_option("testthat_format_srcrefs"))) {
    calls <- lapply(calls, zap_srcref)
  }
  calls <- utils::limitedLabels(calls)

  repeat {
    which <- show_menu(calls, "\nEnter a frame number, or 0 to exit ")
    if (which) {
      frame <- sys.frame(start_frame - 2 + which)
      browse_frame(frame, skip = 7 - which)
    } else {
      break
    }
  }
}

# Helpers -----------------------------------------------------------------

zap_srcref <- function(x) {
  attr(x, "srcref") <- NULL
  x
}

show_menu <- function(choices, title = NULL) {
  utils::menu(choices, title = title)
}

browse_frame <- function(frame, skip) {
  eval(
    substitute(browser(skipCalls = skip), list(skip = skip)),
    envir = frame
  )
}
testthat/R/expectation.R0000644000176200001440000001776114164710002014770 0ustar liggesusers
#' The building block of all `expect_` functions
#'
#' Call `expect()` when writing your own expectations. See
#' `vignette("custom-expectation")` for details.
#'
#' @param ok `TRUE` or `FALSE` indicating if the expectation was successful.
#' @param failure_message Message to show if the expectation failed.
#' @param info Character vector containing additional information. Included
#'   for backward compatibility only; new expectations should not use it.
#' @param srcref Location of the failure. Should only need to be explicitly
#'   supplied when you need to forward a srcref captured elsewhere.
#' @param trace An optional backtrace created by [rlang::trace_back()].
#'   When supplied, the expectation is displayed with the backtrace.
#' @param trace_env If `is.null(trace)`, this is used to automatically
#'   generate a traceback running from `test_code()`/`test_file()` to
#'   `trace_env`. You'll generally only need to set this if you're wrapping
#'   an expectation inside another function.
#' @return An expectation object. Signals the expectation condition
#'   with a `continue_test` restart.
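#' @examples
#' # A minimal sketch (invented for illustration): a custom expectation built
#' # on expect(), following the pattern in vignette("custom-expectation").
#' expect_even <- function(object) {
#'   act <- quasi_label(rlang::enquo(object), arg = "object")
#'   expect(
#'     act$val %% 2 == 0,
#'     sprintf("%s is not even.", act$lab)
#'   )
#'   invisible(act$val)
#' }
#' expect_even(4)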
#'
#' @details
#'
#' While `expect()` creates and signals an expectation in one go,
#' `exp_signal()` separately signals an expectation that you
#' have manually created with [new_expectation()]. Expectations are
#' signalled with the following protocol:
#'
#' * If the expectation is a failure or an error, it is signalled with
#'   [base::stop()]. Otherwise, it is signalled with
#'   [base::signalCondition()].
#'
#' * The `continue_test` restart is registered. When invoked, failing
#'   expectations are ignored and normal control flow is resumed to
#'   run the other tests.
#'
#' @seealso [exp_signal()]
#' @export
expect <- function(ok,
                   failure_message,
                   info = NULL,
                   srcref = NULL,
                   trace = NULL,
                   trace_env = caller_env()) {
  type <- if (ok) "success" else "failure"

  # Preserve existing API which appears to be used in package test code.
  # Can remove in next major release.
  if (missing(failure_message)) {
    warn("`failure_message` is missing, with no default.")
    message <- "unknown failure"
  } else {
    # A few packages include code in info that errors on evaluation
    if (ok) {
      message <- paste(failure_message, collapse = "\n")
    } else {
      message <- paste(c(failure_message, info), collapse = "\n")
    }
  }

  if (!ok) {
    if (is.null(trace)) {
      trace <- trace_back(
        top = getOption("testthat_topenv"),
        bottom = trace_env
      )
    }

    # Only show if there's at least one function apart from the expectation
    if (trace_length(trace) <= 1) {
      trace <- NULL
    }
  }

  exp <- expectation(type, message, srcref = srcref, trace = trace)
  exp_signal(exp)
}

#' Construct an expectation object
#'
#' For advanced use only. If you are creating your own expectation, you should
#' call [expect()] instead. See `vignette("custom-expectation")` for more
#' details.
#'
#' Create an expectation with `expectation()` or `new_expectation()`
#' and signal it with `exp_signal()`.
#'
#' @param type Expectation type. Must be one of "success", "failure", "error",
#'   "skip", "warning".
#' @param message Message describing test failure
#' @param srcref Optional `srcref` giving location of test.
#' @inheritParams expect
#' @keywords internal
#' @export
expectation <- function(type, message, srcref = NULL, trace = NULL) {
  new_expectation(type, message, srcref = srcref, trace = trace)
}

#' @rdname expectation
#' @param ... Additional attributes for the expectation object.
#' @param .subclass An optional subclass for the expectation object.
#' @export
new_expectation <- function(type, message, ...,
                            srcref = NULL,
                            trace = NULL,
                            .subclass = NULL) {
  type <- match.arg(type, c("success", "failure", "error", "skip", "warning"))

  structure(
    list(
      message = message,
      srcref = srcref,
      trace = trace
    ),
    class = c(
      .subclass,
      paste0("expectation_", type),
      "expectation",
      # Make broken expectations catchable by try()
      if (type %in% c("failure", "error")) "error",
      "condition"
    ),
    ...
  )
}

#' @rdname expectation
#' @param exp An expectation object, as created by
#'   [new_expectation()].
#' @export
exp_signal <- function(exp) {
  withRestarts(
    if (expectation_broken(exp)) {
      stop(exp)
    } else {
      signalCondition(exp)
    },
    continue_test = function(e) NULL
  )
  invisible(exp)
}

#' @export
#' @rdname expectation
#' @param x object to test for class membership
is.expectation <- function(x) inherits(x, "expectation")

#' @export
print.expectation <- function(x, ...) {
  cat(cli::style_bold("<", paste0(class(x), collapse = "/"), ">"), "\n", sep = "")
  cat(format(x), "\n", sep = "")
  invisible(x)
}

#' @export
format.expectation_success <- function(x, ...) {
  "As expected"
}
{ "As expected" } #' @export format.expectation <- function(x, simplify = "branch", ...) { # Access error fields with `[[` rather than `$` because the # `$.Throwable` from the rJava package throws with unknown fields if (is.null(x[["trace"]]) || trace_length(x[["trace"]]) == 0L) { return(x$message) } max_frames <- if (simplify == "branch") 20 else NULL trace_lines <- format( x$trace, simplify = simplify, ..., max_frames = max_frames ) lines <- c(x$message, crayon::bold("Backtrace:"), trace_lines) paste(lines, collapse = "\n") } # as.expectation ---------------------------------------------------------- as.expectation <- function(x, srcref = NULL) { UseMethod("as.expectation", x) } #' @export as.expectation.expectation <- function(x, srcref = NULL) { x$srcref <- x$srcref %||% srcref x } #' @export as.expectation.error <- function(x, srcref = NULL) { if (is.null(x$call)) { header <- paste0("Error: ") } else { header <- paste0("Error in `", deparse1(x$call), "`: ") } msg <- paste0( if (!is_simple_error(x)) paste0("<", paste(class(x), collapse = "/"), ">\n"), header, cnd_message(x) ) expectation("error", msg, srcref, trace = x[["trace"]]) } is_simple_error <- function(x) { class(x)[[1]] %in% c("simpleError", "rlang_error") } #' @export as.expectation.warning <- function(x, srcref = NULL) { expectation("warning", cnd_message(x), srcref, trace = x[["trace"]]) } #' @export as.expectation.skip <- function(x, ..., srcref = NULL) { expectation("skip", cnd_message(x), srcref, trace = x[["trace"]]) } #' @export as.expectation.default <- function(x, srcref = NULL) { stop( "Don't know how to convert '", paste(class(x), collapse = "', '"), "' to expectation.", call. = FALSE ) } # expectation_type -------------------------------------------------------- expectation_type <- function(exp) { stopifnot(is.expectation(exp)) gsub("^expectation_", "", class(exp)[[1]]) } expectation_success <- function(exp) expectation_type(exp) == "success" expectation_failure <- function(exp) expectation_type(exp) == "failure" expectation_error <- function(exp) expectation_type(exp) == "error" expectation_skip <- function(exp) expectation_type(exp) == "skip" expectation_warning <- function(exp) expectation_type(exp) == "warning" expectation_broken <- function(exp) expectation_failure(exp) || expectation_error(exp) expectation_ok <- function(exp) expectation_type(exp) %in% c("success", "warning") single_letter_summary <- function(x) { switch(expectation_type(x), skip = colourise("S", "skip"), success = colourise(".", "success"), error = colourise("E", "error"), failure = colourise("F", "failure"), warning = colourise("W", "warning"), "?" ) } expectation_location <- function(x) { if (is.null(x$srcref)) { "???" } else { filename <- attr(x$srcref, "srcfile")$filename if (identical(filename, "")) { paste0("Line ", x$srcref[1]) } else { paste0(basename(filename), ":", x$srcref[1], ":", x$srcref[2]) } } } testthat/R/reporter.R0000644000176200001440000001125114166627056014315 0ustar liggesusers#' Manage test reporting #' #' The job of a reporter is to aggregate the results from files, tests, and #' expectations and display them in an informative way. Every testtthat function #' that runs multiple tests provides a `reporter` argument which you can #' use to override the default (which is selected by [default_reporter()]). #' #' You only need to use this `Reporter` object directly if you are creating #' a new reporter. 
#' Currently, creating new Reporters is undocumented,
#' so if you want to create your own, you'll need to make sure that you're
#' familiar with [R6](https://adv-r.hadley.nz/R6.html) and then read the
#' source code for a few.
#'
#' @keywords internal
#' @export
#' @export Reporter
#' @aliases Reporter
#' @importFrom R6 R6Class
#' @family reporters
#' @examples
#' path <- testthat_example("success")
#'
#' test_file(path)
#' # Override the default by supplying the name of a reporter
#' test_file(path, reporter = "minimal")
Reporter <- R6::R6Class("Reporter",
  public = list(
    capabilities = list(parallel_support = FALSE, parallel_updates = FALSE),

    start_reporter = function() {},
    start_context = function(context) {},
    start_test = function(context, test) {},
    start_file = function(filename) {},
    add_result = function(context, test, result) {},
    end_test = function(context, test) {},
    end_context = function(context) {},
    end_reporter = function() {},
    end_file = function() {},
    is_full = function() FALSE,

    update = function() {},

    width = 80,
    unicode = TRUE,
    crayon = TRUE,

    out = NULL,
    initialize = function(file = getOption("testthat.output_file", stdout())) {
      if (is.character(file)) {
        file <- normalizePath(file, mustWork = FALSE)
      }
      self$out <- file
      if (is.character(self$out) && file.exists(self$out)) {
        # If writing to a file, overwrite it if it exists
        file.remove(self$out)
      }

      # Capture at init so not affected by test settings
      self$width <- cli::console_width()
      self$unicode <- cli::is_utf8_output()
      self$crayon <- crayon::has_color()
    },

    # To be used when the reporter needs to produce output inside of an active
    # test, which is almost always from $add_result()
    local_user_output = function(.env = parent.frame()) {
      local_reproducible_output(
        width = self$width,
        crayon = self$crayon,
        .env = .env
      )
      # Can't set unicode with local_reproducible_output() because it can
      # generate a skip if you're temporarily using a non-UTF-8 locale
      withr::local_options(cli.unicode = self$unicode, .local_envir = .env)
    },

    cat_tight = function(...) {
      cat(..., sep = "", file = self$out, append = TRUE)
    },

    cat_line = function(...) {
      cli::cat_line(..., file = self$out)
    },

    rule = function(...) {
      cli::cat_rule(..., file = self$out)
    },

    # The hierarchy of contexts is implied - a context starts with a
    # call to context(), and ends either with the end of the file, or
    # with the next call to context() in the same file. These private
    # methods paper over the details so that contexts appear to work
    # in the same way as tests and expectations.
.context = NULL, .start_context = function(context) { if (!is.null(self$.context)) { self$end_context(self$.context) } self$.context <- context self$start_context(context) invisible() }, end_context_if_started = function(context) { if (!is.null(self$.context)) { self$end_context(self$.context) self$.context <- NULL } invisible() } ) ) #' Retrieve the default reporter #' #' The defaults are: #' * [ProgressReporter] for interactive, non-parallel; override with #' `testthat.default_reporter` #' * [ParallelProgressReporter] for interactive, parallel packages; #' override with `testthat.default_parallel_reporter` #' * [CompactProgressReporter] for single-file interactive; override with #' `testthat.default_compact_reporter` #' * [CheckReporter] for R CMD check; override with `testthat.default_check_reporter` #' #' @export #' @keywords internal default_reporter <- function() { getOption("testthat.default_reporter", "Progress") } #' @export #' @rdname default_reporter default_parallel_reporter <- function() { getOption("testthat.default_parallel_reporter", "ParallelProgress") } #' @export #' @rdname default_reporter default_compact_reporter <- function() { getOption("testthat.default_compact_reporter", "CompactProgress") } #' @export #' @rdname default_reporter check_reporter <- function() { getOption("testthat.default_check_reporter", "Check") } testthat/R/expect-known.R0000644000176200001440000001531114165635513015072 0ustar liggesusers#' Expectations: is the output or the value equal to a known good value? #' #' For complex printed output and objects, it is often challenging to describe #' exactly what you expect to see. `expect_known_value()` and #' `expect_known_output()` provide a slightly weaker guarantee, simply #' asserting that the values have not changed since the last time that you ran #' them. #' #' These expectations should be used in conjunction with git, as otherwise #' there is no way to revert to previous values. Git is particularly useful #' in conjunction with `expect_known_output()` as the diffs will show you #' exactly what has changed. #' #' Note that known values updates will only be updated when running tests #' interactively. `R CMD check` clones the package source so any changes to #' the reference files will occur in a temporary directory, and will not be #' synchronised back to the source package. #' #' @section 3rd edition: #' `r lifecycle::badge("deprecated")` #' #' `expect_known_output()` and friends are deprecated in the 3rd edition; #' please use [expect_snapshot_output()] and friends instead. #' #' @export #' @param file File path where known value/output will be stored. #' @param update Should the file be updated? Defaults to `TRUE`, with #' the expectation that you'll notice changes because of the first failure, #' and then see the modified files in git. #' @param version The serialization format version to use. The default, 2, was #' the default format from R 1.4.0 to 3.5.3. Version 3 became the default from #' R 3.6.0 and can only be read by R versions 3.5.0 and higher. #' @param ... Passed on to [waldo::compare()]. 
testthat/R/expect-known.R0000644000176200001440000001531114165635513015072 0ustar liggesusers
#' Expectations: is the output or the value equal to a known good value?
#'
#' For complex printed output and objects, it is often challenging to describe
#' exactly what you expect to see. `expect_known_value()` and
#' `expect_known_output()` provide a slightly weaker guarantee, simply
#' asserting that the values have not changed since the last time that you ran
#' them.
#'
#' These expectations should be used in conjunction with git, as otherwise
#' there is no way to revert to previous values. Git is particularly useful
#' in conjunction with `expect_known_output()` as the diffs will show you
#' exactly what has changed.
#'
#' Note that known values will only be updated when running tests
#' interactively. `R CMD check` clones the package source so any changes to
#' the reference files will occur in a temporary directory, and will not be
#' synchronised back to the source package.
#'
#' @section 3rd edition:
#' `r lifecycle::badge("deprecated")`
#'
#' `expect_known_output()` and friends are deprecated in the 3rd edition;
#' please use [expect_snapshot_output()] and friends instead.
#'
#' @export
#' @param file File path where known value/output will be stored.
#' @param update Should the file be updated? Defaults to `TRUE`, with
#'   the expectation that you'll notice changes because of the first failure,
#'   and then see the modified files in git.
#' @param version The serialization format version to use. The default, 2, was
#'   the default format from R 1.4.0 to 3.5.3. Version 3 became the default from
#'   R 3.6.0 and can only be read by R versions 3.5.0 and higher.
#' @param ... Passed on to [waldo::compare()].
#' @keywords internal
#' @inheritParams expect_equal
#' @inheritParams capture_output_lines
#' @examples
#' tmp <- tempfile()
#'
#' # The first run always succeeds
#' expect_known_output(mtcars[1:10, ], tmp, print = TRUE)
#'
#' # Subsequent runs will succeed only if the file is unchanged
#' # This will succeed:
#' expect_known_output(mtcars[1:10, ], tmp, print = TRUE)
#'
#' \dontrun{
#' # This will fail
#' expect_known_output(mtcars[1:9, ], tmp, print = TRUE)
#' }
expect_known_output <- function(object, file,
                                update = TRUE,
                                ...,
                                info = NULL,
                                label = NULL,
                                print = FALSE,
                                width = 80) {
  edition_deprecate(3, "expect_known_output()",
    "Please use `expect_snapshot_output()` instead"
  )

  act <- list()
  act$quo <- enquo(object)
  act$lab <- label %||% quo_label(act$quo)
  act <- append(act, eval_with_output(object, print = print, width = width))

  compare_file(file, act$out, update = update, info = info, ...)
  invisible(act$val)
}

compare_file <- function(path, lines, ..., update = TRUE, info = NULL) {
  if (!file.exists(path)) {
    warning("Creating reference output", call. = FALSE)
    brio::write_lines(lines, path)
    succeed()
    return()
  }

  old_lines <- brio::read_lines(path)
  if (update) {
    brio::write_lines(lines, path)
    if (!all_utf8(lines)) {
      warning("New reference output is not UTF-8 encoded", call. = FALSE)
    }
  }
  if (!all_utf8(old_lines)) {
    warning("Reference output is not UTF-8 encoded", call. = FALSE)
  }

  comp <- waldo_compare(
    x = old_lines, x_arg = "old",
    y = lines, y_arg = "new",
    ...
  )
  expect(
    length(comp) == 0,
    sprintf(
      "Results have changed from known value recorded in %s.\n\n%s",
      encodeString(path, quote = "'"),
      paste0(comp, collapse = "\n\n")
    ),
    info = info,
    trace_env = caller_env()
  )
}

#' Expectations: is the output or the value equal to a known good value?
#'
#' `expect_output_file()` behaves identically to [expect_known_output()].
#'
#' @section 3rd edition:
#' `r lifecycle::badge("deprecated")`
#'
#' `expect_output_file()` is deprecated in the 3rd edition;
#' please use [expect_snapshot_output()] and friends instead.
#'
#' @export
#' @keywords internal
expect_output_file <- function(object, file,
                               update = TRUE,
                               ...,
                               info = NULL,
                               label = NULL,
                               print = FALSE,
                               width = 80) {
  # Code is a copy of expect_known_output()
  edition_deprecate(3, "expect_output_file()",
    "Please use `expect_snapshot_output()` instead"
  )

  act <- list()
  act$quo <- enquo(object)
  act$lab <- label %||% quo_label(act$quo)
  act <- append(act, eval_with_output(object, print = print, width = width))

  compare_file(file, act$out, update = update, info = info, ...)
  invisible(act$val)
}

#' @export
#' @rdname expect_known_output
expect_known_value <- function(object, file,
                               update = TRUE,
                               ...,
                               info = NULL,
                               label = NULL,
                               version = 2) {
  edition_deprecate(3, "expect_known_value()",
    "Please use `expect_snapshot_value()` instead"
  )

  act <- quasi_label(enquo(object), label, arg = "object")

  if (!file.exists(file)) {
    warning("Creating reference value", call. = FALSE)
    saveRDS(object, file, version = version)
    succeed()
  } else {
    ref_val <- readRDS(file)
    comp <- compare(act$val, ref_val, ...)
    if (update && !comp$equal) {
      saveRDS(act$val, file, version = version)
    }

    expect(
      comp$equal,
      sprintf(
        "%s has changed from known value recorded in %s.\n%s",
        act$lab, encodeString(file, quote = "'"), comp$message
      ),
      info = info
    )
  }

  invisible(act$val)
}

#' @export
#' @rdname expect_known_output
#' @usage NULL
expect_equal_to_reference <- function(..., update = FALSE) {
  edition_deprecate(3, "expect_equal_to_reference()",
    "Please use `expect_snapshot_value()` instead"
  )

  expect_known_value(..., update = update)
}

#' @export
#' @rdname expect_known_output
#' @param hash Known hash value. Leave empty and you'll be informed what
#'   to use in the test output.
expect_known_hash <- function(object, hash = NULL) {
  edition_deprecate(3, "expect_known_hash()",
    "Please use `expect_snapshot_value()` instead"
  )

  act <- quasi_label(enquo(object), arg = "object")
  act_hash <- digest::digest(act$val)
  if (!is.null(hash)) {
    act_hash <- substr(act_hash, 1, nchar(hash))
  }

  if (is.null(hash)) {
    warning(paste0("No recorded hash: use ", substr(act_hash, 1, 10)))
    succeed()
  } else {
    expect(
      hash == act_hash,
      sprintf("Value hashes to %s, not %s", act_hash, hash)
    )
  }

  invisible(act$val)
}

all_utf8 <- function(x) {
  !any(is.na(iconv(x, "UTF-8", "UTF-8")))
}
testthat/R/expect-inheritance.R0000644000176200001440000001167014164710002016215 0ustar liggesusers
#' Does code return an object inheriting from the expected base type, S3 class,
#' or S4 class?
#'
#' @description
#' See <https://adv-r.hadley.nz/oo.html> for an overview of R's OO systems, and
#' the vocabulary used here.
#'
#' * `expect_type(x, type)` checks that `typeof(x)` is `type`.
#' * `expect_s3_class(x, class)` checks that `x` is an S3 object that
#'   [inherits()] from `class`.
#' * `expect_s3_class(x, NA)` checks that `x` isn't an S3 object.
#' * `expect_s4_class(x, class)` checks that `x` is an S4 object that
#'   [is()] `class`.
#' * `expect_s4_class(x, NA)` checks that `x` isn't an S4 object.
#'
#' See [expect_vector()] for testing properties of objects created by vctrs.
#'
#' @param type String giving base type (as returned by [typeof()]).
#' @param class Either a character vector of class names, or
#'   for `expect_s3_class()` and `expect_s4_class()`, an `NA` to assert
#'   that `object` isn't an S3 or S4 object.
#' @inheritParams expect_that
#' @family expectations
#' @examples
#' x <- data.frame(x = 1:10, y = "x", stringsAsFactors = TRUE)
#' # A data frame is an S3 object with class data.frame
#' expect_s3_class(x, "data.frame")
#' show_failure(expect_s4_class(x, "data.frame"))
#' # A data frame is built from a list:
#' expect_type(x, "list")
#'
#' # An integer vector is an atomic vector of type "integer"
#' expect_type(x$x, "integer")
#' # It is not an S3 object
#' show_failure(expect_s3_class(x$x, "integer"))
#'
#' # Above, we requested that data.frame() convert strings to factors:
#' show_failure(expect_type(x$y, "character"))
#' expect_s3_class(x$y, "factor")
#' expect_type(x$y, "integer")
#' @name inheritance-expectations
NULL

#' @export
#' @rdname inheritance-expectations
expect_type <- function(object, type) {
  stopifnot(is.character(type), length(type) == 1)
  act <- quasi_label(enquo(object), arg = "object")

  act_type <- typeof(act$val)
  expect(
    identical(act_type, type),
    sprintf("%s has type %s, not %s.", act$lab, format_class(act_type), format_class(type))
  )

  invisible(act$val)
}

#' @export
#' @rdname inheritance-expectations
#' @param exact If `FALSE`, the default, checks that `object` inherits
#'   from `class`. If `TRUE`, checks that object has a class that's identical
#'   to `class`.
expect_s3_class <- function(object, class, exact = FALSE) {
  act <- quasi_label(enquo(object), arg = "object")
  act$class <- format_class(class(act$val))
  exp_lab <- format_class(class)

  if (identical(class, NA)) {
    expect(
      isS3(object) == !is.na(class),
      sprintf("%s is an S3 object", act$lab)
    )
  } else if (is.character(class)) {
    if (!isS3(act$val)) {
      fail(sprintf("%s is not an S3 object", act$lab))
    } else if (exact) {
      expect(
        identical(class(act$val), class),
        sprintf("%s has class %s, not %s.", act$lab, act$class, exp_lab)
      )
    } else {
      expect(
        inherits(act$val, class),
        sprintf("%s inherits from %s not %s.", act$lab, act$class, exp_lab)
      )
    }
  } else {
    abort("`class` must be a NA or a character vector")
  }

  invisible(act$val)
}

#' @export
#' @rdname inheritance-expectations
expect_s4_class <- function(object, class) {
  act <- quasi_label(enquo(object), arg = "object")
  act_val_lab <- format_class(methods::is(object))
  exp_lab <- format_class(class)

  if (identical(class, NA)) {
    expect(
      isS4(object) == !is.na(class),
      sprintf("%s is an S4 object", act$lab)
    )
  } else if (is.character(class)) {
    if (!isS4(act$val)) {
      fail(sprintf("%s is not an S4 object", act$lab))
    } else {
      expect(
        methods::is(act$val, class),
        sprintf("%s inherits from %s not %s.", act$lab, act_val_lab, exp_lab)
      )
    }
  } else {
    abort("`class` must be a NA or a character vector")
  }

  invisible(act$val)
}

isS3 <- function(x) is.object(x) && !isS4(x)

#' Does an object inherit from a given class?
#'
#' @description
#' `r lifecycle::badge("superseded")`
#'
#' `expect_is()` is an older form that uses [inherits()] without checking
#' whether `x` is S3, S4, or neither. Instead, I'd recommend using
#' [expect_type()], [expect_s3_class()] or [expect_s4_class()] to more clearly
#' convey your intent.
#'
#' @section 3rd edition:
#' `r lifecycle::badge("deprecated")`
#'
#' `expect_is()` is formally deprecated in the 3rd edition.
#'
#' @keywords internal
#' @inheritParams expect_type
#' @export
expect_is <- function(object, class, info = NULL, label = NULL) {
  stopifnot(is.character(class))
  edition_deprecate(3, "expect_is()",
    "Use `expect_type()`, `expect_s3_class()`, or `expect_s4_class()` instead"
  )

  act <- quasi_label(enquo(object), label, arg = "object")
  act$class <- format_class(class(act$val))
  exp_lab <- format_class(class)

  expect(
    inherits(act$val, class),
    sprintf("%s inherits from `%s` not `%s`.", act$lab, act$class, exp_lab),
    info = info
  )
  invisible(act$val)
}

format_class <- function(x) {
  paste0(encodeString(x, quote = "'"), collapse = "/")
}
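
# A sketch (not part of the original source) illustrating the `exact`
# argument of expect_s3_class():
#
#   x <- structure(list(), class = c("subclass", "superclass"))
#   expect_s3_class(x, "superclass")                # passes: x inherits from it
#   expect_s3_class(x, "superclass", exact = TRUE)  # fails: classes not identical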
  invisible()

# Tools for finding srcrefs -----------------------------------------------

find_first_srcref <- function(start) {
  calls <- sys.calls()
  calls <- calls[seq2(start, length(calls))]

  for (call in calls) {
    srcref <- attr(call, "srcref")
    if (!is.null(srcref)) {
      return(srcref)
    }
  }
  NULL
}

escape_regex <- function(x) {
  chars <- c("*", ".", "?", "^", "+", "$", "|", "(", ")", "[", "]", "{", "}", "\\")
  gsub(paste0("([\\", paste0(collapse = "\\", chars), "])"), "\\\\\\1", x, perl = TRUE)
}

# For R 3.1
dir.exists <- function(paths) {
  file.exists(paths) & file.info(paths)$isdir
}

maybe_restart <- function(restart) {
  if (!is.null(findRestart(restart))) {
    invokeRestart(restart)
  }
}

# Backport for R 3.2
strrep <- function(x, times) {
  x = as.character(x)
  if (length(x) == 0L) return(x)
  unlist(.mapply(function(x, times) {
    if (is.na(x) || is.na(times)) return(NA_character_)
    if (times <= 0L) return("")
    paste0(replicate(times, x), collapse = "")
  }, list(x = x, times = times), MoreArgs = list()), use.names = FALSE)
}

# Backport for R < 4.0
deparse1 <- function(expr, ...) paste(deparse(expr, ...), collapse = "\n")

can_entrace <- function(cnd) {
  !inherits(cnd, "Throwable")
}

# Need to strip environment and source references to make lightweight
# function suitable to send to another process
transport_fun <- function(f) {
  environment(f) <- .GlobalEnv
  f <- zap_srcref(f)
  f
}

isNA <- function(x) length(x) == 1 && is.na(x)

compact <- function(x) {
  x[lengths(x) > 0]
}

# Handled specially in test_code so no backtrace
testthat_warn <- function(message, ...) {
  warn(message, class = "testthat_warn", ...)
}

split_by_line <- function(x) {
  trailing_nl <- grepl("\n$", x)
  x <- strsplit(x, "\n")
  x[trailing_nl] <- lapply(x[trailing_nl], c, "")
  x
}

rstudio_tickle <- function() {
  if (!is_installed("rstudioapi")) {
    return()
  }
  if (!rstudioapi::hasFun("executeCommand")) {
    return()
  }

  rstudioapi::executeCommand("vcsRefresh")
  rstudioapi::executeCommand("refreshFiles")
}

check_installed <- function(pkg, fun) {
  if (is_installed(pkg)) {
    return()
  }

  abort(c(
    paste0("The ", pkg, " package must be installed in order to use `", fun, "`"),
    i = paste0("Do you need to run `install.packages('", pkg, "')`?")
  ))
}

first_upper <- function(x) {
  substr(x, 1, 1) <- toupper(substr(x, 1, 1))
  x
}

in_rcmd_check <- function() {
  nzchar(Sys.getenv("_R_CHECK_PACKAGE_NAME_", ""))
}

map_chr <- function(.x, .f, ...) {
  .f <- as_function(.f)
  vapply(.x, .f, FUN.VALUE = character(1), ...)
}

map_lgl <- function(.x, .f, ...) {
  .f <- as_function(.f)
  vapply(.x, .f, FUN.VALUE = logical(1), ...)
}

r_version <- function() paste0("R", getRversion()[, 1:2])

# Waiting on https://github.com/r-lib/withr/pull/188
local_tempfile1 <- function(lines, env = parent.frame()) {
  path <- withr::local_tempfile(.local_envir = env)
  writeLines(lines, path)
  path
}
testthat/R/test-compiled-code.R0000644000176200001440000003034114164710003016114 0ustar liggesusers#' @keywords internal
#' @rdname run_cpp_tests
#' @export
expect_cpp_tests_pass <- function(package) {
  run_testthat_tests <- get_routine(package, "run_testthat_tests")

  output <- ""
  tests_passed <- TRUE

  tryCatch(
    output <- capture_output_lines(tests_passed <- .Call(run_testthat_tests, FALSE)),
    error = function(e) {
      warning(sprintf("failed to call test entrypoint '%s'", run_testthat_tests))
    }
  )

  # Drop first line of output (it's just a '####' delimiter)
  info <- paste(output[-1], collapse = "\n")
  expect(tests_passed, paste("C++ unit tests:", info, sep = "\n"))
}

#' Do C++ tests pass?
#'
#' Test compiled code in the package `package`. A call to this function will
#' automatically be generated for you in `tests/testthat/test-cpp.R` after
#' calling [use_catch()]; you should not need to manually call this expectation
#' yourself.
#'
#' @param package The name of the package to test.
#' @keywords internal
#' @export
run_cpp_tests <- function(package) {
  skip_on_os("solaris")
  check_installed("xml2", "run_cpp_tests()")

  run_testthat_tests <- get_routine(package, "run_testthat_tests")

  output <- ""
  tests_passed <- TRUE

  catch_error <- FALSE
  tryCatch({
    output <- capture_output_lines(tests_passed <- .Call(run_testthat_tests, TRUE))
  },
    error = function(e) {
      # `<<-` so the flag set here is visible after the handler returns
      catch_error <<- TRUE
      reporter <- get_reporter()
      context_start("Catch")
      reporter$start_test(context = "Catch", test = "Catch")
      reporter$add_result(context = "Catch", test = "Catch", result = expectation("failure", e$message))
      reporter$end_test(context = "Catch", test = "Catch")
    }
  )

  if (catch_error) {
    return()
  }

  report <- xml2::read_xml(paste(output, collapse = "\n"))

  contexts <- xml2::xml_find_all(report, "//TestCase")
  for (context in contexts) {
    context_name <- sub(" [|][^|]+$", "", xml2::xml_attr(context, "name"))
    context_start(context_name)

    tests <- xml2::xml_find_all(context, "./Section")
    for (test in tests) {
      test_name <- xml2::xml_attr(test, "name")

      result <- xml2::xml_find_first(test, "./OverallResults")
      successes <- as.integer(xml2::xml_attr(result, "successes"))

      get_reporter()$start_test(context = context_name, test = test_name)

      for (i in seq_len(successes)) {
        exp <- expectation("success", "")
        exp$test <- test_name
        get_reporter()$add_result(context = context_name, test = test_name, result = exp)
      }

      failures <- xml2::xml_find_all(test, "./Expression")
      for (failure in failures) {
        org <- xml2::xml_find_first(failure, "Original")
        org_text <- xml2::xml_text(org, trim = TRUE)
        filename <- xml2::xml_attr(failure, "filename")

        type <- xml2::xml_attr(failure, "type")
        type_msg <- switch(type,
          "CATCH_CHECK_FALSE" = "isn't false.",
          "CATCH_CHECK_THROWS" = "did not throw an exception.",
          "CATCH_CHECK_THROWS_AS" = "threw an exception with unexpected type.",
          "isn't true."
        )
        org_text <- paste(org_text, type_msg)

        line <- xml2::xml_attr(failure, "line")
        failure_srcref <- srcref(srcfile(file.path("src", filename)), c(line, line, 1, 1))

        exp <- expectation("failure", org_text, srcref = failure_srcref)
        exp$test <- test_name
        get_reporter()$add_result(context = context_name, test = test_name, result = exp)
      }

      exceptions <- xml2::xml_find_all(test, "./Exception")
      for (exception in exceptions) {
        exception_text <- xml2::xml_text(exception, trim = TRUE)
        filename <- xml2::xml_attr(exception, "filename")

        line <- xml2::xml_attr(exception, "line")
        exception_srcref <- srcref(srcfile(file.path("src", filename)), c(line, line, 1, 1))

        exp <- expectation("error", exception_text, srcref = exception_srcref)
        exp$test <- test_name
        get_reporter()$add_result(context = context_name, test = test_name, result = exp)
      }

      get_reporter()$end_test(context = context_name, test = test_name)
    }
  }
}

#' Use Catch for C++ Unit Testing
#'
#' Add the necessary infrastructure to enable C++ unit testing
#' in \R packages with [Catch](https://github.com/catchorg/Catch2) and
#' `testthat`.
#'
#' Calling `use_catch()` will:
#'
#' 1. Create a file `src/test-runner.cpp`, which ensures that the
#'    `testthat` package will understand how to run your package's
#'    unit tests,
#'
#' 2. Create an example test file `src/test-example.cpp`, which
#'    showcases how you might use Catch to write a unit test,
#'
#' 3. Add a test file `tests/testthat/test-cpp.R`, which ensures that
#'    `testthat` will run your compiled tests during invocations of
#'    `devtools::test()` or `R CMD check`, and
#'
#' 4. Create a file `R/catch-routine-registration.R`, which ensures that
#'    \R will automatically register this routine when
#'    `tools::package_native_routine_registration_skeleton()` is invoked.
#'
#' You will also need to:
#'
#' * Add xml2 to Suggests, with e.g. `usethis::use_package("xml2", "Suggests")`
#' * Add testthat to LinkingTo, with e.g.
#'   `usethis::use_package("testthat", "LinkingTo")`
#'
#' C++ unit tests can be added to C++ source files within the
#' `src` directory of your package, with a format similar
#' to \R code tested with `testthat`. Here's a simple example
#' of a unit test written with `testthat` + Catch:
#'
#' \preformatted{
#' context("C++ Unit Test") {
#'   test_that("two plus two is four") {
#'     int result = 2 + 2;
#'     expect_true(result == 4);
#'   }
#' }
#' }
#'
#' When your package is compiled, unit tests alongside a harness
#' for running these tests will be compiled into your \R package,
#' with the C entry point `run_testthat_tests()`. `testthat`
#' will use that entry point to run your unit tests when detected.
#'
#' @section Functions:
#'
#' All of the functions provided by Catch are
#' available with the `CATCH_` prefix -- see
#' [here](https://github.com/catchorg/Catch2/blob/master/docs/assertions.md)
#' for a full list. `testthat` provides the
#' following wrappers, to conform with `testthat`'s
#' \R interface:
#'
#' \tabular{lll}{
#' \strong{Function} \tab \strong{Catch} \tab \strong{Description} \cr
#' `context` \tab `CATCH_TEST_CASE` \tab The context of a set of tests. \cr
#' `test_that` \tab `CATCH_SECTION` \tab A test section. \cr
#' `expect_true` \tab `CATCH_CHECK` \tab Test that an expression evaluates to `true`. \cr
#' `expect_false` \tab `CATCH_CHECK_FALSE` \tab Test that an expression evaluates to `false`. \cr
#' `expect_error` \tab `CATCH_CHECK_THROWS` \tab Test that evaluation of an expression throws an exception. \cr
#' `expect_error_as` \tab `CATCH_CHECK_THROWS_AS` \tab Test that evaluation of an expression throws an exception of a specific class. \cr
#' }
#'
#' In general, you should prefer using the `testthat`
#' wrappers, as `testthat` also does some work to
#' ensure that any unit tests within will not be compiled or
#' run when using the Solaris Studio compilers (as these are
#' currently unsupported by Catch). This should make it
#' easier to submit packages to CRAN that use Catch.
#'
#' @section Symbol Registration:
#'
#' If you've opted to disable dynamic symbol lookup in your
#' package, then you'll need to explicitly export a symbol
#' in your package that `testthat` can use to run your unit
#' tests. `testthat` will look for a routine with one of the names:
#'
#' \preformatted{
#' C_run_testthat_tests
#' c_run_testthat_tests
#' run_testthat_tests
#' }
#'
#' See [Controlling Visibility](https://cran.r-project.org/doc/manuals/r-release/R-exts.html#Controlling-visibility)
#' and [Registering Symbols](https://cran.r-project.org/doc/manuals/r-release/R-exts.html#Registering-symbols)
#' in the **Writing R Extensions** manual for more information.
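#'
#' For instance, with registration enabled, the NAMESPACE directive looks
#' like the following sketch (`mypkg` is a placeholder package name; compare
#' the `useDynLib()` reminder printed by `use_catch()` below):
#'
#' \preformatted{
#' useDynLib(mypkg, .registration = TRUE)
#' }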
#'
#' @section Advanced Usage:
#'
#' If you'd like to write your own Catch test runner, you can
#' instead use the `testthat::catchSession()` object in a file
#' with the form:
#'
#' \preformatted{
#' #define TESTTHAT_TEST_RUNNER
#' #include <testthat.h>
#'
#' void run()
#' {
#'     Catch::Session& session = testthat::catchSession();
#'     // interact with the session object as desired
#' }
#' }
#'
#' This can be useful if you'd like to run your unit tests
#' with custom arguments passed to the Catch session.
#'
#' @param dir The directory containing an \R package.
#'
#' @section Standalone Usage:
#'
#' If you'd like to use the C++ unit testing facilities provided
#' by Catch, but would prefer not to use the regular `testthat`
#' \R testing infrastructure, you can manually run the unit tests
#' by inserting a call to:
#'
#' \preformatted{
#' .Call("run_testthat_tests", PACKAGE = <package name>)
#' }
#'
#' as necessary within your unit test suite.
#'
#' @export
#' @seealso [Catch](https://github.com/catchorg/Catch2/blob/master/docs/assertions.md),
#'   the library used to enable C++ unit testing.
use_catch <- function(dir = getwd()) {

  desc_path <- file.path(dir, "DESCRIPTION")
  if (!file.exists(desc_path)) {
    stop("no DESCRIPTION file at path '", desc_path, "'", call. = FALSE)
  }

  desc <- read.dcf(desc_path, all = TRUE)
  pkg <- desc$Package
  if (!nzchar(pkg)) {
    stop("no 'Package' field in DESCRIPTION file '", desc_path, "'", call. = FALSE)
  }

  src_dir <- file.path(dir, "src")
  if (!file.exists(src_dir) && !dir.create(src_dir)) {
    stop("failed to create 'src/' directory '", src_dir, "'", call. = FALSE)
  }

  test_runner_path <- file.path(src_dir, "test-runner.cpp")

  # Copy the test runner.
  success <- file.copy(
    system.file(package = "testthat", "resources", "test-runner.cpp"),
    test_runner_path,
    overwrite = TRUE
  )
  if (!success) {
    stop("failed to copy 'test-runner.cpp' to '", src_dir, "'", call. = FALSE)
  }

  # Copy the test example.
  success <- file.copy(
    system.file(package = "testthat", "resources", "test-example.cpp"),
    file.path(src_dir, "test-example.cpp"),
    overwrite = TRUE
  )
  if (!success) {
    stop("failed to copy 'test-example.cpp' to '", src_dir, "'", call. = FALSE)
  }

  # Copy the 'test-cpp.R' file.
  test_dir <- file.path(dir, "tests", "testthat")
  if (!file.exists(test_dir) && !dir.create(test_dir, recursive = TRUE)) {
    stop("failed to create 'tests/testthat/' directory '", test_dir, "'", call. = FALSE)
  }

  template_file <- system.file(package = "testthat", "resources", "test-cpp.R")
  contents <- readChar(template_file, file.info(template_file)$size, TRUE)
  transformed <- sprintf(contents, pkg)
  output_path <- file.path(test_dir, "test-cpp.R")
  cat(transformed, file = output_path)

  # Copy the 'catch-routine-registration.R' file.
  template_file <- system.file(package = "testthat", "resources", "catch-routine-registration.R")
  contents <- readChar(template_file, file.info(template_file)$size, TRUE)
  transformed <- sprintf(contents, pkg)
  output_path <- file.path(dir, "R", "catch-routine-registration.R")
  cat(transformed, file = output_path)

  message("> Added C++ unit testing infrastructure.")
  message("> Please ensure you have 'LinkingTo: testthat' in your DESCRIPTION.")
  message("> Please ensure you have 'Suggests: xml2' in your DESCRIPTION.")
  message("> Please ensure you have 'useDynLib(", pkg, ", .registration = TRUE)' in your NAMESPACE.")
}

get_routine <- function(package, routine) {

  # check to see if the package has explicitly exported
  # the associated routine (check common prefixes as we
  # don't necessarily have access to the NAMESPACE and
  # know what the prefix is)
  namespace <- asNamespace(package)
  prefixes <- c("C_", "c_", "C", "c", "_", "")
  for (prefix in prefixes) {
    name <- paste(prefix, routine, sep = "")
    if (exists(name, envir = namespace)) {
      symbol <- get(name, envir = namespace)
      if (inherits(symbol, "NativeSymbolInfo")) {
        return(symbol)
      }
    }
  }

  # otherwise, try to resolve the symbol dynamically
  for (prefix in prefixes) {
    name <- paste(prefix, routine, sep = "")
    resolved <- tryCatch(
      getNativeSymbolInfo(name, PACKAGE = package),
      error = function(e) NULL
    )

    if (inherits(resolved, "NativeSymbolInfo")) {
      return(resolved)
    }
  }

  # if we got here, we failed to find the symbol -- throw an error
  fmt <- "failed to locate routine '%s' in package '%s'"
  stop(sprintf(fmt, routine, package), call. = FALSE)
}

(function() {
  .Call(run_testthat_tests, TRUE)
})
testthat/R/quasi-label.R0000644000176200001440000000634514164710002014640 0ustar liggesusers#' Quasi-labelling
#'
#' The first argument to every `expect_` function can use unquoting to
#' construct better labels. This makes it easy to create informative labels when
#' expectations are used inside a function or a for loop. `quasi_label()` wraps
#' up the details, returning the expression and label.
#'
#' @section Limitations:
#' Because all `expect_` functions use unquoting to generate more informative
#' labels, you can not use unquoting for other purposes. Instead, you'll need
#' to perform all other unquoting outside of the expectation and only test
#' the results.
#'
#' @param quo A quosure created by `rlang::enquo()`.
#' @param label An optional label to override the default. This is
#'   only provided for internal usage. Modern expectations should not
#'   include a `label` parameter.
#' @param arg Argument name shown in error message if `quo` is missing.
#' @keywords internal
#' @return A list containing two elements:
#'   \item{val}{The evaluated value of `quo`}
#'   \item{lab}{The quasiquoted label generated from `quo`}
#' @export
#' @examples
#' f <- function(i) if (i > 3) i * 9 else i * 10
#' i <- 10
#'
#' # This sort of expression commonly occurs inside a for loop or function
#' # And the failure isn't helpful because you can't see the value of i
#' # that caused the problem:
#' show_failure(expect_equal(f(i), i * 10))
#'
#' # To overcome this issue, testthat allows you to unquote expressions using
#' # !!. This causes the failure message to show the value rather than the
#' # variable name
#' show_failure(expect_equal(f(!!i), !!(i * 10)))
quasi_label <- function(quo, label = NULL, arg = "quo") {
  force(quo)

  if (quo_is_missing(quo)) {
    stop("argument `", arg, "` is missing, with no default.", call. = FALSE)
  }

  expr <- quo_get_expr(quo)
  list(
    val = eval_bare(expr, quo_get_env(quo)),
    lab = label %||% expr_label(expr)
  )
}

quasi_capture <- function(.quo, .label, .capture, ...) {
  act <- list()
  act$lab <- .label %||% quo_label(.quo)
  act$cap <- .capture(act$val <- eval_bare(quo_get_expr(.quo), quo_get_env(.quo)), ...)

  act
}

expr_label <- function(x) {
  if (is_syntactic_literal(x)) {
    deparse1(x)
  } else if (is.name(x)) {
    paste0("`", as.character(x), "`")
  } else if (is_call(x)) {
    chr <- deparse(x)
    if (length(chr) > 1) {
      if (is_call(x, "function")) {
        x[[3]] <- quote(...)
      } else if (is_call_infix(x)) {
        left <- deparse(x[[2]], width.cutoff = 29)
        right <- deparse(x[[3]], width.cutoff = 28)
        if (length(left) > 1) {
          x[[2]] <- quote(expr = ...)
        }
        if (length(right) > 1) {
          x[[3]] <- quote(expr = ...)
        }
      } else {
        x <- call2(x[[1]], quote(expr = ...))
      }
    }
    deparse1(x)
  } else {
    # Any other object that's been inlined into the expression
    x <- deparse(x)
    if (length(x) > 1) {
      x <- paste0(x[[1]], "...)")
    }
    x
  }
}

is_call_infix <- function(x) {
  if (!is_call(x, n = 2)) {
    return(FALSE)
  }

  fn <- x[[1]]
  if (!is_symbol(fn)) {
    return(FALSE)
  }

  name <- as_string(fn)
  base <- c(
    ":", "::", ":::", "$", "@", "^", "*", "/", "+", "-", ">", ">=",
    "<", "<=", "==", "!=", "!", "&", "&&", "|", "||", "~", "<-", "<<-"
  )

  name %in% base || grepl("^%.*%$", name)
}
testthat/R/reporter-zzz.R0000644000176200001440000000533014164710002015131 0ustar liggesusers#' Get and set active reporter.
#'
#' `get_reporter()` and `set_reporter()` access and modify the current "active"
#' reporter. Generally, these functions should not be called directly; instead
#' use `with_reporter()` to temporarily change, then reset, the active reporter.
#'
#'
#' @param reporter Reporter to use to summarise output. Can be supplied
#'   as a string (e.g. "summary") or as an R6 object
#'   (e.g. `SummaryReporter$new()`).
#'
#'   See [Reporter] for more details and a list of built-in reporters.
#' @param code Code to execute.
#' @return `with_reporter()` invisibly returns the reporter active when `code`
#'   was evaluated.
#' @param start_end_reporter Should the reporter's `start_reporter()` and
#'   `end_reporter()` methods be called? For expert use only.
#' @keywords internal
#' @name reporter-accessors
NULL

#' @rdname reporter-accessors
#' @export
set_reporter <- function(reporter) {
  old <- testthat_env$reporter
  testthat_env$reporter <- reporter
  invisible(old)
}

#' @rdname reporter-accessors
#' @export
get_reporter <- function() {
  testthat_env$reporter
}

#' @rdname reporter-accessors
#' @export
with_reporter <- function(reporter, code, start_end_reporter = TRUE) {
  reporter <- find_reporter(reporter)

  old <- set_reporter(reporter)
  on.exit(set_reporter(old), add = TRUE)

  if (start_end_reporter) {
    reporter$start_reporter()
  }

  tryCatch(code, testthat_abort_reporter = function(cnd) {
    cat(conditionMessage(cnd), "\n")
    NULL
  })

  if (start_end_reporter) {
    reporter$end_reporter()
  }

  invisible(reporter)
}

stop_reporter <- function(message) {
  signal(message, "testthat_abort_reporter")
  abort(message)
}

#' Find reporter object given name or object.
#'
#' If not found, will throw an informative error message.
#' Pass a character vector to create a [MultiReporter] composed
#' of individual reporters.
#' Will return NULL if given NULL.
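#'
#' A small sketch of the dispatch rules implemented below (illustrative
#' calls only; nothing here is run):
#'
#' \preformatted{
#' find_reporter("summary")             # SummaryReporter$new()
#' find_reporter(c("summary", "fail"))  # MultiReporter wrapping both
#' find_reporter(StopReporter$new())    # returned unchanged
#' find_reporter(NULL)                  # NULL
#' }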
#' #' @param reporter name of reporter(s), or reporter object(s) #' @keywords internal find_reporter <- function(reporter) { if (is.null(reporter)) return(NULL) if (inherits(reporter, "R6ClassGenerator")) { reporter$new() } else if (inherits(reporter, "Reporter")) { reporter } else if (is.character(reporter)) { if (length(reporter) <= 1L) { find_reporter_one(reporter) } else { MultiReporter$new(reporters = lapply(reporter, find_reporter_one)) } } else { stop("Invalid input", call. = FALSE) } } find_reporter_one <- function(reporter, ...) { stopifnot(is.character(reporter)) name <- reporter substr(name, 1, 1) <- toupper(substr(name, 1, 1)) name <- paste0(name, "Reporter") if (!exists(name)) { stop("Can not find test reporter ", reporter, call. = FALSE) } get(name)$new(...) } testthat/R/snapshot-file.R0000644000176200001440000002036114167646004015224 0ustar liggesusers#' Snapshot testing for whole files #' #' @description #' Whole file snapshot testing is designed for testing objects that don't have #' a convenient textual representation, with initial support for images #' (`.png`, `.jpg`, `.svg`), data frames (`.csv`), and text files #' (`.R`, `.txt`, `.json`, ...). #' #' The first time `expect_snapshot_file()` is run, it will create #' `_snaps/{test}/{name}.{ext}` containing reference output. Future runs will #' be compared to this reference: if different, the test will fail and the new #' results will be saved in `_snaps/{test}/{name}.new.{ext}`. To review #' failures, call [snapshot_review()]. #' #' We generally expect this function to be used via a wrapper that takes care #' of ensuring that output is as reproducible as possible, e.g. automatically #' skipping tests where it's known that images can't be reproduced exactly. #' #' @param path Path to file to snapshot. Optional for #' `announce_snapshot_file()` if `name` is supplied. #' @param name Snapshot name, taken from `path` by default. #' @param binary `r lifecycle::badge("deprecated")` Please use the #' `compare` argument instead. #' @param compare A function used to compare the snapshot files. It should take #' two inputs, the paths to the `old` and `new` snapshot, and return either #' `TRUE` or `FALSE`. This defaults to `compare_file_text` if `name` has #' extension `.r`, `.R`, `.Rmd`, `.md`, or `.txt`, and otherwise uses #' `compare_file_binary`. #' #' `compare_file_binary()` compares byte-by-byte and #' `compare_file_text()` compares lines-by-line, ignoring #' the difference between Windows and Mac/Linux line endings. #' @param variant If not-`NULL`, results will be saved in #' `_snaps/{variant}/{test}/{name}.{ext}`. This allows you to create #' different snapshots for different scenarios, like different operating #' systems or different R versions. #' @inheritParams expect_snapshot #' #' @section Announcing snapshots: #' testthat automatically detects dangling snapshots that have been #' written to the `_snaps` directory but which no longer have #' corresponding R code to generate them. These dangling files are #' automatically deleted so they don't clutter the snapshot #' directory. However we want to preserve snapshot files when the R #' code wasn't executed because of an unexpected error or because of a #' [skip()]. Let testthat know about these files by calling #' `announce_snapshot_file()` before `expect_snapshot_file()`. 
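#'
#' @section Custom comparison functions:
#' As described under `compare`, a comparison function just takes the paths
#' to the old and new snapshot files and returns `TRUE` or `FALSE`. A sketch
#' of a custom comparison that ignores trailing whitespace (illustrative
#' only, not part of the package):
#'
#' \preformatted{
#' compare_file_text_trim <- function(old, new) {
#'   trim <- function(path) sub("[ \t]+$", "", brio::read_lines(path))
#'   identical(trim(old), trim(new))
#' }
#' expect_snapshot_file(path, "out.txt", compare = compare_file_text_trim)
#' }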
#'
#' @export
#' @examples
#'
#' # To use expect_snapshot_file() you'll typically need to start by writing
#' # a helper function that creates a file from your code, returning a path
#' save_png <- function(code, width = 400, height = 400) {
#'   path <- tempfile(fileext = ".png")
#'   png(path, width = width, height = height)
#'   on.exit(dev.off())
#'   code
#'
#'   path
#' }
#' path <- save_png(plot(1:5))
#' path
#'
#' \dontrun{
#' expect_snapshot_file(save_png(hist(mtcars$mpg)), "plot.png")
#' }
#'
#' # You'd then also provide a helper that skips tests where you can't
#' # be sure of producing exactly the same output
#' expect_snapshot_plot <- function(name, code) {
#'   # Other packages might affect results
#'   skip_if_not_installed("ggplot2", "2.0.0")
#'   # Or maybe the output is different on some operating systems
#'   skip_on_os("windows")
#'   # You'll need to carefully think about and experiment with these skips
#'
#'   name <- paste0(name, ".png")
#'
#'   # Announce the file before touching `code`. This way, if `code`
#'   # unexpectedly fails or skips, testthat will not auto-delete the
#'   # corresponding snapshot file.
#'   announce_snapshot_file(name = name)
#'
#'   path <- save_png(code)
#'   expect_snapshot_file(path, name)
#' }
expect_snapshot_file <- function(path,
                                 name = basename(path),
                                 binary = lifecycle::deprecated(),
                                 cran = FALSE,
                                 compare = NULL,
                                 transform = NULL,
                                 variant = NULL) {
  edition_require(3, "expect_snapshot_file()")
  if (!cran && !interactive() && on_cran()) {
    skip("On CRAN")
  }
  check_variant(variant)

  snapshotter <- get_snapshotter()
  if (is.null(snapshotter)) {
    snapshot_not_available(paste0("New path: ", path))
    return(invisible())
  }

  if (!is_missing(binary)) {
    lifecycle::deprecate_soft(
      "3.0.3",
      "expect_snapshot_file(binary = )",
      "expect_snapshot_file(compare = )"
    )
    compare <- if (binary) compare_file_binary else compare_file_text
  }

  if (is.null(compare)) {
    ext <- tools::file_ext(name)
    is_text <- ext %in% c("r", "R", "txt", "md", "Rmd")
    compare <- if (is_text) compare_file_text else compare_file_binary
  }

  if (!is.null(transform)) {
    lines <- brio::read_lines(path)
    lines <- transform(lines)
    brio::write_lines(lines, path)
  }

  lab <- quo_label(enquo(path))

  equal <- snapshotter$take_file_snapshot(name, path,
    file_equal = compare,
    variant = variant,
    trace_env = caller_env()
  )
  hint <- snapshot_review_hint(snapshotter$file, name)

  expect(
    equal,
    sprintf(
      "Snapshot of %s to '%s' has changed\n%s",
      lab, paste0(snapshotter$file, "/", name), hint
    )
  )
}

#' @rdname expect_snapshot_file
#' @export
announce_snapshot_file <- function(path, name = basename(path)) {
  edition_require(3, "announce_snapshot_file()")

  snapshotter <- get_snapshotter()
  if (!is.null(snapshotter)) {
    snapshotter$announce_file_snapshot(name)
  }
}

snapshot_review_hint <- function(test, name,
                                 ci = on_ci(),
                                 check = in_rcmd_check()) {
  path <- paste0("tests/testthat/_snaps/", test, "/", new_name(name))

  paste0(
    if (check && ci) "* Download and unzip run artifact\n",
    if (check && !ci) "* Locate check directory\n",
    if (check) paste0("* Copy '", path, "' to local test directory\n"),
    if (check) "* ",
    paste0("Run `testthat::snapshot_review('", test, "/')` to review changes")
  )
}

snapshot_file_equal <- function(snap_test_dir,
                                snap_name,
                                path,
                                file_equal = compare_file_binary,
                                fail_on_new = FALSE,
                                trace_env = NULL) {
  if (!file.exists(path)) {
    abort(paste0("`", path, "` not found"))
  }

  cur_path <- file.path(snap_test_dir, snap_name)
  new_path <- new_name(cur_path)

  if (file.exists(cur_path)) {
    eq <- file_equal(cur_path, path)
    if (!eq) {
      file.copy(path, new_path, overwrite = TRUE)
    } else {
      # in case it exists from a previous run
      unlink(new_path)
    }
    eq
  } else {
    dir.create(snap_test_dir, showWarnings = FALSE, recursive = TRUE)
    file.copy(path, cur_path)
    message <- paste0("Adding new file snapshot: 'tests/testthat/_snaps/", snap_name, "'")
    if (fail_on_new) {
      fail(message, trace_env = trace_env)
    } else {
      testthat_warn(message)
    }
    TRUE
  }
}

# Helpers -----------------------------------------------------------------

new_name <- function(x) {
  pieces <- split_path(x)
  paste0(
    pieces$dir,
    ifelse(pieces$dir == "", "", "/"),
    pieces$name,
    ".new.",
    pieces$ext
  )
}

split_path <- function(path) {
  dir <- dirname(path)
  dir[dir == "."] <- ""

  name <- basename(path)
  ext_loc <- regexpr(".", name, fixed = TRUE)
  no_ext <- ext_loc == -1L
  name_sans_ext <- ifelse(no_ext, name, substr(name, 1, ext_loc - 1))
  ext <- ifelse(no_ext, "", substr(name, ext_loc + 1, nchar(name)))

  list(
    dir = dir,
    name = name_sans_ext,
    ext = ext
  )
}

write_tmp_lines <- function(lines, ext = ".txt", eol = "\n") {
  path <- tempfile(fileext = ext)
  brio::write_lines(lines, path, eol = eol)
  path
}

local_snap_dir <- function(paths, .env = parent.frame()) {
  dir <- tempfile()
  withr::defer(unlink(paths), envir = .env)

  dirs <- file.path(dir, unique(dirname(paths)))
  for (d in dirs) {
    dir.create(d, showWarnings = FALSE, recursive = TRUE)
  }
  file.create(file.path(dir, paths))

  dir
}

#' @rdname expect_snapshot_file
#' @param old,new Paths to old and new snapshot files.
#' @export
compare_file_binary <- function(old, new) {
  old <- brio::read_file_raw(old)
  new <- brio::read_file_raw(new)

  identical(old, new)
}

#' @rdname expect_snapshot_file
#' @export
compare_file_text <- function(old, new) {
  old <- brio::read_lines(old)
  new <- brio::read_lines(new)

  identical(old, new)
}
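# A guarded sketch contrasting the two comparison strategies on the same
# lines saved with different line endings (uses write_tmp_lines() from
# above; wrapped in `if (FALSE)` so nothing runs at load time):
if (FALSE) {
  a <- write_tmp_lines(c("x", "y"), eol = "\n")
  b <- write_tmp_lines(c("x", "y"), eol = "\r\n")
  compare_file_text(a, b)   # TRUE: line endings are normalised away
  compare_file_binary(a, b) # FALSE: the raw bytes differ
}
testthat/R/parallel-taskq.R0000644000176200001440000001375514164710002015361 0ustar liggesusers
# See https://www.tidyverse.org/blog/2019/09/callr-task-q/
# for a detailed explanation on how the task queue works.
#
# Changes in this version, compared to the blog post:
# * We use data frames instead of tibbles. This requires some caution
#   and the df_add_row() function below.
# * We do not collect the results in a result column, because we
#   just return them immediately, as we get them.
# * We do not need a pop() method, because poll() will just return
#   every message.

PROCESS_DONE <- 200L
PROCESS_STARTED <- 201L
PROCESS_MSG <- 301L
PROCESS_EXITED <- 500L
PROCESS_CRASHED <- 501L
PROCESS_CLOSED <- 502L

PROCESS_FAILURES <- c(PROCESS_EXITED, PROCESS_CRASHED, PROCESS_CLOSED)

task_q <- R6::R6Class(
  "task_q",
  public = list(
    initialize = function(concurrency = 4L, ...) {
      private$start_workers(concurrency, ...)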
invisible(self) }, list_tasks = function() private$tasks, get_num_waiting = function() sum(!private$tasks$idle & private$tasks$state == "waiting"), get_num_running = function() sum(!private$tasks$idle & private$tasks$state == "running"), get_num_done = function() sum(private$tasks$state == "done"), is_idle = function() sum(!private$tasks$idle) == 0, push = function(fun, args = list(), id = NULL) { if (is.null(id)) id <- private$get_next_id() if (id %in% private$tasks$id) stop("Duplicate task id") before <- which(private$tasks$idle)[1] private$tasks <- df_add_row(private$tasks, .before = before, id = id, idle = FALSE, state = "waiting", fun = I(list(fun)), args = I(list(args)), worker = I(list(NULL)) ) private$schedule() invisible(id) }, poll = function(timeout = 0) { limit <- Sys.time() + timeout as_ms <- function(x) if (x==Inf) -1 else as.integer(as.double(x, "secs") * 1000) repeat{ topoll <- which(private$tasks$state == "running") conns <- lapply( private$tasks$worker[topoll], function(x) x$get_poll_connection()) pr <- processx::poll(conns, as_ms(timeout)) ready <- topoll[pr == "ready"] results <- lapply(ready, function(i) { msg <- private$tasks$worker[[i]]$read() ## TODO: why can this be NULL? if (is.null(msg) || msg$code == PROCESS_MSG) { private$tasks$state[[i]] <- "running" } else if (msg$code == PROCESS_STARTED) { private$tasks$state[[i]] <- "ready" msg <- NULL } else if (msg$code == PROCESS_DONE) { private$tasks$state[[i]] <- "ready" } else if (msg$code %in% PROCESS_FAILURES) { private$handle_error(msg, i) } else { file <- private$tasks$args[[i]][[1]] errmsg <- paste0( "unknown message from testthat subprocess: ", msg$code, ", ", "in file `", file, "`" ) abort( errmsg, test_file = file, class = c("testthat_process_error", "testthat_error") ) } msg }) results <- results[! vapply(results, is.null, logical(1))] private$schedule() if (is.finite(timeout)) timeout <- limit - Sys.time() if (length(results) || timeout < 0) break; } results } ), private = list( tasks = NULL, next_id = 1L, get_next_id = function() { id <- private$next_id private$next_id <- id + 1L paste0(".", id) }, start_workers = function(concurrency, ...) { nl <- I(replicate(concurrency, NULL)) private$tasks <- data.frame( stringsAsFactors = FALSE, id = paste0(".idle-", seq_len(concurrency)), idle = TRUE, state = "running", fun = nl, args = nl, worker = nl) rsopts <- callr::r_session_options(...) for (i in seq_len(concurrency)) { rs <- callr::r_session$new(rsopts, wait = FALSE) private$tasks$worker[[i]] <- rs } }, schedule = function() { ready <- which(private$tasks$state == "ready") if (!length(ready)) return() rss <- private$tasks$worker[ready] private$tasks$worker[ready] <- replicate(length(ready), NULL) private$tasks$state[ready] <- ifelse(private$tasks$idle[ready], "waiting", "done") done <- which(private$tasks$state == "done") if (any(done)) private$tasks <- private$tasks[-done, ] waiting <- which(private$tasks$state == "waiting")[1:length(ready)] private$tasks$worker[waiting] <- rss private$tasks$state[waiting] <- ifelse(private$tasks$idle[waiting], "ready", "running") lapply(waiting, function(i) { if (! 
private$tasks$idle[i]) { private$tasks$worker[[i]]$call(private$tasks$fun[[i]], private$tasks$args[[i]]) } }) }, handle_error = function(msg, task_no) { inform("\n") # get out of the progress bar, if any fun <- private$tasks$fun[[task_no]] file <- private$tasks$args[[task_no]][[1]] if (is.null(fun)) { msg$err$stdout <- msg$stdout msg$err$stderr <- msg$stderr abort( paste0( "testthat subprocess failed to start, stderr:\n", msg$err$stderr ), test_file = NULL, parent = msg$err, class = c("testthat_process_error", "testthat_error") ) } else { abort( paste0("testthat subprocess exited in file `", file, "`"), test_file = file, parent = msg$err, class = c("testthat_process_error", "testthat_error") ) } } ) ) df_add_row <- function(df, ..., .before = NULL) { before <- .before %||% (nrow(df) + 1L) row <- data.frame(stringsAsFactors = FALSE, ...) if (before > nrow(df)) { rbind(df, row) } else if (before <= 1L) { rbind(row, df) } else { rbind(df[1:(before-1), ], row, df[before:nrow(df), ]) } } silence_r_cmd_check <- function() callr::r_session testthat/R/expect-self-test.R0000644000176200001440000000527114165427540015647 0ustar liggesusers#' Tools for testing expectations #' #' Use these expectations to test other expectations. #' Use `show_failure()` in examples to print the failure message without #' throwing an error. #' #' @param expr Expression that evaluates a single expectation. #' @param message Check that the failure message matches this regexp. #' @param ... Other arguments passed on to [expect_match()]. #' @keywords internal #' @export expect_success <- function(expr) { exp <- capture_expectation(expr) if (is.null(exp)) { fail("no expectation used.") } else if (!expectation_success(exp)) { fail(paste0( "Expectation did not succeed:\n", exp$message )) } else { succeed() } invisible(NULL) } #' @export #' @rdname expect_success expect_failure <- function(expr, message = NULL, ...) { exp <- capture_expectation(expr) if (is.null(exp)) { fail("No expectation used") return() } if (!expectation_failure(exp)) { fail("Expectation did not fail") return() } if (!is.null(message)) { expect_match(exp$message, message, ...) 
} else { succeed() } invisible(NULL) } expect_skip <- function(code, regexp = NULL) { expect_condition(code, regexp, class = "skip") } #' @export #' @rdname expect_success show_failure <- function(expr) { exp <- capture_expectation(expr) if (!is.null(exp) && expectation_failure(exp)) { cat(crayon::bold("Failed expectation:\n")) cat(exp$message, "\n", sep = "") } invisible() } expect_snapshot_failure <- function(x) { expect_snapshot_error(x, "expectation_failure") } expect_snapshot_reporter <- function(reporter, paths = test_path("reporters/tests.R")) { local_rng_version("3.3") set.seed(1014) # withr::local_seed(1014) expect_snapshot_output( with_reporter(reporter, { for (path in paths) test_one_file(path) }) ) } # to work around https://github.com/r-lib/withr/issues/167 local_rng_version <- function(version, .local_envir = parent.frame()) { withr::defer(RNGversion(as.character(getRversion())), envir = .local_envir) suppressWarnings(RNGversion(version)) } # Use specifically for testthat tests in order to override the # defaults found when starting the reporter local_output_override <- function(width = 80, crayon = TRUE, unicode = TRUE, .env = parent.frame()) { reporter <- get_reporter() if (is.null(reporter)) { return() } old_width <- reporter$width old_crayon <- reporter$crayon old_unicode <- reporter$unicode reporter$width <- width reporter$crayon <- crayon reporter$unicode <- unicode withr::defer({ reporter$width <- old_width reporter$crayon <- old_crayon reporter$unicode <- old_unicode }, .env) } testthat/R/evaluate-promise.R0000644000176200001440000000255114165635513015734 0ustar liggesusers#' Evaluate a promise, capturing all types of output. #' #' @param code Code to evaluate. #' @keywords internal #' @export #' @return A list containing #' \item{result}{The result of the function} #' \item{output}{A string containing all the output from the function} #' \item{warnings}{A character vector containing the text from each warning} #' \item{messages}{A character vector containing the text from each message} #' @examples #' evaluate_promise({ #' print("1") #' message("2") #' warning("3") #' 4 #' }) evaluate_promise <- function(code, print = FALSE) { warnings <- Stack$new() handle_warning <- function(condition) { warnings$push(condition) maybe_restart("muffleWarning") } messages <- Stack$new() handle_message <- function(condition) { messages$push(condition) maybe_restart("muffleMessage") } path <- withr::local_tempfile() result <- withr::with_output_sink( path, withCallingHandlers( withVisible(code), warning = handle_warning, message = handle_message ) ) if (result$visible && print) { withr::with_output_sink(path, print(result$value), append = TRUE) } output <- paste0(brio::read_lines(path), collapse = "\n") list( result = result$value, output = output, warnings = get_messages(warnings$as_list()), messages = get_messages(messages$as_list()) ) } testthat/R/deprec-condition.R0000644000176200001440000000741414164710002015665 0ustar liggesusers new_capture <- function(class) { exiting_handlers <- rep_named(class, list(identity)) calling_handlers <- rep_named(class, alist(function(cnd) { if (can_entrace(cnd)) { cnd <- cnd_entrace(cnd) } return_from(env, cnd) })) formals <- pairlist2(code = , entrace = FALSE) # R CMD check global variable NOTE code <- entrace <- NULL body <- expr({ if (!entrace) { return(tryCatch({ code; NULL }, !!!exiting_handlers)) } env <- environment() withCallingHandlers({ code; NULL }, !!!calling_handlers) }) new_function(formals, body, ns_env("testthat")) } #' Capture 
#' conditions, including messages, warnings, expectations, and errors.
#'
#' @description
#' `r lifecycle::badge("superseded")`
#'
#' These functions allow you to capture the side-effects of a function call
#' including printed output, messages and warnings. We no longer recommend
#' that you use these functions, instead relying on the [expect_message()]
#' and friends to bubble up unmatched conditions. If you just want to silence
#' unimportant warnings, use [suppressWarnings()].
#'
#' @param code Code to evaluate
#' @param entrace Whether to add a [backtrace][rlang::trace_back] to
#'   the captured condition.
#' @return Singular functions (`capture_condition`, `capture_expectation` etc)
#'   return a condition object. `capture_messages()` and `capture_warnings()`
#'   return a character vector of message text.
#' @keywords internal
#' @export
#' @examples
#' f <- function() {
#'   message("First")
#'   warning("Second")
#'   message("Third")
#' }
#'
#' capture_message(f())
#' capture_messages(f())
#'
#' capture_warning(f())
#' capture_warnings(f())
#'
#' # Condition will capture anything
#' capture_condition(f())
capture_condition <- new_capture("condition")

#' @export
#' @rdname capture_condition
capture_error <- new_capture("error")

#' @export
#' @rdname capture_condition
capture_expectation <- new_capture("expectation")

#' @export
#' @rdname capture_condition
capture_message <- new_capture("condition")

#' @export
#' @rdname capture_condition
capture_warning <- new_capture("warning")

#' @export
#' @rdname capture_condition
capture_messages <- function(code) {
  out <- Stack$new()

  withCallingHandlers(
    code,
    message = function(condition) {
      out$push(condition)
      maybe_restart("muffleMessage")
    }
  )

  get_messages(out$as_list())
}

#' @export
#' @rdname capture_condition
capture_warnings <- function(code) {
  out <- Stack$new()

  withCallingHandlers(
    code,
    warning = function(condition) {
      out$push(condition)
      maybe_restart("muffleWarning")
    }
  )

  get_messages(out$as_list())
}

get_messages <- function(x) {
  vapply(x, cnd_message, FUN.VALUE = character(1))
}

#' Is an error informative?
#'
#' @description
#' `r lifecycle::badge("deprecated")`
#'
#' `is_informative_error()` is a generic predicate that indicates
#' whether testthat users should explicitly test for an error
#' class. Since we no longer recommend you do that, this generic
#' has been deprecated.
#'
#' @param x An error object.
#' @inheritParams ellipsis::dots_empty
#'
#' @details
#' A few classes are hard-coded as uninformative:
#' - `simpleError`
#' - `rlang_error` unless a subclass is detected
#' - `Rcpp::eval_error`
#' - `Rcpp::exception`
#'
#' @keywords internal
#' @export
is_informative_error <- function(x, ...) {
  lifecycle::deprecate_warn("3.0.0", "is_informative_error()")
  ellipsis::check_dots_empty()

  if (!inherits(x, "error")) {
    return(TRUE)
  }

  if (inherits(x, c("simpleError", "Rcpp::eval_error", "Rcpp::exception"))) {
    return(FALSE)
  }

  if (inherits_only(x, c("rlang_error", "error", "condition"))) {
    return(FALSE)
  }

  UseMethod("is_informative_error")
}

#' @export
is_informative_error.default <- function(x, ...) {
  TRUE
}
testthat/R/expect-vector.R0000644000176200001440000000224414164710002015223 0ustar liggesusers#' Does code return a vector with the expected size and/or prototype?
#'
#' `expect_vector()` is a thin wrapper around [vctrs::vec_assert()], converting
#' the results of that function into the expectations used by testthat. This
#' means that it uses the vctrs concepts of `ptype` (prototype) and `size`.
#' See details in <https://vctrs.r-lib.org/articles/type-size.html>.
#'
#' @inheritParams expect_that
#' @param ptype (Optional) Vector prototype to test against. Should be a
#'   size-0 (empty) generalised vector.
#' @param size (Optional) Size to check for.
#' @export
#' @examples
#' if (requireNamespace("vctrs") && packageVersion("vctrs") > "0.1.0.9002") {
#'   expect_vector(1:10, ptype = integer(), size = 10)
#'   show_failure(expect_vector(1:10, ptype = integer(), size = 5))
#'   show_failure(expect_vector(1:10, ptype = character(), size = 5))
#' }
expect_vector <- function(object, ptype = NULL, size = NULL) {
  act <- quasi_label(enquo(object), arg = "object")

  message <- NULL
  tryCatch(
    vctrs::vec_assert(act$val, ptype = ptype, size = size, arg = act$lab),
    vctrs_error_assert = function(e) {
      message <<- e$message
    }
  )

  expect(is.null(message), message)
}
testthat/R/test-env.R0000644000176200001440000000253014164710003014203 0ustar liggesusers#' Determine testing status
#'
#' @description
#' * `is_testing()` determines if code is being run as part of a test
#' * `is_parallel()` determines if the test is being run in parallel.
#' * `testing_package()` gives name of the package being tested.
#'
#' These are thin wrappers that retrieve the values of environment variables.
#' To avoid creating a run-time dependency on testthat, you can inline the
#' source of these functions directly into your package.
#'
#' @export
is_testing <- function() {
  identical(Sys.getenv("TESTTHAT"), "true")
}

#' @export
#' @rdname is_testing
is_parallel <- function() {
  identical(Sys.getenv("TESTTHAT_IS_PARALLEL"), "true")
}

#' @export
#' @rdname is_testing
testing_package <- function() {
  Sys.getenv("TESTTHAT_PKG")
}

#' Generate default testing environment.
#'
#' We use a new environment which inherits from [globalenv()] or a package
#' namespace. In an ideal world, we'd avoid putting the global environment on
#' the search path for tests, but it's not currently possible without losing
#' the ability to load packages in tests.
#'
#' @keywords internal
#' @export
test_env <- function(package = NULL) {
  if (is.null(package)) {
    child_env(globalenv())
  } else {
    # Must clone environment so that during R CMD check, it's not locked
    # preventing creation of S4 classes
    env_clone(asNamespace(package))
  }
}
testthat/R/expect-constant.R0000644000176200001440000000423714164710002015556 0ustar liggesusers#' Does code return `TRUE` or `FALSE`?
#'
#' These are fall-back expectations that you can use when none of the other
#' more specific expectations apply. The disadvantage is that you may get
#' a less informative error message.
#'
#' Attributes are ignored.
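#'
#' For example, because values pass through `as.vector()` first, a named
#' logical still counts as `TRUE` (a small illustration, not from the
#' original documentation):
#'
#' \preformatted{
#' expect_true(c(ok = TRUE))  # passes: the name is dropped
#' }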
#' #' @seealso [is_false()] for complement #' @inheritParams expect_that #' @family expectations #' @examples #' expect_true(2 == 2) #' # Failed expectations will throw an error #' \dontrun{ #' expect_true(2 != 2) #' } #' expect_true(!(2 != 2)) #' # or better: #' expect_false(2 != 2) #' #' a <- 1:3 #' expect_true(length(a) == 3) #' # but better to use more specific expectation, if available #' expect_equal(length(a), 3) #' @name logical-expectations NULL #' @export #' @rdname logical-expectations expect_true <- function(object, info = NULL, label = NULL) { act <- quasi_label(enquo(object), label, arg = "object") act$val <- as.vector(act$val) expect_waldo_constant(act, TRUE, info = info) } #' @export #' @rdname logical-expectations expect_false <- function(object, info = NULL, label = NULL) { act <- quasi_label(enquo(object), label, arg = "object") act$val <- as.vector(act$val) expect_waldo_constant(act, FALSE, info = info) } #' Does code return `NULL`? #' #' This is a special case because `NULL` is a singleton so it's possible #' check for it either with `expect_equal(x, NULL)` or `expect_type(x, "NULL")`. #' #' @inheritParams expect_that #' @keywords internal #' @export #' @family expectations #' @examples #' x <- NULL #' y <- 10 #' #' expect_null(x) #' show_failure(expect_null(y)) expect_null <- function(object, info = NULL, label = NULL) { act <- quasi_label(enquo(object), label, arg = "object") expect_waldo_constant(act, NULL, info = info) } # helpers ----------------------------------------------------------------- expect_waldo_constant <- function(act, constant, info) { comp <- waldo_compare(act$val, constant, x_arg = "actual", y_arg = "expected") expect( identical(act$val, constant), sprintf( "%s is not %s\n\n%s", act$lab, deparse(constant), paste0(comp, collapse = "\n\n") ), info = info, trace_env = caller_env() ) invisible(act$val) } testthat/R/source.R0000644000176200001440000000430414165635513013750 0ustar liggesusers#' Source a file, directory of files, or various important subsets #' #' These are used by [test_dir()] and friends #' #' @param path Path to files. #' @param pattern Regular expression used to filter files. #' @param env Environment in which to evaluate code. #' @param chdir Change working directory to `dirname(path)`? #' @param wrap Automatically wrap all code within [test_that()]? This ensures #' that all expectations are reported, even if outside a test block. 
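#'
#' @examples
#' \dontrun{
#' # A minimal usage sketch (the test file name is hypothetical; paths
#' # assume a standard package layout):
#' env <- test_env()
#' source_test_helpers("tests/testthat", env = env)
#' source_file("tests/testthat/test-foo.R", env = env)
#' }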
#' @export
#' @keywords internal
source_file <- function(path, env = test_env(), chdir = TRUE,
                        wrap = TRUE) {
  stopifnot(file.exists(path))
  stopifnot(is.environment(env))

  lines <- brio::read_lines(path)
  srcfile <- srcfilecopy(path, lines, file.info(path)[1, "mtime"],
    isFile = TRUE
  )

  ## We need to parse from a connection, because parse() has a bug,
  ## and converts the input to the native encoding, if the text arg is used
  con <- textConnection(lines, encoding = "UTF-8")
  on.exit(try(close(con), silent = TRUE), add = TRUE)
  exprs <- parse(con, n = -1, srcfile = srcfile, encoding = "UTF-8")

  n <- length(exprs)
  if (n == 0L) return(invisible())

  if (chdir) {
    old_dir <- setwd(dirname(path))
    on.exit(setwd(old_dir), add = TRUE)
  }

  withr::local_options(testthat_topenv = env)
  if (wrap) {
    invisible(test_code(NULL, exprs, env))
  } else {
    invisible(eval(exprs, env))
  }
}

#' @rdname source_file
#' @export
source_dir <- function(path, pattern = "\\.[rR]$", env = test_env(),
                       chdir = TRUE, wrap = TRUE) {
  files <- normalizePath(sort(dir(path, pattern, full.names = TRUE)))
  lapply(files, source_file, env = env, chdir = chdir, wrap = wrap)
}

#' @rdname source_file
#' @export
source_test_helpers <- function(path = "tests/testthat", env = test_env()) {
  source_dir(path, "^helper.*\\.[rR]$", env = env, wrap = FALSE)
}

#' @rdname source_file
#' @export
source_test_setup <- function(path = "tests/testthat", env = test_env()) {
  source_dir(path, "^setup.*\\.[rR]$", env = env, wrap = FALSE)
}

#' @rdname source_file
#' @export
source_test_teardown <- function(path = "tests/testthat", env = test_env()) {
  source_dir(path, "^teardown.*\\.[rR]$", env = env, wrap = FALSE)
}
testthat/R/describe.R0000644000176200001440000000546014164710002014216 0ustar liggesusers#' describe: a BDD testing language
#'
#' A simple BDD DSL for writing tests. The language is similar to RSpec for
#' Ruby or Mocha for JavaScript. BDD tests read like sentences and it should
#' thus be easier to understand what the specification of a function/component
#' is.
#'
#' Tests using the `describe` syntax not only verify the tested code, but
#' also document its intended behaviour. Each `describe` block specifies a
#' larger component or function and contains a set of specifications. A
#' specification is defined by an `it` block. Each `it` block
#' functions as a test and is evaluated in its own environment. You
#' can also have nested `describe` blocks.
#'
#'
#' This test syntax helps to test the intended behaviour of your code. For
#' example: you want to write a new function for your package. Try to describe
#' the specification first using `describe`, before you write any code.
#' After that, you start to implement the tests for each specification (i.e.
#' the `it` block).
#'
#' Use `describe` to verify that you implement the right things and use
#' [test_that()] to ensure you do the things right.
#'
#' #' @param description description of the feature #' @param code test code containing the specs #' @export #' @examples #' describe("matrix()", { #' it("can be multiplied by a scalar", { #' m1 <- matrix(1:4, 2, 2) #' m2 <- m1 * 2 #' expect_equal(matrix(1:4 * 2, 2, 2), m2) #' }) #' it("can have not yet tested specs") #' }) #' #' # Nested specs: #' ## code #' addition <- function(a, b) a + b #' division <- function(a, b) a / b #' #' ## specs #' describe("math library", { #' describe("addition()", { #' it("can add two numbers", { #' expect_equal(1 + 1, addition(1, 1)) #' }) #' }) #' describe("division()", { #' it("can divide two numbers", { #' expect_equal(10 / 2, division(10, 2)) #' }) #' it("can handle division by 0") #not yet implemented #' }) #' }) describe <- function(description, code) { is_invalid_description <- function(description) { !is.character(description) || length(description) != 1 || nchar(description) == 0 } if (is_invalid_description(description)) { stop("description must be a string of at least length 1") } # prepares a new environment for each it-block describe_environment <- new.env(parent = parent.frame()) describe_environment$it <- function(it_description, it_code = NULL) { if (is_invalid_description(it_description)) { stop("it-description must be a string of at least length 1") } if (missing(it_code)) return() test_description <- paste0(description, ": ", it_description) test_code( test_description, substitute(it_code), env = describe_environment, skip_on_empty = FALSE ) } eval(substitute(code), describe_environment) invisible() } testthat/R/expectations-matches.R0000644000176200001440000000604714164710002016570 0ustar liggesusers#' Does a string match a regular expression? #' #' @details #' `expect_match()` is a wrapper around [grepl()]. See its documentation for #' more detail about the individual arguments. `expect_no_match()` provides #' the complementary case, checking that a string *does not* match a regular #' expression. #' #' @inheritParams expect_that #' @inheritParams base::grepl #' @param regexp Regular expression to test against. #' @param all Should all elements of actual value match `regexp` (TRUE), #' or does only one need to match (FALSE). #' @inheritDotParams base::grepl -pattern -x -perl -fixed #' @family expectations #' @keywords internal #' @export #' @examples #' expect_match("Testing is fun", "fun") #' expect_match("Testing is fun", "f.n") #' expect_no_match("Testing is fun", "horrible") #' #' \dontrun{ #' expect_match("Testing is fun", "horrible") #' #' # Zero-length inputs always fail #' expect_match(character(), ".") #' } expect_match <- function(object, regexp, perl = FALSE, fixed = FALSE, ..., all = TRUE, info = NULL, label = NULL) { # Capture here to avoid environment-related messiness act <- quasi_label(enquo(object), label, arg = "object") stopifnot(is.character(regexp), length(regexp) == 1) stopifnot(is.character(act$val)) if (length(object) == 0) { fail(sprintf("%s is empty.", act$lab), info = info) } expect_match_( act = act, regexp = regexp, perl = perl, fixed = fixed, ..., all = all, info = info, label = label, negate = FALSE ) } #' @describeIn expect_match Check that a string doesn't match a regular #' expression. 
#' @export expect_no_match <- function(object, regexp, perl = FALSE, fixed = FALSE, ..., all = TRUE, info = NULL, label = NULL) { # Capture here to avoid environment-related messiness act <- quasi_label(enquo(object), label, arg = "object") stopifnot(is.character(regexp), length(regexp) == 1) stopifnot(is.character(act$val)) if (length(object) == 0) { fail(sprintf("%s is empty.", act$lab), info = info) } expect_match_( act = act, regexp = regexp, perl = perl, fixed = fixed, ..., all = all, info = info, label = label, negate = TRUE ) } expect_match_ <- function(act, regexp, perl = FALSE, fixed = FALSE, ..., all = TRUE, info = NULL, label = NULL, negate = FALSE) { matches <- grepl(regexp, act$val, perl = perl, fixed = fixed, ...) condition <- if (negate) !matches else matches escape <- if (fixed) identity else escape_regex if (length(act$val) == 1) { values <- paste0("Actual value: \"", escape(encodeString(act$val)), "\"") } else { values <- paste0( "Actual values:\n", paste0("* ", escape(encodeString(act$val)), collapse = "\n") ) } expect( if (all) all(condition) else any(condition), sprintf( if (negate) "%s does match %s.\n%s" else "%s does not match %s.\n%s", escape(act$lab), encodeString(regexp, quote = '"'), values ), info = info ) invisible(act$val) } testthat/R/snapshot-manage.R0000644000176200001440000001164514166627056015547 0ustar liggesusers#' Snapshot management #' #' * `snapshot_accept()` accepts all modified snapshots. #' * `snapshot_review()` opens a Shiny app that shows a visual diff of each #' modified snapshot. This is particularly useful for whole file snapshots #' created by `expect_snapshot_file()`. #' #' @param files Optionally, filter effects to snapshots from specified files. #' This can be a snapshot name (e.g. `foo` or `foo.md`), a snapshot file name #' (e.g. `testfile/foo.txt`), or a snapshot file directory (e.g. `testfile/`). #' #' @param path Path to tests. 
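#'
#' @examples
#' \dontrun{
#' # A sketch of typical calls, run from a package root:
#' snapshot_accept()             # accept every changed snapshot
#' snapshot_accept("foo")        # just those recorded in _snaps/foo.md
#' snapshot_accept("testfile/")  # all whole-file snapshots for one test file
#' }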
#' @export snapshot_accept <- function(files = NULL, path = "tests/testthat") { changed <- snapshot_meta(files, path) if (nrow(changed) == 0) { inform("No snapshots to update") return(invisible()) } inform(c("Updating snapshots:", changed$name)) unlink(changed$cur) file.rename(changed$new, changed$cur) rstudio_tickle() invisible() } #' @rdname snapshot_accept #' @export snapshot_review <- function(files = NULL, path = "tests/testthat") { check_installed("shiny", "snapshot_review()") check_installed("diffviewer", "snapshot_review()") changed <- snapshot_meta(files, path) if (nrow(changed) == 0) { inform("No snapshots to update") return(invisible()) } review_app(changed$name, changed$cur, changed$new) rstudio_tickle() invisible() } review_app <- function(name, old_path, new_path) { stopifnot( length(name) == length(old_path), length(old_path) == length(new_path) ) n <- length(name) case_index <- stats::setNames(seq_along(name), name) handled <- rep(FALSE, n) ui <- shiny::fluidPage(style = "margin: 0.5em", shiny::fluidRow(style = "display: flex", shiny::div(style = "flex: 1 1", shiny::selectInput("cases", NULL, case_index, width = "100%") ), shiny::div(class = "btn-group", style = "margin-left: 1em; flex: 0 0 auto", shiny::actionButton("skip", "Skip"), shiny::actionButton("accept", "Accept", class = "btn-success"), ) ), shiny::fluidRow( diffviewer::visual_diff_output("diff") ) ) server <- function(input, output, session) { i <- shiny::reactive(as.numeric(input$cases)) output$diff <- diffviewer::visual_diff_render({ diffviewer::visual_diff(old_path[[i()]], new_path[[i()]]) }) # Handle buttons - after clicking update move input$cases to next case, # and remove current case (for accept/reject). If no cases left, close app shiny::observeEvent(input$reject, { inform(paste0("Rejecting snapshot: '", new_path[[i()]], "'")) unlink(new_path[[i()]]) update_cases() }) shiny::observeEvent(input$accept, { inform(paste0("Accepting snapshot: '", old_path[[i()]], "'")) file.rename(new_path[[i()]], old_path[[i()]]) update_cases() }) shiny::observeEvent(input$skip, { i <- next_case() shiny::updateSelectInput(session, "cases", selected = i) }) update_cases <- function() { handled[[i()]] <<- TRUE i <- next_case() shiny::updateSelectInput(session, "cases", choices = case_index[!handled], selected = i ) } next_case <- function() { if (all(handled)) { inform("Review complete") shiny::stopApp() return() } # Find next case; remaining <- case_index[!handled] next_cases <- which(remaining > i()) if (length(next_cases) == 0) remaining[[1]] else remaining[[next_cases[[1]]]] } } inform(c( "Starting Shiny app for snapshot review", i = "Use Ctrl + C to quit" )) shiny::runApp( shiny::shinyApp(ui, server), quiet = TRUE, launch.browser = shiny::paneViewer() ) invisible() } # helpers ----------------------------------------------------------------- snapshot_meta <- function(files = NULL, path = "tests/testthat") { all <- dir(file.path(path, "_snaps"), recursive = TRUE, full.names = TRUE) cur <- all[!grepl("\\.new\\.", all)] snap_file <- basename(dirname(cur)) != "_snaps" snap_test <- ifelse(snap_file, basename(dirname(cur)), gsub("\\.md$", "", basename(cur))) if (length(cur) == 0) { new <- character() } else { new <- paste0(tools::file_path_sans_ext(cur), ".new.", tools::file_ext(cur)) new[!file.exists(new)] <- NA } snap_name <- ifelse(snap_file, file.path(snap_test, basename(cur)), basename(cur) ) out <- data.frame( test = snap_test, name = snap_name, cur = cur, new = new, stringsAsFactors = FALSE ) out <- out[!is.na(out$new), 
, drop = FALSE]
  out <- out[order(out$test, out$cur), , drop = FALSE]
  rownames(out) <- NULL

  if (!is.null(files)) {
    is_dir <- substr(files, nchar(files), nchar(files)) == "/"
    dirs <- files[is_dir]
    files <- files[!is_dir]

    dirs <- substr(dirs, 1, nchar(dirs) - 1)
    files <- ifelse(tools::file_ext(files) == "", paste0(files, ".md"), files)

    out <- out[out$name %in% files | out$test %in% dirs, , drop = FALSE]
  }

  out
}
testthat/R/parallel.R0000644000176200001440000002561214164710002014233 0ustar liggesusers
# Architecture (an ASCII box diagram in the original source, whose layout did
# not survive extraction; summarised here): the main R process runs
# test_dir_parallel(), whose event loop feeds a Progress2Reporter. Each
# subprocess (Subprocess 1, 2, 3, ...) runs test_file() under a
# SubprocessReporter; every test_that() result is forwarded back to the main
# process's event loop via signalCondition().
#
# ## Notes
#
# * Subprocesses run `callr::r_session` R sessions. They are re-used,
#   one R session can be used for several test_file() calls.
# * Helper and setup files are loaded in the subprocesses after this.
# * The main process puts all test files in the task queue, and then
#   runs an event loop.

test_files_parallel <- function(
  test_dir,
  test_package,
  test_paths,
  load_helpers = TRUE,
  reporter = default_parallel_reporter(),
  env = NULL,
  stop_on_failure = FALSE,
  stop_on_warning = FALSE,
  wrap = TRUE, # unused, to match test_files signature
  load_package = c("none", "installed", "source")
) {
  reporters <- test_files_reporter(reporter)

  # TODO: support timeouts. 20-30s for each file by default?

  num_workers <- min(default_num_cpus(), length(test_paths))
  inform(paste0(
    "Starting ", num_workers, " test process",
    if (num_workers != 1) "es"
  ))

  # Set up work queue ------------------------------------------

  queue <- NULL
  withr::defer(queue_teardown(queue))

  # Start workers in parallel and add test tasks to queue.
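  # queue_setup() below spawns persistent callr::r_session workers and
  # pushes one task per test file; workers are re-used across files, and
  # the event loop polls the queue, forwarding each subprocess message to
  # the active reporters.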
queue <- queue_setup( test_paths = test_paths, test_package = test_package, test_dir = test_dir, load_helpers = load_helpers, num_workers = num_workers, load_package = load_package ) with_reporter(reporters$multi, { parallel_updates <- reporter$capabilities$parallel_updates if (parallel_updates) { parallel_event_loop_smooth(queue, reporters) } else { parallel_event_loop_chunky(queue, reporters) } }) test_files_check(reporters$list$get_results(), stop_on_failure = stop_on_failure, stop_on_warning = stop_on_warning ) } default_num_cpus <- function() { # Use common option, if set ncpus <- getOption("Ncpus", NULL) if (!is.null(ncpus)) { ncpus <- suppressWarnings(as.integer(ncpus)) if (is.na(ncpus)) abort("`getOption(Ncpus)` must be an integer") return(ncpus) } # Otherwise use env var if set ncpus <- Sys.getenv("TESTTHAT_CPUS", "") if (ncpus != "") { ncpus <- suppressWarnings(as.integer(ncpus)) if (is.na(ncpus)) abort("TESTTHAT_CPUS must be an integer") return(ncpus) } # Otherwise 2 2L } parallel_event_loop_smooth <- function(queue, reporters) { update_interval <- 0.1 next_update <- proc.time()[[3]] + update_interval while (!queue$is_idle()) { # How much time do we have to poll before the next UI update? now <- proc.time()[[3]] poll_time <- max(next_update - now, 0) next_update <- now + update_interval msgs <- queue$poll(poll_time) updated <- FALSE for (x in msgs) { if (x$code != PROCESS_MSG) { next } m <- x$message if (!inherits(m, "testthat_message")) { message(m) next } if (m$cmd != "DONE") { reporters$multi$start_file(m$filename) do.call(reporters$multi[[m$cmd]], m$args) updated <- TRUE } } # We need to spin, even if there were no events if (!updated) reporters$multi$update() } } parallel_event_loop_chunky <- function(queue, reporters) { files <- list() while (!queue$is_idle()) { msgs <- queue$poll(Inf) for (x in msgs) { if (x$code != PROCESS_MSG) { next } m <- x$message if (!inherits(m, "testthat_message")) { message(m) next } # Record all events until we get end of file, then we replay them all # with the local reporters. This prevents out of order reporting. 
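      # For example, events for two files polled in the same batch can
      # interleave, so each file's events are buffered under
      # files[[m$filename]] and only replayed once its "DONE" message arrives.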
if (m$cmd != "DONE") { files[[m$filename]] <- append(files[[m$filename]], list(m)) } else { replay_events(reporters$multi, files[[m$filename]]) reporters$multi$end_context_if_started() files[[m$filename]] <- NULL } } } } replay_events <- function(reporter, events) { for (event in events) { do.call(reporter[[event$cmd]], event$args) } } queue_setup <- function(test_paths, test_package, test_dir, num_workers, load_helpers, load_package) { # TODO: observe `load_package`, but the "none" default is not # OK for the subprocess, because it'll not have the tested package if (load_package == "none") load_package <- "source" # TODO: similarly, load_helpers = FALSE, coming from devtools, # is not appropriate in the subprocess load_helpers <- TRUE test_package <- test_package %||% Sys.getenv("TESTTHAT_PKG") # First we load the package "manually", in case it is testthat itself load_hook <- expr({ switch(!!load_package, installed = library(!!test_package, character.only = TRUE), source = pkgload::load_all(!!test_dir, helpers = FALSE, quiet = TRUE) ) asNamespace("testthat")$queue_process_setup( test_package = !!test_package, test_dir = !!test_dir, load_helpers = !!load_helpers, load_package = "none" ) }) queue <- task_q$new(concurrency = num_workers, load_hook = load_hook) fun <- transport_fun(function(path) asNamespace("testthat")$queue_task(path)) for (path in test_paths) { queue$push(fun, list(path)) } queue } queue_process_setup <- function(test_package, test_dir, load_helpers, load_package) { env <- asNamespace("testthat")$test_files_setup_env( test_package, test_dir, load_package ) asNamespace("testthat")$test_files_setup_state( test_dir = test_dir, test_package = test_package, load_helpers = load_helpers, env = env, .env = .GlobalEnv ) # Save test environment in global env where it can easily be retrieved .GlobalEnv$.test_env <- env } queue_task <- function(path) { env <- .GlobalEnv$.test_env withr::local_envvar("TESTTHAT_IS_PARALLEL" = "true") reporters <- test_files_reporter(SubprocessReporter$new()) with_reporter(reporters$multi, test_one_file(path, env = env)) NULL } # Clean up subprocesses: we call teardown methods, but we only give them a # second, before killing the whole process tree using ps's env var marker # method. queue_teardown <- function(queue) { if (is.null(queue)) { return() } tasks <- queue$list_tasks() num <- nrow(tasks) clean_fn <- function() { withr::deferred_run(.GlobalEnv) quit(save = "no", status = 1L, runLast = TRUE) } topoll <- list() for (i in seq_len(num)) { if (!is.null(tasks$worker[[i]])) { # The worker might have crashed or exited, so this might fail. # If it does then we'll just ignore that worker tryCatch({ tasks$worker[[i]]$call(clean_fn) topoll <- c(topoll, tasks$worker[[i]]$get_poll_connection()) }, error = function(e) tasks$worker[i] <- list(NULL)) } } # Give covr time to write out the coverage files if (in_covr()) grace <- 30L else grace <- 3L limit <- Sys.time() + grace while (length(topoll) > 0 && (timeout <- limit - Sys.time()) > 0) { timeout <- as.double(timeout, units = "secs") * 1000 pr <- processx::poll(topoll, as.integer(timeout)) topoll <- topoll[pr != "ready"] } for (i in seq_len(num)) { if (!is.null(tasks$worker[[i]])) { tryCatch( close(tasks$worker[[i]]$get_input_connection()), error = function(e) NULL ) if (ps::ps_is_supported()) { tasks$worker[[i]]$kill_tree() } else { tasks$worker[[i]]$kill() } } } } # Reporter that just forwards events in the subprocess back to the main process # # Ideally, these messages would be throttled, i.e. 
if the test code
# emits many expectation conditions fast, SubprocessReporter should
# collect several of them and only emit a condition a couple of times
# a second. End-of-test and end-of-file events would be transmitted
# immediately.

SubprocessReporter <- R6::R6Class("SubprocessReporter",
  inherit = Reporter,
  public = list(
    start_file = function(filename) {
      private$filename <- filename
      private$event("start_file", filename)
    },
    start_test = function(context, test) {
      private$event("start_test", context, test)
    },
    start_context = function(context) {
      private$event("start_context", context)
    },
    add_result = function(context, test, result) {
      if (inherits(result, "expectation_success")) {
        # Strip bulky components to reduce data transfer cost
        result[["srcref"]] <- NULL
        result[["trace"]] <- NULL
      }
      private$event("add_result", context, test, result)
    },
    end_test = function(context, test) {
      private$event("end_test", context, test)
    },
    end_context = function(context) {
      private$event("end_context", context)
    },
    end_file = function() {
      private$event("end_file")
    },
    end_reporter = function() {
      private$event("DONE")
    }
  ),
  private = list(
    filename = NULL,
    event = function(cmd, ...) {
      msg <- list(
        code = PROCESS_MSG,
        cmd = cmd,
        filename = private$filename,
        time = proc.time()[[3]],
        args = list(...)
      )
      class(msg) <- c("testthat_message", "callr_message", "condition")
      signalCondition(msg)
    }
  )
)
testthat/R/try-again.R0000644000176200001440000000161414164710003014327 0ustar liggesusers#' Try evaluating an expression multiple times until it succeeds.
#'
#' @param times Maximum number of attempts.
#' @param code Code to evaluate
#' @keywords internal
#' @export
#' @examples
#' third_try <- local({
#'   i <- 3
#'   function() {
#'     i <<- i - 1
#'     if (i > 0) fail(paste0("i is ", i))
#'   }
#' })
#' try_again(3, third_try())
try_again <- function(times, code) {
  while (times > 0) {
    e <- tryCatch(
      withCallingHandlers(
        {
          code
          NULL
        },
        warning = function(e) {
          if (identical(e$message, "restarting interrupted promise evaluation")) {
            maybe_restart("muffleWarning")
          }
        }
      ),
      expectation_failure = function(e) {
        e
      },
      error = function(e) {
        e
      }
    )
    if (is.null(e)) {
      return(invisible(TRUE))
    }
    times <- times - 1L
  }
  stop(e)
}
testthat/R/reporter-stop.R0000644000176200001440000000315014164710002015257 0ustar liggesusers#' Test reporter: stop on error
#'
#' The default reporter used when [expect_that()] is run interactively.
#' It responds by [stop()]ping on failures and doing nothing otherwise. This
#' will ensure that a failing test will raise an error.
#'
#' This should be used when doing a quick and dirty test, or during the final
#' automated testing of R CMD check. Otherwise, use a reporter that runs all
#' tests and gives you more context about the problem.
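#'
#' A minimal sketch of direct use (ordinary [with_reporter()] usage; this
#' reporter is normally selected for you when running tests interactively):
#'
#' @examples
#' \dontrun{
#' with_reporter(StopReporter$new(), {
#'   test_that("addition works", expect_equal(1 + 1, 2))
#' })
#' }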
#' #' @export #' @family reporters StopReporter <- R6::R6Class("StopReporter", inherit = Reporter, public = list( failures = NULL, n_fail = 0L, stop_reporter = TRUE, initialize = function(stop_reporter = TRUE) { super$initialize() self$failures <- Stack$new() self$stop_reporter <- stop_reporter }, start_test = function(context, test) { self$failures <- Stack$new() }, add_result = function(context, test, result) { if (expectation_success(result)) { return() } if (expectation_broken(result)) { self$n_fail <- self$n_fail + 1 } self$failures$push(result) }, end_test = function(context, test) { self$local_user_output() failures <- self$failures$as_list() if (length(failures) == 0) { self$cat_line(colourise("Test passed", "success"), " ", praise_emoji()) return() } messages <- vapply(failures, issue_summary, rule = TRUE, character(1)) self$cat_line(messages, "\n") }, stop_if_needed = function() { if (self$stop_reporter && self$n_fail > 0) { abort("Test failed") } } ) ) testthat/R/reporter-location.R0000644000176200001440000000136614164710002016107 0ustar liggesusers#' Test reporter: location #' #' This reporter simply prints the location of every expectation and error. #' This is useful if you're trying to figure out the source of a segfault, #' or you want to figure out which code triggers a C/C++ breakpoint #' #' @export #' @family reporters LocationReporter <- R6::R6Class("LocationReporter", inherit = Reporter, public = list( start_test = function(context, test) { self$cat_line("Start test: ", test) }, add_result = function(context, test, result) { status <- expectation_type(result) self$cat_line(" ", expectation_location(result), " [", status, "]") }, end_test = function(context, test) { self$cat_line("End test: ", test) self$cat_line() } ) ) testthat/R/capture-output.R0000644000176200001440000000367714165635513015465 0ustar liggesusers#' Capture output to console #' #' Evaluates `code` in a special context in which all output is captured, #' similar to [capture.output()]. #' #' Results are printed using the `testthat_print()` generic, which defaults #' to `print()`, giving you the ability to customise the printing of your #' object in tests, if needed. #' #' @param code Code to evaluate. #' @param print If `TRUE` and the result of evaluating `code` is #' visible, print the result using `testthat_print()`. #' @param width Number of characters per line of output. This does not #' inherit from `getOption("width")` so that tests always use the same #' output width, minimising spurious differences. #' @return `capture_output()` returns a single string. 
`capture_output_lines()` #' returns a character vector with one entry for each line #' @keywords internal #' @export #' @examples #' capture_output({ #' cat("Hi!\n") #' cat("Bye\n") #' }) #' #' capture_output_lines({ #' cat("Hi!\n") #' cat("Bye\n") #' }) #' #' capture_output("Hi") #' capture_output("Hi", print = TRUE) capture_output <- function(code, print = FALSE, width = 80) { output <- capture_output_lines(code, print, width = width) paste0(output, collapse = "\n") } #' @export #' @rdname capture_output capture_output_lines <- function(code, print = FALSE, width = 80) { eval_with_output(code, print = print, width = width)$out } eval_with_output <- function(code, print = FALSE, width = 80) { path <- withr::local_tempfile() if (!is.null(width)) { local_width(width) } result <- withr::with_output_sink(path, withVisible(code)) if (result$visible && print) { withr::with_output_sink(path, testthat_print(result$value), append = TRUE) } list( val = result$value, vis = result$visible, out = brio::read_lines(path) ) } #' @export #' @rdname capture_output testthat_print <- function(x) { UseMethod("testthat_print") } #' @export testthat_print.default <- function(x) { print(x) } testthat/R/expect-silent.R0000644000176200001440000000143114164710002015214 0ustar liggesusers#' Does code execute silently? #' #' Checks that the code produces no output, messages, or warnings. #' #' @inheritParams expect_error #' @return The first argument, invisibly. #' @family expectations #' @export #' @examples #' expect_silent("123") #' #' f <- function() { #' message("Hi!") #' warning("Hey!!") #' print("OY!!!") #' } #' \dontrun{ #' expect_silent(f()) #' } expect_silent <- function(object) { act <- quasi_capture(enquo(object), NULL, evaluate_promise) outputs <- c( if (!identical(act$cap$output, "")) "output", if (length(act$cap$warnings) > 0) "warnings", if (length(act$cap$messages) > 0) "messages" ) expect( length(outputs) == 0, sprintf("%s produced %s.", act$lab, paste(outputs, collapse = ", ")) ) invisible(act$cap$result) } testthat/R/reporter-fail.R0000644000176200001440000000126414164710002015207 0ustar liggesusers#' Test reporter: fail at end. #' #' This reporter will simply throw an error if any of the tests failed. It is #' best combined with another reporter, such as the #' [SummaryReporter]. #' #' @export #' @family reporters FailReporter <- R6::R6Class("FailReporter", inherit = Reporter, public = list( failed = FALSE, initialize = function(...) { self$capabilities$parallel_support <- TRUE super$initialize(...) }, add_result = function(context, test, result) { self$failed <- self$failed || expectation_broken(result) }, end_reporter = function() { if (self$failed) { stop("Failures detected.", call. = FALSE) } } ) ) testthat/R/expect-named.R0000644000176200001440000000362314164710002015007 0ustar liggesusers#' Does code return a vector with (given) names? #' #' You can either check for the presence of names (leaving `expected` #' blank), specific names (by supplying a vector of names), or absence of #' names (with `NULL`). #' #' @inheritParams expect_that #' @param expected Character vector of expected names. Leave missing to #' match any names. Use `NULL` to check for absence of names. #' @param ignore.order If `TRUE`, sorts names before comparing to #' ignore the effect of order. #' @param ignore.case If `TRUE`, lowercases all names to ignore the #' effect of case. 
#' @family expectations
#' @export
#' @examples
#' x <- c(a = 1, b = 2, c = 3)
#' expect_named(x)
#' expect_named(x, c("a", "b", "c"))
#'
#' # Use options to control sensitivity
#' expect_named(x, c("B", "C", "A"), ignore.order = TRUE, ignore.case = TRUE)
#'
#' # Can also check for the absence of names with NULL
#' z <- 1:4
#' expect_named(z, NULL)
expect_named <- function(object, expected, ignore.order = FALSE,
                         ignore.case = FALSE, info = NULL, label = NULL) {
  act <- quasi_label(enquo(object), label, arg = "object")
  act$names <- names(act$val)

  if (missing(expected)) {
    expect(
      !identical(act$names, NULL),
      sprintf("%s does not have names.", act$lab)
    )
  } else {
    exp_names <- normalise_names(expected, ignore.order, ignore.case)
    act$names <- normalise_names(act$names, ignore.order, ignore.case)

    expect(
      identical(act$names, exp_names),
      sprintf(
        "Names of %s (%s) don't match %s",
        act$lab,
        paste0("'", act$names, "'", collapse = ", "),
        paste0("'", exp_names, "'", collapse = ", ")
      ),
      info = info
    )
  }

  invisible(act$val)
}

normalise_names <- function(x, ignore.order = FALSE, ignore.case = FALSE) {
  if (is.null(x)) return()

  if (ignore.order) x <- sort(x)
  if (ignore.case) x <- tolower(x)

  x
}
testthat/R/mock.R0000644000176200001440000001117014164710002013362 0ustar liggesusers#' Mock functions in a package.
#'
#' @description
#' `r lifecycle::badge("superseded")`
#'
#' `with_mock()` and `local_mock()` are superseded in favour of the more
#' rigorous techniques found in the [mockr](https://krlmlr.github.io/mockr/)
#' and [mockery](https://github.com/r-lib/mockery#mockery) packages.
#'
#' Mocking allows you to temporarily replace the implementation of functions
#' within a package, which is useful for testing code that relies on functions
#' that are slow, have unintended side effects or access resources that may
#' not be available when testing.
#'
#' This works by using some C code to temporarily modify the mocked function
#' _in place_. On exit, all functions are restored to their previous state.
#' This is somewhat abusive of R's internals so use with care. In particular,
#' functions in base packages cannot be mocked; to work around this you'll
#' need to make a wrapper function in your own package.
#'
#' @section 3rd edition:
#' `r lifecycle::badge("deprecated")`
#'
#' `with_mock()` and `local_mock()` are deprecated in the third edition.
#'
#' @param ... named parameters redefine mocked functions, unnamed parameters
#'   will be evaluated after mocking the functions
#' @param .env the environment in which to patch the functions,
#'   defaults to the top-level environment. A character is interpreted as
#'   package name.
#' @param .local_envir Environment in which to add exit handler.
#'   For expert use only.
#' @keywords internal
#' @return The result of the last unnamed parameter
#' @references Suraj Gupta (2012): \href{http://blog.obeautifulcode.com/R/How-R-Searches-And-Finds-Stuff/}{How R Searches And Finds Stuff}
#' @export
with_mock <- function(..., .env = topenv()) {
  edition_deprecate(3, "with_mock()",
    "Please use mockr or mockery packages instead"
  )

  dots <- eval(substitute(alist(...)))
  mock_qual_names <- names(dots)

  if (all(mock_qual_names == "")) {
    warning(
      "Not mocking anything. Please use named parameters to specify the functions you want to mock.",
      call.
= FALSE ) code_pos <- rep(TRUE, length(dots)) } else { code_pos <- (mock_qual_names == "") } code <- dots[code_pos] mock_funs <- lapply(dots[!code_pos], eval, parent.frame()) mocks <- extract_mocks(mock_funs, .env = .env) on.exit(lapply(mocks, reset_mock), add = TRUE) lapply(mocks, set_mock) # Evaluate the code if (length(code) > 0) { for (expression in code[-length(code)]) { eval(expression, parent.frame()) } # Isolate last item for visibility eval(code[[length(code)]], parent.frame()) } } #' @export #' @rdname with_mock local_mock <- function(..., .env = topenv(), .local_envir = parent.frame()) { edition_deprecate(3, "local_mock()", "Please use mockr or mockery packages instead" ) mocks <- extract_mocks(list(...), .env = .env) on_exit <- bquote( on.exit(lapply(.(mocks), .(reset_mock)), add = TRUE), ) lapply(mocks, set_mock) eval_bare(on_exit, .local_envir) invisible() } pkg_rx <- ".*[^:]" colons_rx <- "::(?:[:]?)" name_rx <- ".*" pkg_and_name_rx <- sprintf("^(?:(%s)%s)?(%s)$", pkg_rx, colons_rx, name_rx) extract_mocks <- function(funs, .env) { if (is.environment(.env)) { .env <- environmentName(.env) } mock_qual_names <- names(funs) lapply( stats::setNames(nm = mock_qual_names), function(qual_name) { pkg_name <- gsub(pkg_and_name_rx, "\\1", qual_name) if (is_base_pkg(pkg_name)) { stop( "Can't mock functions in base packages (", pkg_name, ")", call. = FALSE ) } name <- gsub(pkg_and_name_rx, "\\2", qual_name) if (pkg_name == "") { pkg_name <- .env } env <- asNamespace(pkg_name) if (!exists(name, envir = env, mode = "function")) { stop("Function ", name, " not found in environment ", environmentName(env), ".", call. = FALSE ) } mock(name = name, env = env, new = funs[[qual_name]]) } ) } mock <- function(name, env, new) { target_value <- get(name, envir = env, mode = "function") structure( list( env = env, name = as.name(name), orig_value = .Call(duplicate_, target_value), target_value = target_value, new_value = new ), class = "mock" ) } set_mock <- function(mock) { .Call(reassign_function, mock$name, mock$env, mock$target_value, mock$new_value) } reset_mock <- function(mock) { .Call(reassign_function, mock$name, mock$env, mock$target_value, mock$orig_value) } is_base_pkg <- function(x) { x %in% rownames(utils::installed.packages(priority = "base")) } test_mock1 <- function() { test_mock2() } test_mock2 <- function() 10 testthat/R/expect-that.R0000644000176200001440000000522014164710002014656 0ustar liggesusers#' Expect that a condition holds. #' #' @description #' `r lifecycle::badge("superseded")` #' #' An old style of testing that's no longer encouraged. #' #' @section 3rd edition: #' `r lifecycle::badge("deprecated")` #' #' This style of testing is formally deprecated as of the 3rd edition. #' Use a more specific `expect_` function instead. #' #' @param object Object to test. #' #' Supports limited unquoting to make it easier to generate readable failures #' within a function or for loop. See [quasi_label] for more details. #' @param condition, a function that returns whether or not the condition #' is met, and if not, an error message to display. #' @param label Used to customise failure messages. For expert use only. #' @param info Extra information to be included in the message. This argument #' is soft-deprecated and should not be used in new code. Instead see #' alternatives in [quasi_label]. #' @return the (internal) expectation result as an invisible list #' @keywords internal #' @export #' @seealso [fail()] for an expectation that always fails. 
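#' @section Migration sketch:
#' Old-style calls translate mechanically to their modern equivalents,
#' e.g. `expect_that(x, equals(y))` becomes `expect_equal(x, y)`, and
#' `expect_that(f(), throws_error())` becomes `expect_error(f())`.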
#' @examples #' expect_that(5 * 2, equals(10)) #' expect_that(sqrt(2) ^ 2, equals(2)) #' \dontrun{ #' expect_that(sqrt(2) ^ 2, is_identical_to(2)) #' } expect_that <- function(object, condition, info = NULL, label = NULL) { edition_deprecate(3, "expect_that()") condition(object) } #' Default expectations that always succeed or fail. #' #' These allow you to manually trigger success or failure. Failure is #' particularly useful to a pre-condition or mark a test as not yet #' implemented. #' #' @param message a string to display. #' @inheritParams expect #' @export #' @examples #' \dontrun{ #' test_that("this test fails", fail()) #' test_that("this test succeeds", succeed()) #' } fail <- function(message = "Failure has been forced", info = NULL, trace_env = caller_env()) { expect(FALSE, message, info = info, trace_env = trace_env) } #' @rdname fail #' @export succeed <- function(message = "Success has been forced", info = NULL) { expect(TRUE, message, info = info) } #' Negate an expectation #' #' This negates an expectation, making it possible to express that you #' want the opposite of a standard expectation. This function is deprecated #' and will be removed in a future version. #' #' @param f an existing expectation function #' @keywords internal #' @export not <- function(f) { warning("`not()` is deprecated.", call. = FALSE) stopifnot(is.function(f)) negate <- function(expt) { expect( !expectation_success(expt), failure_message = paste0("NOT(", expt$message, ")"), srcref = expt$srcref ) } function(...) { negate(capture_expectation(f(...))) } } testthat/R/teardown.R0000644000176200001440000000376714164710003014272 0ustar liggesusersfile_teardown_env <- new.env(parent = emptyenv()) file_teardown_env$queue <- list() #' Run code before/after tests #' #' @description #' `r lifecycle::badge("superseded")` #' #' We no longer recommend using `setup()` and `teardown()`; instead #' we think it's better practice to use a **test fixture** as described in #' `vignette("test-fixtures")`. #' #' Code in a `setup()` block is run immediately in a clean environment. #' Code in a `teardown()` block is run upon completion of a test file, #' even if it exits with an error. Multiple calls to `teardown()` will be #' executed in the order they were created. #' #' @param code Code to evaluate #' @param env Environment in which code will be evaluated. For expert #' use only. 
#' @export
#' @keywords internal
#' @examples
#' \dontrun{
#' # Old approach
#' tmp <- tempfile()
#' setup(writeLines("some test data", tmp))
#' teardown(unlink(tmp))
#' }
#'
#' # Now recommended:
#' local_test_data <- function(env = parent.frame()) {
#'   tmp <- tempfile()
#'   writeLines("some test data", tmp)
#'   withr::defer(unlink(tmp), env)
#'
#'   tmp
#' }
#' # Then call local_test_data() in your tests
teardown <- function(code, env = parent.frame()) {
  edition_deprecate(3, "teardown()",
    "Please use test fixtures instead; see vignette('test-fixtures') for details"
  )

  fun <- new_function(list(), enexpr(code), env = env)
  file_teardown_env$queue <- append(file_teardown_env$queue, fun)
  invisible()
}

#' @export
#' @rdname teardown
setup <- function(code, env = parent.frame()) {
  edition_deprecate(3, "setup()",
    "Please use test fixtures instead; see vignette('test-fixtures') for details"
  )

  out <- eval_tidy(enquo(code), env = env)
  invisible(out)
}

teardown_reset <- function() {
  file_teardown_env$queue <- list()
}

teardown_run <- function(path = ".") {
  if (length(file_teardown_env$queue) == 0) return()

  old_dir <- setwd(path)
  on.exit(setwd(old_dir), add = TRUE)

  lapply(file_teardown_env$queue, function(f) try(f()))
  teardown_reset()
  gc()
}
testthat/R/colour-text.R0000644000176200001440000000102314164710002014712 0ustar liggesuserscolourise <- function(text, as = c("success", "skip", "warning", "failure", "error")) {
  if (has_colour()) {
    crayon::style(text, testthat_style(as))
  } else {
    text
  }
}

has_colour <- function() {
  isTRUE(getOption("testthat.use_colours", TRUE)) && crayon::has_color()
}

testthat_style <- function(type = c("success", "skip", "warning", "failure", "error")) {
  type <- match.arg(type)

  c(
    success = "green",
    skip = "blue",
    warning = "magenta",
    failure = "orange",
    error = "orange"
  )[[type]]
}
testthat/R/auto-test.R0000644000176200001440000001155514164710002014365 0ustar liggesusers#' Watches code and tests for changes, rerunning tests as appropriate.
#'
#' The idea behind `auto_test()` is that you just leave it running while
#' you develop your code. Every time you save a file it will be automatically
#' tested and you can easily see if your changes have caused any test
#' failures.
#'
#' The current strategy for rerunning tests is as follows:
#'
#' - if any code has changed, then those files are reloaded and all tests
#'   rerun
#' - otherwise, each new or modified test is run
#'
#' In the future, `auto_test()` might implement one of the following more
#' intelligent alternatives:
#'
#' - Use codetools to build up dependency tree and then rerun tests only
#'   when a dependency changes.
#' - Mimic ruby's autotest and rerun only failing tests until they pass,
#'   and then rerun all tests.
#
#' @seealso [auto_test_package()]
#' @export
#' @param code_path path to directory containing code
#' @param test_path path to directory containing tests
#' @param reporter test reporter to use
#' @param env environment in which to execute test suite.
#' @param hash Passed on to [watch()]. When FALSE, uses less accurate
#'   modification time stamps, but those are faster for large files.
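#' @examples
#' \dontrun{
#' # A typical invocation from a package root (paths are illustrative):
#' auto_test("R", "tests/testthat")
#' }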
#' @keywords debugging auto_test <- function(code_path, test_path, reporter = default_reporter(), env = test_env(), hash = TRUE) { reporter <- find_reporter(reporter) code_path <- normalizePath(code_path) test_path <- normalizePath(test_path) # Start by loading all code and running all tests source_dir(code_path, env = env) test_dir(test_path, env = env, reporter = reporter$clone(deep = TRUE)) # Next set up watcher to monitor changes watcher <- function(added, deleted, modified) { changed <- normalizePath(c(added, modified)) tests <- changed[starts_with(changed, test_path)] code <- changed[starts_with(changed, code_path)] if (length(code) > 0) { # Reload code and rerun all tests cat("Changed code: ", paste0(basename(code), collapse = ", "), "\n") cat("Rerunning all tests\n") source_dir(code_path, env = env) test_dir(test_path, env = env, reporter = reporter$clone(deep = TRUE)) } else if (length(tests) > 0) { # If test changes, rerun just that test cat("Rerunning tests: ", paste0(basename(tests), collapse = ", "), "\n") test_files(tests, env = env, reporter = reporter$clone(deep = TRUE)) } TRUE } watch(c(code_path, test_path), watcher, hash = hash) } #' Watches a package for changes, rerunning tests as appropriate. #' #' @param pkg path to package #' @export #' @param reporter test reporter to use #' @param hash Passed on to [watch()]. When FALSE, uses less accurate #' modification time stamps, but those are faster for large files. #' @keywords debugging #' @seealso [auto_test()] for details on how method works auto_test_package <- function(pkg = ".", reporter = default_reporter(), hash = TRUE) { reporter <- find_reporter(reporter) path <- pkgload::pkg_path(pkg) package <- pkgload::pkg_name(path) code_path <- file.path(path, c("R", "src")) code_path <- code_path[file.exists(code_path)] code_path <- normalizePath(code_path) test_path <- normalizePath(file.path(path, "tests", "testthat")) # Start by loading all code and running all tests withr::local_envvar("NOT_CRAN" = "true") pkgload::load_all(path) test_dir(test_path, package = package, reporter = reporter$clone(deep = TRUE), stop_on_failure = FALSE) # Next set up watcher to monitor changes watcher <- function(added, deleted, modified) { changed <- normalizePath(c(added, modified)) tests <- changed[starts_with(changed, test_path)] code <- changed[starts_with(changed, code_path)] # Remove helper from test and add it to code (if a helper changed, # like for code, reload all and rerun all tests) helper <- tests[starts_with(basename(tests), "helper-")] tests <- setdiff(tests, helper) code <- c(code, helper) if (length(code) > 0) { # Reload code and rerun all tests cat("Changed code: ", paste0(basename(code), collapse = ", "), "\n") cat("Rerunning all tests\n") pkgload::load_all(path, quiet = TRUE) test_dir(test_path, package = package, reporter = reporter$clone(deep = TRUE)) } else if (length(tests) > 0) { # If test changes, rerun just that test cat("Rerunning tests: ", paste0(basename(tests), collapse = ", "), "\n") env <- env_clone(asNamespace(package)) test_files( test_dir = test_path, test_package = package, test_paths = tests, env = env, reporter = reporter$clone(deep = TRUE) ) } TRUE } watch(c(code_path, test_path), watcher, hash = hash) } # Helpers ----------------------------------------------------------------- starts_with <- function(string, prefix) { substr(string, 1, nchar(prefix)) == prefix } testthat/R/old-school.R0000644000176200001440000000712514164710002014501 0ustar liggesusers#' Old-style expectations. 
#'
#' @description
#' `r lifecycle::badge("superseded")`
#'
#' Initially, testthat used a style of testing that looked like
#' `expect_that(a, equals(b))`; this allowed expectations to read like
#' English sentences, but was verbose and a bit too cutesy. This style
#' will continue to work but has been soft-deprecated - it is no longer
#' documented, and new expectations will only use the new style
#' `expect_equal(a, b)`.
#'
#' @name oldskool
#' @keywords internal
NULL

#' @export
#' @rdname oldskool
is_null <- function() {
  warning(
    "`is_null()` is deprecated. Please use `expect_null()` instead.",
    call. = FALSE
  )
  function(x) expect_null(x)
}

#' @export
#' @rdname oldskool
is_a <- function(class) {
  function(x) expect_is(x, class)
}

#' @export
#' @rdname oldskool
is_true <- function() {
  function(x) {
    warning(
      "`is_true()` is deprecated. Please use `expect_true()` instead.",
      call. = FALSE
    )
    expect_true(x)
  }
}

#' @export
#' @rdname oldskool
is_false <- function() {
  function(x) {
    warning(
      "`is_false()` is deprecated. Please use `expect_false()` instead.",
      call. = FALSE
    )
    expect_false(x)
  }
}

#' @export
#' @rdname oldskool
has_names <- function(expected, ignore.order = FALSE, ignore.case = FALSE) {
  function(x) {
    expect_named(x,
      expected = expected,
      ignore.order = ignore.order, ignore.case = ignore.case
    )
  }
}

#' @export
#' @rdname oldskool
is_less_than <- function(expected, label = NULL, ...) {
  function(x) expect_lt(x, expected)
}

#' @export
#' @rdname oldskool
is_more_than <- function(expected, label = NULL, ...) {
  function(x) expect_gt(x, expected)
}

#' @export
#' @rdname oldskool
equals <- function(expected, label = NULL, ...) {
  function(x) expect_equal(x, expected, ..., expected.label = label)
}

#' @export
#' @rdname oldskool
is_equivalent_to <- function(expected, label = NULL) {
  function(x) expect_equivalent(x, expected, expected.label = label)
}

#' @export
#' @rdname oldskool
is_identical_to <- function(expected, label = NULL) {
  function(x) expect_identical(x, expected, expected.label = label)
}

#' @export
#' @rdname oldskool
equals_reference <- function(file, label = NULL, ...) {
  function(x) expect_known_value(x, file, expected.label = label, ...)
}

#' @export
#' @rdname oldskool
shows_message <- function(regexp = NULL, all = FALSE, ...) {
  function(x) expect_message(x, regexp = regexp, all = all, ...)
}

#' @export
#' @rdname oldskool
gives_warning <- function(regexp = NULL, all = FALSE, ...) {
  function(x) expect_warning(x, regexp = regexp, all = all, ...)
}

#' @export
#' @rdname oldskool
prints_text <- function(regexp = NULL, ...) {
  function(x) expect_output(x, regexp, ...)
}

#' @export
#' @rdname oldskool
throws_error <- function(regexp = NULL, ...) {
  function(x) expect_error(x, regexp, ...)
}

#' @export
#' @rdname oldskool
matches <- function(regexp, all = TRUE, ...) {
  warning(
    "`matches()` is deprecated. Please use `expect_match()` instead.",
    call. = FALSE
  )
  function(x) expect_match(x, regexp, all = all, ...)
}

#' Does code take less than the expected amount of time to run?
#'
#' This is useful for performance regression testing.
#'
#' @keywords internal
#' @export
#' @param amount maximum duration in seconds
takes_less_than <- function(amount) {
  warning(
    "takes_less_than() is deprecated because it is stochastic and unreliable",
    call.
= FALSE ) function(expr) { duration <- system.time(force(expr))["elapsed"] expect( duration < amount, paste0("took ", duration, " seconds, which is more than ", amount) ) } } testthat/R/reporter-teamcity.R0000644000176200001440000000422114164710002016107 0ustar liggesusers#' Test reporter: Teamcity format. #' #' This reporter will output results in the Teamcity message format. #' For more information about Teamcity messages, see #' http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity #' #' @export #' @family reporters TeamcityReporter <- R6::R6Class("TeamcityReporter", inherit = Reporter, public = list( i = NA_integer_, initialize = function(...) { super$initialize(...) self$capabilities$parallel_support <- TRUE }, start_context = function(context) { private$report_event("testSuiteStarted", context) }, end_context = function(context) { private$report_event("testSuiteFinished", context) self$cat_line() self$cat_line() }, start_test = function(context, test) { private$report_event("testSuiteStarted", test) self$i <- 1L }, end_test = function(context, test) { private$report_event("testSuiteFinished", test) self$cat_line() }, add_result = function(context, test, result) { testName <- paste0("expectation ", self$i) self$i <- self$i + 1L if (expectation_skip(result)) { private$report_event("testIgnored", testName, message = format(result)) return() } private$report_event("testStarted", testName) if (!expectation_ok(result)) { lines <- strsplit(format(result), "\n")[[1]] private$report_event( "testFailed", testName, message = lines[1], details = paste(lines[-1], collapse = "\n") ) } private$report_event("testFinished", testName) } ), private = list( report_event = function(event, name, ...) { values <- list(name = name, ...) values <- vapply(values, teamcity_escape, character(1)) if (length(values) == 0) { value_string <- "" } else { value_string <- paste0(names(values), "='", values, "'", collapse = " ") } self$cat_line("##teamcity[", event, " ", value_string, "]") } ) ) # teamcity escape character is | teamcity_escape <- function(s) { s <- gsub("(['|]|\\[|\\])", "|\\1", s) gsub("\n", "|n", s) } testthat/R/snapshot-file-snaps.R0000644000176200001440000000505714166627056016360 0ustar liggesusers# Manage a test files worth of snapshots - if the test file uses variants, this # will correspond to multiple output files. 
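# An illustrative layout for a hypothetical test file "test-foo.R":
#
#   _snaps/foo.md           snapshots for the default variant ("_default")
#   _snaps/foo.new.md       pending changes awaiting review
#   _snaps/windows/foo.md   the same snapshots for a "windows" variant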
FileSnaps <- R6::R6Class("FileSnaps", public = list( snap_path = NULL, file = NULL, type = NULL, snaps = NULL, initialize = function(snap_path, file, type = c("old", "cur", "new")) { self$snap_path <- snap_path self$file <- file self$type <- arg_match(type) if (self$type == "old") { # Find variants variants <- c("_default", dirs(self$snap_path)) paths <- set_names(self$path(variants), variants) paths <- paths[file.exists(paths)] self$snaps <- lapply(paths, read_snaps) } else { self$snaps <- list(`_default` = list()) } }, get = function(test, variant, i) { test_snaps <- self$snaps[[variant]][[test]] if (i > length(test_snaps)) { NULL } else { test_snaps[[i]] } }, set = function(test, variant, i, data) { self$snaps[[variant]][[test]][[i]] <- data }, append = function(test, variant, data) { if (!has_name(self$snaps, variant)) { # Needed for R < 3.6 self$snaps[[variant]] <- list() } self$snaps[[variant]][[test]] <- c(self$snaps[[variant]][[test]], data) length(self$snaps[[variant]][[test]]) }, reset = function(test, old) { for (variant in names(self$snaps)) { cur_test <- self$snaps[[variant]][[test]] old_test <- old$snaps[[variant]][[test]] if (length(cur_test) == 0) { self$snaps[[variant]][[test]] <- old_test } else if (length(old_test) > length(cur_test)) { self$snaps[[variant]][[test]] <- c(cur_test, old_test[-seq_along(cur_test)]) } } invisible() }, write = function(variants = names(self$snaps)) { for (variant in variants) { default <- variant == "_default" if (!default) { dir.create(file.path(self$snap_path, variant), showWarnings = FALSE) } write_snaps( self$snaps[[variant]], self$path(variant), delete = default ) } invisible() }, delete = function(variant = "_default") { unlink(self$path(variant)) invisible() }, variants = function() { names(self$snaps) }, filename = function() { paste0(self$file, if (self$type == "new") ".new", ".md") }, path = function(variant = "_default") { ifelse(variant == "_default", file.path(self$snap_path, self$filename()), file.path(self$snap_path, variant, self$filename()) ) } )) dirs <- function(path) { list.dirs(path, recursive = FALSE, full.names = FALSE) } testthat/R/test-path.R0000644000176200001440000000116114164710003014342 0ustar liggesusers#' Locate file in testing directory. #' #' This function is designed to work both interactively and during tests, #' locating files in the `tests/testthat` directory #' #' @param ... Character vectors giving path component. #' @return A character vector giving the path. #' @export test_path <- function(...) { if (is_testing() && !isTRUE(getOption("testthat_interactive"))) { if (missing(...)) { "." } else { file.path(...) } } else { base <- "tests/testthat" if (!dir.exists(base)) { abort("Can't find `tests/testthat/` in current directory.") } file.path(base, ...) } } testthat/R/test-example.R0000644000176200001440000000344314164710003015046 0ustar liggesusers#' Test package examples #' #' These helper functions make it easier to test the examples in a package. #' Each example counts as one test, and it succeeds if the code runs without #' an error. Generally, this is redundant with R CMD check, and is not #' recommended in routine practice. #' #' @keywords internal #' @param path For `test_examples()`, path to directory containing Rd files. #' For `test_example()`, path to a single Rd file. Remember the working #' directory for tests is `tests/testthat`. #' @param title Test title to use #' @param rd A parsed Rd object, obtained from [tools::Rd_db()] or otherwise. 
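#'
#' @examples
#' \dontrun{
#' # Sketch: run the examples from one help page of an installed package
#' rd <- tools::Rd_db(package = "stats")
#' test_rd(rd[[1]])
#' }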
#' @export test_examples <- function(path = "../..") { res <- test_examples_source(path) %||% test_examples_installed() if (is.null(res)) { stop("Could not find examples", call. = FALSE) } invisible(res) } test_examples_source <- function(path = "../..") { if (!dir.exists(file.path(path, "man"))) { return() } Rd <- tools::Rd_db(dir = path) if (length(Rd) == 0) { return() } lapply(Rd, test_rd) } test_examples_installed <- function(package = testing_package()) { if (identical(package, "") || is.null(package)) { return() } Rd <- tools::Rd_db(package = package) if (length(Rd) == 0) { return() } lapply(Rd, test_rd) } #' @export #' @rdname test_examples test_rd <- function(rd, title = attr(rd, "Rdfile")) { test_example(rd, title) } #' @export #' @rdname test_examples test_example <- function(path, title = path) { ex_path <- tempfile(fileext = ".R") tools::Rd2ex(path, ex_path) if (!file.exists(ex_path)) { return(invisible(FALSE)) } env <- new.env(parent = globalenv()) ok <- test_code(title, parse(ex_path, encoding = "UTF-8"), env = env, skip_on_empty = FALSE ) if (ok) succeed(path) invisible(ok) } testthat/R/test-package.R0000644000176200001440000000332314164710003015003 0ustar liggesusers#' Run all tests in a package #' #' @description #' * `test_local()` tests a local source package. #' * `test_package()` tests an installed package. #' * `test_check()` checks a package during `R CMD check`. #' #' Tests live in `tests/testthat`. #' #' @section `R CMD check`: #' To run testthat automatically from `R CMD check`, make sure you have #' a `tests/testthat.R` that contains: #' #' ``` #' library(testthat) #' library(yourpackage) #' #' test_check("yourpackage") #' ``` #' #' @inherit test_dir return params #' @inheritSection test_dir Special files #' @inheritSection test_dir Environments #' @param ... Additional arguments passed to [test_dir()] #' @export #' @rdname test_package test_package <- function(package, reporter = check_reporter(), ...) { test_path <- system.file("tests", "testthat", package = package) if (test_path == "") { inform(paste0("No installed testthat tests found for ", package)) return(invisible()) } test_dir( test_path, package = package, reporter = reporter, ..., load_package = "installed" ) } #' @export #' @rdname test_package test_check <- function(package, reporter = check_reporter(), ...) { require(package, character.only = TRUE) test_dir( "testthat", package = package, reporter = reporter, ..., load_package = "installed" ) } #' @export #' @rdname test_package test_local <- function(path = ".", reporter = NULL, ...) { package <- pkgload::pkg_name(path) test_path <- file.path(pkgload::pkg_path(path), "tests", "testthat") withr::local_envvar(NOT_CRAN = "true") test_dir( test_path, package = package, reporter = reporter, ..., load_package = if (package != "testthat") "source" else "none" ) } testthat/R/reporter-multi.R0000644000176200001440000000320514164710002015423 0ustar liggesusers#' Multi reporter: combine several reporters in one. #' #' This reporter is useful to use several reporters at the same time, e.g. #' adding a custom reporter without removing the current one. 
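#'
#' A minimal sketch combining two built-in reporters:
#'
#' @examples
#' \dontrun{
#' multi <- MultiReporter$new(reporters = list(
#'   ProgressReporter$new(),
#'   FailReporter$new()
#' ))
#' test_dir("tests/testthat", reporter = multi)
#' }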
#' #' @export #' @family reporters MultiReporter <- R6::R6Class("MultiReporter", inherit = Reporter, public = list( reporters = list(), initialize = function(reporters = list()) { super$initialize() self$capabilities$parallel_support <- TRUE self$reporters <- reporters }, start_reporter = function() { o_apply(self$reporters, "start_reporter") }, start_file = function(filename) { o_apply(self$reporters, "start_file", filename) }, start_context = function(context) { o_apply(self$reporters, "start_context", context) }, start_test = function(context, test) { o_apply(self$reporters, "start_test", context, test) }, add_result = function(context, test, result) { o_apply(self$reporters, "add_result", context = context, test = test, result = result) }, end_test = function(context, test) { o_apply(self$reporters, "end_test", context, test) }, end_context = function(context) { o_apply(self$reporters, "end_context", context) }, end_reporter = function() { o_apply(self$reporters, "end_reporter") }, end_file = function() { o_apply(self$reporters, "end_file") }, update = function() { o_apply(self$reporters, "update") } ) ) o_apply <- function(objects, method, ...) { x <- NULL # silence check note f <- new_function(exprs(x = ), expr( `$`(x, !!method)(...) )) lapply(objects, f) } testthat/R/snapshot-cleanup.R0000644000176200001440000000365014166370064015735 0ustar liggesuserssnapshot_cleanup <- function(path, test_files_seen = character(), snap_files_seen = character()) { outdated <- snapshot_outdated(path, test_files_seen, snap_files_seen) if (length(outdated) > 0) { inform(c("Deleting unused snapshots:", outdated)) unlink(file.path(path, outdated), recursive = TRUE) } # Delete empty directories: # nest dir() inside list.dirs() to avoid picking up `.` directories dirs <- list.dirs(dir(path, full.names = TRUE)) empty <- dirs[map_lgl(dirs, is_dir_empty)] unlink(empty, recursive = TRUE) # Delete snapshot folder if (is_dir_empty(path)) { unlink(path, recursive = TRUE) } rstudio_tickle() invisible(outdated) } is_dir_empty <- function(x) { length(dir(x, recursive = TRUE)) == 0 } snapshot_outdated <- function(path, test_files_seen = character(), snap_files_seen = character()) { all_files <- dir(path, recursive = TRUE) expected <- snapshot_expected(path, test_files_seen, snap_files_seen) setdiff(all_files, expected) } snapshot_expected <- function( snap_dir, test_files_seen = character(), snap_files_seen = character()) { if (length(test_files_seen) > 0) { snaps <- c(paste0(test_files_seen, ".md"), paste0(test_files_seen, ".new.md")) } else { snaps <- character() } # Empirically determine variants snap_dirs <- list.dirs(snap_dir, recursive = FALSE) is_variant <- dir_contains(snap_dirs, c(snaps, snap_files_seen)) variants <- basename(snap_dirs[is_variant]) snap_files_seen_new <- paste0( tools::file_path_sans_ext(snap_files_seen), ".new.", tools::file_ext(snap_files_seen) ) sort(c( snaps, outer(variants, snaps, file.path), snap_files_seen, outer(variants, snap_files_seen, file.path), snap_files_seen_new, outer(variants, snap_files_seen_new, file.path) )) } dir_contains <- function(paths, expected_files) { map_lgl(paths, ~ any(file.exists(file.path(.x, expected_files)))) } testthat/R/example.R0000644000176200001440000000114114164710002014061 0ustar liggesusers#' Retrieve paths to built-in example test files #' #' `testthat_examples()` retrieves path to directory of test files, #' `testthat_example()` retrieves path to a single test file. 
#' #' @keywords internal #' @param filename Name of test file #' @export #' @examples #' dir(testthat_examples()) #' testthat_example("success") testthat_examples <- function() { system.file("examples", package = "testthat") } #' @export #' @rdname testthat_examples testthat_example <- function(filename) { system.file( "examples", paste0("test-", filename, ".R"), package = "testthat", mustWork = TRUE ) } testthat/R/expect-comparison.R0000644000176200001440000000537114164710002016077 0ustar liggesusers#' Does code return a number greater/less than the expected value? #' #' @inheritParams expect_equal #' @param expected Single numeric value to compare. #' @family expectations #' @examples #' a <- 9 #' expect_lt(a, 10) #' #' \dontrun{ #' expect_lt(11, 10) #' } #' #' a <- 11 #' expect_gt(a, 10) #' \dontrun{ #' expect_gt(9, 10) #' } #' @name comparison-expectations NULL expect_compare <- function(operator = c("<", "<=", ">", ">="), act, exp) { operator <- match.arg(operator) op <- match.fun(operator) msg <- c( "<" = "not strictly less than", "<=" = "not less than", ">" = "not strictly more than", ">=" = "not more than" )[[operator]] cmp <- op(act$val, exp$val) if (length(cmp) != 1 || !is.logical(cmp)) { abort("Result of comparison must be a single logical value") } expect( if (!is.na(cmp)) cmp else FALSE, sprintf("%s is %s %s. Difference: %.3g", act$lab, msg, exp$lab, act$val - exp$val), trace_env = caller_env() ) invisible(act$val) } #' @export #' @rdname comparison-expectations expect_lt <- function(object, expected, label = NULL, expected.label = NULL) { act <- quasi_label(enquo(object), label, arg = "object") exp <- quasi_label(enquo(expected), expected.label, arg = "expected") expect_compare("<", act, exp) } #' @export #' @rdname comparison-expectations expect_lte <- function(object, expected, label = NULL, expected.label = NULL) { act <- quasi_label(enquo(object), label, arg = "object") exp <- quasi_label(enquo(expected), expected.label, arg = "expected") expect_compare("<=", act, exp) } #' @export #' @rdname comparison-expectations expect_gt <- function(object, expected, label = NULL, expected.label = NULL) { act <- quasi_label(enquo(object), label, arg = "object") exp <- quasi_label(enquo(expected), expected.label, arg = "expected") expect_compare(">", act, exp) } #' @export #' @rdname comparison-expectations expect_gte <- function(object, expected, label = NULL, expected.label = NULL) { act <- quasi_label(enquo(object), label, arg = "object") exp <- quasi_label(enquo(expected), expected.label, arg = "expected") expect_compare(">=", act, exp) } # Wordy names ------------------------------------------------------------- #' Deprecated numeric comparison functions #' #' These functions have been deprecated in favour of the more concise #' [expect_gt()] and [expect_lt()]. #' #' @export #' @param ... All arguments passed on to `expect_lt()`/`expect_gt()`. #' @keywords internal expect_less_than <- function(...) { warning("Deprecated: please use `expect_lt()` instead", call. = FALSE) expect_lt(...) } #' @rdname expect_less_than #' @export expect_more_than <- function(...) { warning("Deprecated: please use `expect_gt()` instead", call. = FALSE) expect_gt(...) } testthat/R/expect-invisible.R0000644000176200001440000000234314164710002015705 0ustar liggesusers#' Does code return a visible or invisible object? #' #' Use this to test whether a function returns a visible or invisible #' output. 
Typically you'll use this to check that functions called primarily
#' for their side-effects return their data argument invisibly.
#'
#' @param call A function call.
#' @inheritParams expect_that
#' @return The evaluated `call`, invisibly.
#' @export
#' @examples
#' expect_invisible(x <- 10)
#' expect_visible(x)
#'
#' # Typically you'll assign the result of the expectation so you can
#' # also check that the value is as you expect.
#' greet <- function(name) {
#'   message("Hi ", name)
#'   invisible(name)
#' }
#' out <- expect_invisible(greet("Hadley"))
#' expect_equal(out, "Hadley")
expect_invisible <- function(call, label = NULL) {
  lab <- label %||% expr_label(enexpr(call))
  vis <- withVisible(call)

  expect(
    identical(vis$visible, FALSE),
    sprintf("%s does not return invisibly", lab)
  )

  invisible(vis$value)
}

#' @export
#' @rdname expect_invisible
expect_visible <- function(call, label = NULL) {
  lab <- label %||% expr_label(enexpr(call))
  vis <- withVisible(call)

  expect(
    identical(vis$visible, TRUE),
    sprintf("%s does not return visibly", lab)
  )

  invisible(vis$value)
}
testthat/R/watcher.R0000644000176200001440000000635114164710003014074 0ustar liggesusers#' Watch a directory for changes (additions, deletions & modifications).
#'
#' This is used to power the [auto_test()] and
#' [auto_test_package()] functions which are used to rerun tests
#' whenever source code changes.
#'
#' Use Ctrl + break (windows), Esc (mac gui) or Ctrl + C (command line) to
#' stop the watcher.
#'
#' @param path character vector of paths to watch. Omit trailing backslash.
#' @param pattern file pattern passed to [dir()]
#' @param callback function called every time a change occurs. It should
#'   have three parameters: added, deleted, modified, and should return
#'   TRUE to keep watching, or FALSE to stop.
#' @param hash hashes are more accurate at detecting changes, but are slower
#'   for large files. When FALSE, uses modification time stamps
#' @export
#' @keywords internal
watch <- function(path, callback, pattern = NULL, hash = TRUE) {
  prev <- dir_state(path, pattern, hash = hash)

  repeat {
    Sys.sleep(1)

    curr <- dir_state(path, pattern, hash = hash)
    changes <- compare_state(prev, curr)

    if (changes$n > 0) {
      # cat("C")
      keep_going <- TRUE
      try(keep_going <- callback(changes$added, changes$deleted, changes$modified))

      if (!isTRUE(keep_going)) return(invisible())
    } else {
      # cat(".")
    }

    prev <- curr
  }
}

#' Compute a digest of a filename, returning NA if the file doesn't
#' exist.
#'
#' @param filename filename to compute digest on
#' @return a digest of the file, or NA if it doesn't exist.
#' @keywords internal
safe_digest <- function(path) {
  if (!file.exists(path)) return(NA_character_)
  if (is_directory(path)) return(NA_character_)
  if (!is_readable(path)) return(NA_character_)
  digest::digest(path, file = TRUE)
}

#' Capture the state of a directory.
#'
#' @param path path to directory
#' @param pattern regular expression with which to filter files
#' @param hash use hash (slow but accurate) or time stamp (fast but less
#'   accurate)
#' @keywords internal
dir_state <- function(path, pattern = NULL, hash = TRUE) {
  files <- dir(path, pattern, full.names = TRUE)

  # It's possible for any of the files to be deleted between the dir()
  # call above and the calls below; `file.info` handles this
  # gracefully, but digest::digest doesn't -- so we wrap it. Both
  # cases will return NA for files that have gone missing.
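  # The result is a named vector keyed by file path: file digests when
  # hash = TRUE, or mtime stamps when hash = FALSE. compare_state() below
  # diffs two such vectors to classify added/deleted/modified files.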
if (hash) { file_states <- vapply(files, safe_digest, character(1)) } else { file_states <- stats::setNames(file.info(files)$mtime, files) } file_states[!is.na(file_states)] } #' Compare two directory states. #' #' @param old previous state #' @param new current state #' @return list containing number of changes and files which have been #' `added`, `deleted` and `modified` #' @keywords internal compare_state <- function(old, new) { added <- setdiff(names(new), names(old)) deleted <- setdiff(names(old), names(new)) same <- intersect(names(old), names(new)) modified <- names(new[same])[new[same] != old[same]] n <- length(added) + length(deleted) + length(modified) list(n = n, added = added, deleted = deleted, modified = modified) } # Helpers ----------------------------------------------------------------- is_directory <- function(x) file.info(x)$isdir is_readable <- function(x) file.access(x, 4) == 0 testthat/R/praise.R0000644000176200001440000000306314164710002013716 0ustar liggesusers# nocov start praise <- function() { plain <- c( "You rock!", "You are a coding rockstar!", "Keep up the good work.", "Woot!", "Way to go!", "Nice code.", praise::praise("Your tests are ${adjective}!"), praise::praise("${EXCLAMATION} - ${adjective} code.") ) utf8 <- c( "\U0001f600", # smile "\U0001f973", # party face "\U0001f638", # cat grin paste0(strrep("\U0001f389\U0001f38a", 5), "\U0001f389"), "\U0001f485 Your tests are beautiful \U0001f485", "\U0001f947 Your tests deserve a gold medal \U0001f947", "\U0001f308 Your tests are over the rainbow \U0001f308", "\U0001f9ff Your tests look perfect \U0001f9ff", "\U0001f3af Your tests hit the mark \U0001f3af", "\U0001f41d Your tests are the bees knees \U0001f41d", "\U0001f4a3 Your tests are da bomb \U0001f4a3", "\U0001f525 Your tests are lit \U0001f525" ) x <- if (cli::is_utf8_output()) c(plain, utf8) else plain sample(x, 1) } praise_emoji <- function() { if (!cli::is_utf8_output()) { return("") } emoji <- c( "\U0001f600", # smile "\U0001f973", # party face "\U0001f638", # cat grin "\U0001f308", # rainbow "\U0001f947", # gold medal "\U0001f389", # party popper "\U0001f38a" # confetti ball ) sample(emoji, 1) } encourage <- function() { x <- c( "Keep trying!", "Don't worry, you'll get it.", "No one is perfect!", "No one gets it right on their first try", "Frustration is a natural part of programming :)", "I believe in you!" ) sample(x, 1) } # nocov end testthat/R/reporter-check.R0000644000176200001440000000551614166627056015377 0ustar liggesusers#' Check reporter: 13 line summary of problems #' #' `R CMD check` displays only the last 13 lines of the result, so this #' report is designed to ensure that you see something useful there. #' #' @export #' @family reporters CheckReporter <- R6::R6Class("CheckReporter", inherit = Reporter, public = list( problems = NULL, skips = NULL, warnings = NULL, n_ok = 0L, initialize = function(...) { self$capabilities$parallel_support <- TRUE self$problems <- Stack$new() self$warnings <- Stack$new() self$skips <- Stack$new() super$initialize(...) 
}, add_result = function(context, test, result) { if (expectation_broken(result)) { self$problems$push(result) } else if (expectation_warning(result)) { self$warnings$push(result) } else if (expectation_skip(result)) { self$skips$push(result$message) } else { self$n_ok <- self$n_ok + 1L } }, end_reporter = function() { if (self$skips$size() || self$warnings$size() || self$problems$size()) { self$cat_line(summary_line( n_fail = self$problems$size(), n_warn = self$warnings$size(), n_skip = self$skips$size(), n_pass = self$n_ok )) self$cat_line() } if (self$skips$size() > 0) { self$rule("Skipped tests", line = 2) self$cat_line(skip_bullets(self$skips$as_list())) self$cat_line() } # Don't show warnings in revdep checks in order to focus on failures if (self$warnings$size() > 0 && !on_cran()) { warnings <- self$warnings$as_list() self$rule("Warnings", line = 2) self$cat_line(map_chr(warnings, issue_summary, rule = TRUE)) self$cat_line() } if (self$problems$size() > 0) { problems <- self$problems$as_list() saveRDS(problems, "testthat-problems.rds", version = 2) self$rule("Failed tests", line = 2) self$cat_line(map_chr(problems, issue_summary, rule = TRUE, simplify = "none")) self$cat_line() } else { # clean up unlink("testthat-problems.rds") } self$cat_line(summary_line( n_fail = self$problems$size(), n_warn = self$warnings$size(), n_skip = self$skips$size(), n_pass = self$n_ok )) } ) ) summary_line <- function(n_fail, n_warn, n_skip, n_pass) { colourise_if <- function(text, colour, cond) { if (cond) colourise(text, colour) else text } # Ordered from most important to least important paste0( "[ ", colourise_if("FAIL", "failure", n_fail > 0), " ", n_fail, " | ", colourise_if("WARN", "warn", n_warn > 0), " ", n_warn, " | ", colourise_if("SKIP", "skip", n_skip > 0), " ", n_skip, " | ", colourise_if("PASS", "success", n_fail == 0), " ", n_pass, " ]" ) } testthat/R/context.R0000644000176200001440000000275114164710002014122 0ustar liggesusers#' Describe the context of a set of tests. #' #' @description #' `r lifecycle::badge("superseded")` #' #' Use of `context()` is no longer recommended. Instead omit it, and messages #' will use the name of the file instead. This ensures that the context and #' test file name are always in sync. #' #' A context defines a set of tests that test related functionality. Usually #' you will have one context per file, but you may have multiple contexts #' in a single file if you so choose. #' #' @section 3rd edition: #' `r lifecycle::badge("deprecated")` #' #' `context()` is deprecated in the third edition, and the equivalent #' information is instead recorded by the test file name. #' #' @param desc description of context. Should start with a capital letter. 
#' @keywords internal #' @export #' @examples #' context("String processing") #' context("Remote procedure calls") context <- function(desc) { edition_deprecate(3, "context()") context_start(desc) } context_start <- function(desc) { reporter <- get_reporter() if (!is.null(reporter)) { get_reporter()$.start_context(desc) } } #' Start test context from a file name #' #' For use in external reporters #' #' @param name file name #' @keywords internal #' @export context_start_file <- function(name) { context_start(context_name(name)) } context_name <- function(filename) { # Remove test- prefix filename <- sub("^test[-_]", "", filename) # Remove terminal extension filename <- sub("[.][Rr]$", "", filename) filename } testthat/R/reporter-silent.R0000644000176200001440000000136114164710002015570 0ustar liggesusers#' Test reporter: gather all errors silently. #' #' This reporter quietly runs all tests, simply gathering all expectations. #' This is helpful for programmatically inspecting errors after a test run. #' You can retrieve the results with the `expectations()` #' method. #' #' @export #' @family reporters SilentReporter <- R6::R6Class("SilentReporter", inherit = Reporter, public = list( .expectations = NULL, initialize = function(...) { super$initialize(...) self$capabilities$parallel_support <- TRUE self$.expectations <- Stack$new() }, add_result = function(context, test, result) { self$.expectations$push(result) }, expectations = function() { self$.expectations$as_list() } ) ) testthat/R/reporter-rstudio.R0000644000176200001440000000131314164710002015760 0ustar liggesusers#' Test reporter: RStudio #' #' This reporter is designed for output to RStudio. It produces results in #' any easily parsed form. #' #' @export #' @family reporters RStudioReporter <- R6::R6Class("RStudioReporter", inherit = Reporter, public = list( initialize = function(...) { self$capabilities$parallel_support <- TRUE super$initialize(...) }, add_result = function(context, test, result) { if (expectation_success(result)) { return() } loc <- expectation_location(result) status <- expectation_type(result) first_line <- strsplit(result$message, "\n")[[1]][1] self$cat_line(loc, " [", status, "] ", test, ". ", first_line) } ) ) testthat/R/expect-equality.R0000644000176200001440000001501614164710002015557 0ustar liggesusers#' Does code return the expected value? #' #' @description #' These functions provide two levels of strictness when comparing a #' computation to a reference value. `expect_identical()` is the baseline; #' `expect_equal()` relaxes the test to ignore small numeric differences. #' #' In the 2nd edition, `expect_identical()` uses [identical()] and #' `expect_equal` uses [all.equal()]. In the 3rd edition, both functions use #' [waldo](https://github.com/r-lib/waldo). They differ only in that #' `expect_equal()` sets `tolerance = testthat_tolerance()` so that small #' floating point differences are ignored; this also implies that (e.g.) `1` #' and `1L` are treated as equal. #' #' @param object,expected Computation and value to compare it to. #' #' Both arguments supports limited unquoting to make it easier to generate #' readable failures within a function or for loop. See [quasi_label] for #' more details. #' @param ... #' **3e**: passed on to [waldo::compare()]. See its docs to see other #' ways to control comparison. #' #' **2e**: passed on to [testthat::compare()]/[identical()]. #' @param tolerance #' **3e**: passed on to [waldo::compare()]. If non-`NULL`, will #' ignore small floating point differences. 
#'   It uses the same algorithm as
#'   [all.equal()] so the tolerance is usually relative (i.e.
#'   `mean(abs(x - y)) / mean(abs(y)) < tolerance`), except when the differences
#'   are very small, when it becomes absolute (i.e.
#'   `mean(abs(x - y)) < tolerance`).
#'   See waldo documentation for more details.
#'
#'   **2e**: passed on to [testthat::compare()], if set. It's hard to
#'   reason about exactly what tolerance means because depending on the precise
#'   code path it could be either an absolute or relative tolerance.
#' @param label,expected.label Used to customise failure messages. For expert
#'   use only.
#' @seealso
#' * [expect_setequal()]/[expect_mapequal()] to test for set equality.
#' * [expect_reference()] to test if two names point to the same memory address.
#' @inheritParams expect_that
#' @family expectations
#' @examples
#' a <- 10
#' expect_equal(a, 10)
#'
#' # Use expect_equal() when testing for numeric equality
#' \dontrun{
#' expect_identical(sqrt(2) ^ 2, 2)
#' }
#' expect_equal(sqrt(2) ^ 2, 2)
#' @name equality-expectations
NULL

#' @export
#' @rdname equality-expectations
expect_equal <- function(object, expected, ...,
                         tolerance = if (edition_get() >= 3) testthat_tolerance(),
                         info = NULL, label = NULL, expected.label = NULL) {
  act <- quasi_label(enquo(object), label, arg = "object")
  exp <- quasi_label(enquo(expected), expected.label, arg = "expected")

  if (edition_get() >= 3) {
    expect_waldo_equal("equal", act, exp, info, ..., tolerance = tolerance)
  } else {
    if (!is.null(tolerance)) {
      comp <- compare(act$val, exp$val, ..., tolerance = tolerance)
    } else {
      comp <- compare(act$val, exp$val, ...)
    }
    expect(
      comp$equal,
      sprintf("%s not equal to %s.\n%s", act$lab, exp$lab, comp$message),
      info = info
    )
    invisible(act$val)
  }
}

#' @export
#' @rdname equality-expectations
expect_identical <- function(object, expected, info = NULL, label = NULL,
                             expected.label = NULL, ...) {
  act <- quasi_label(enquo(object), label, arg = "object")
  exp <- quasi_label(enquo(expected), expected.label, arg = "expected")

  if (edition_get() >= 3) {
    expect_waldo_equal("identical", act, exp, info, ...)
  } else {
    ident <- identical(act$val, exp$val, ...)
    if (ident) {
      msg <- ""
    } else {
      compare <- compare(act$val, exp$val)
      if (compare$equal) {
        msg <- "Objects equal but not identical"
      } else {
        msg <- compare$message
      }
    }
    expect(
      ident,
      sprintf("%s not identical to %s.\n%s", act$lab, exp$lab, msg),
      info = info
    )
    invisible(act$val)
  }
}

expect_waldo_equal <- function(type, act, exp, info, ...) {
  comp <- waldo_compare(act$val, exp$val, ..., x_arg = "actual", y_arg = "expected")
  expect(
    length(comp) == 0,
    sprintf(
      "%s (%s) not %s to %s (%s).\n\n%s",
      act$lab, "`actual`",
      type,
      exp$lab, "`expected`",
      paste0(comp, collapse = "\n\n")
    ),
    info = info,
    trace_env = caller_env()
  )
  invisible(act$val)
}

#' Is an object equal to the expected value, ignoring attributes?
#'
#' Compares `object` and `expected` using [all.equal()] and
#' `check.attributes = FALSE`.
#'
#' @section 3rd edition:
#' `r lifecycle::badge("deprecated")`
#'
#' `expect_equivalent()` is deprecated in the 3rd edition. Instead use
#' `expect_equal(ignore_attr = TRUE)`.
#'
#' @inheritParams expect_equal
#' @param ... Passed on to [compare()].
#' @keywords internal
#' @export
#' @examples
#'
#' # expect_equivalent() ignores attributes
#' a <- b <- 1:3
#' names(b) <- letters[1:3]
#' \dontrun{
#' expect_equal(a, b)
#' }
#' expect_equivalent(a, b)
expect_equivalent <- function(object, expected, ..., info = NULL, label = NULL,
                              expected.label = NULL) {
  act <- quasi_label(enquo(object), label, arg = "object")
  exp <- quasi_label(enquo(expected), expected.label, arg = "expected")

  edition_deprecate(3, "expect_equivalent()",
    "Use expect_equal(ignore_attr = TRUE)"
  )

  comp <- compare(act$val, exp$val, ..., check.attributes = FALSE)
  expect(
    comp$equal,
    sprintf("%s not equivalent to %s.\n%s", act$lab, exp$lab, comp$message),
    info = info
  )
  invisible(act$val)
}

#' Does code return a reference to the expected object?
#'
#' `expect_reference()` compares the underlying memory addresses of
#' two symbols. It is for expert use only.
#'
#' @section 3rd edition:
#' `r lifecycle::badge("deprecated")`
#'
#' `expect_reference()` is deprecated in the third edition. If you know what
#' you're doing, and you really need this behaviour, just use `is_reference()`
#' directly: `expect_true(rlang::is_reference(x, y))`.
#'
#' @inheritParams expect_equal
#' @family expectations
#' @keywords internal
#' @export
expect_reference <- function(object, expected, info = NULL, label = NULL,
                             expected.label = NULL) {
  edition_deprecate(3, "expect_reference()")

  act <- quasi_label(enquo(object), label, arg = "object")
  exp <- quasi_label(enquo(expected), expected.label, arg = "expected")

  expect(
    is_reference(act$val, exp$val),
    sprintf("%s not a reference to %s.", act$lab, exp$lab),
    info = info
  )
  invisible(act$val)
}

# expect_reference() needs dev version of rlang
utils::globalVariables("is_reference")
testthat/R/test-that.R0000644000176200001440000001417514164710003014357 0ustar liggesusers#' Run a test
#'
#' @description
#' A test encapsulates a series of expectations about a small, self-contained
#' set of functionality. Each test lives in a file and contains multiple
#' expectations, like [expect_equal()] or [expect_error()].
#'
#' Tests are evaluated in their own environments, and should not affect
#' global state.
#'
#' @param desc Test name. Names should be brief, but evocative. They are
#'   only used by humans, so write them with human readers in mind.
#' @param code Test code containing expectations. Braces (`{}`) should always
#'   be used in order to get accurate location data for test failures.
#' @return When run interactively, returns `invisible(TRUE)` if all tests
#'   pass, otherwise throws an error.
#' @export #' @examples #' test_that("trigonometric functions match identities", { #' expect_equal(sin(pi / 4), 1 / sqrt(2)) #' expect_equal(cos(pi / 4), 1 / sqrt(2)) #' expect_equal(tan(pi / 4), 1) #' }) #' #' \dontrun{ #' test_that("trigonometric functions match identities", { #' expect_equal(sin(pi / 4), 1) #' }) #' } test_that <- function(desc, code) { if (!is.character(desc) || length(desc) != 1) { abort("`desc` must be a string") } reporter <- get_reporter() if (is.null(reporter)) { reporter <- local_interactive_reporter() } local_test_context() code <- substitute(code) if (edition_get() >= 3) { if (!is_call(code, "{")) { warn( "The `code` argument to `test_that()` must be a braced expression to get accurate file-line information for failures.", class = "testthat_braces_warning" ) } } test_code(desc, code, env = parent.frame(), reporter = reporter) } # Access error fields with `[[` rather than `$` because the # `$.Throwable` from the rJava package throws with unknown fields test_code <- function(test, code, env = test_env(), reporter = get_reporter(), skip_on_empty = TRUE) { reporter <- reporter %||% StopReporter$new() if (!is.null(test) && !is.null(reporter)) { reporter$start_test(context = reporter$.context, test = test) on.exit(reporter$end_test(context = reporter$.context, test = test)) } ok <- TRUE # @param debug_end How many frames should be skipped to find the # last relevant frame call. Only useful for the DebugReporter. register_expectation <- function(e, debug_end) { # Find test environment on the stack start <- eval_bare(quote(base::sys.nframe()), test_env) + 1L srcref <- e[["srcref"]] %||% find_first_srcref(start) e <- as.expectation(e, srcref = srcref) # Data for the DebugReporter if (debug_end >= 0) { e$start_frame <- start e$end_frame <- sys.nframe() - debug_end - 1L } e$test <- test %||% "(code run outside of `test_that()`)" ok <<- ok && expectation_ok(e) reporter$add_result(context = reporter$.context, test = test, result = e) } frame <- sys.nframe() # Any error will be assigned to this variable first # In case of stack overflow, no further processing (not even a call to # signalCondition() ) might be possible test_error <- NULL expressions_opt <- getOption("expressions") expressions_opt_new <- min(expressions_opt + 500L, 500000L) # If no handlers are called we skip: BDD (`describe()`) tests are often # nested and the top level might not contain any expectations, so we need # some way to disable handled <- !skip_on_empty handle_error <- function(e) { handled <<- TRUE # First thing: Collect test error test_error <<- e # Increase option(expressions) to handle errors here if possible, even in # case of a stack overflow. This is important for the DebugReporter. # Call options() manually, avoid withr overhead. 
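    # e.g. with R's default getOption("expressions") of 5000, this raises
    # the limit to 5500, comfortably below the 500000 ceiling that
    # options(expressions = ) accepts.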
options(expressions = expressions_opt_new)
    on.exit(options(expressions = expressions_opt), add = TRUE)

    # Add structured backtrace to the expectation
    if (can_entrace(e)) {
      e <- cnd_entrace(e)
    }
    test_error <<- e

    # Error will be handled by handle_fatal() if this fails; need to do it here
    # to be able to debug with the DebugReporter
    register_expectation(e, 2)

    e[["handled"]] <- TRUE
    test_error <<- e
  }
  handle_fatal <- function(e) {
    handled <<- TRUE

    # Error caught in handle_error() has precedence
    if (!is.null(test_error)) {
      e <- test_error
      if (isTRUE(e[["handled"]])) {
        return()
      }
    }

    register_expectation(e, 0)
  }
  handle_expectation <- function(e) {
    handled <<- TRUE
    register_expectation(e, 6)
    invokeRestart("continue_test")
  }
  handle_warning <- function(e) {
    # When options(warn) < 0, warnings are expected to be ignored.
    if (getOption("warn") < 0) {
      return()
    }

    # When options(warn) >= 2, warnings are converted to errors.
    # So, do not handle it here so that it will be handled by handle_error.
    if (getOption("warn") >= 2) {
      return()
    }

    if (!inherits(e, "testthat_warn")) {
      e <- cnd_entrace(e)
    }
    register_expectation(e, 5)
    maybe_restart("muffleWarning")
  }
  handle_message <- function(e) {
    if (edition_get() < 3) {
      maybe_restart("muffleMessage")
    }
  }
  handle_skip <- function(e) {
    handled <<- TRUE

    if (inherits(e, "skip_empty")) {
      # If we get here, `code` has already finished its evaluation.
      # Find the srcref in the `test_that()` frame above.
      e$srcref <- find_first_srcref(frame - 1)
      debug_end <- -1
    } else {
      debug_end <- 2
    }

    register_expectation(e, debug_end)
    signalCondition(e)
  }

  test_env <- new.env(parent = env)
  old <- options(rlang_trace_top_env = test_env)[[1]]
  on.exit(options(rlang_trace_top_env = old), add = TRUE)
  withr::local_options(testthat_topenv = test_env)

  tryCatch(
    withCallingHandlers(
      {
        eval(code, test_env)
        if (!handled && !is.null(test)) {
          skip_empty()
        }
      },
      expectation = handle_expectation,
      skip = handle_skip,
      warning = handle_warning,
      message = handle_message,
      error = handle_error
    ),
    # some errors may need handling here, e.g., stack overflow
    error = handle_fatal,
    # skips silently terminate code
    skip = function(e) {}
  )

  invisible(ok)
}
testthat/R/test-files.R0000644000176200001440000002713714167646004014523 0ustar liggesusers#' Run all tests in a directory
#'
#' This function is the low-level workhorse that powers [test_local()] and
#' [test_package()]. Generally, you should not call this function directly.
#' In particular, you are responsible for ensuring that the functions to test
#' are available in the test `env` (e.g. via `load_package`).
#'
#' @section Special files:
#' There are two types of `.R` file that have special behaviour:
#'
#' * Test files start with `test` and are executed in alphabetical order.
#'
#' * Setup files start with `setup` and are executed before tests. If
#'   clean up is needed after all tests have been run, you can use
#'   `withr::defer(clean_up(), teardown_env())`. See `vignette("test-fixtures")`
#'   for more details.
#'
#' There are two other types of special file that we no longer recommend using:
#'
#' * Helper files start with `helper` and are executed before tests are
#'   run. They're also loaded by `devtools::load_all()`, so there's no
#'   real point to them and you should just put your helper code in `R/`.
#'
#' * Teardown files start with `teardown` and are executed after the tests
#'   are run. Now we recommend interleaving setup and cleanup code in `setup-`
#'   files, making it easier to check that you automatically clean up every
#'   mess that you make (see the sketch below).
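#'
#' For example, a `setup-db.R` file might pair each piece of setup with its
#' cleanup (a minimal sketch; `connect_to_test_db()` is a hypothetical
#' helper, not a testthat function):
#'
#' ```
#' con <- connect_to_test_db()
#' withr::defer(DBI::dbDisconnect(con), teardown_env())
#' ```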
#'
#' All other files are ignored by testthat.
#'
#' @section Environments:
#' Each test is run in a clean environment to keep tests as isolated as
#' possible. For package tests, that environment inherits from the
#' package's namespace environment, so that tests can access internal functions
#' and objects.
#'
#' @param path Path to directory containing tests.
#' @param package If these tests belong to a package, the name of the package.
#' @param filter If not `NULL`, only tests with file names matching this
#'   regular expression will be executed. Matching is performed on the file
#'   name after it's stripped of `"test-"` and `".R"`.
#' @param env Environment in which to execute the tests. Expert use only.
#' @param ... Additional arguments passed to [grepl()] to control filtering.
#' @param load_helpers Source helper files before running the tests?
#'   See [source_test_helpers()] for more details.
#' @param stop_on_failure If `TRUE`, throw an error if any tests fail.
#' @param stop_on_warning If `TRUE`, throw an error if any tests generate
#'   warnings.
#' @param load_package Strategy to use for loading package code:
#'   * "none", the default, doesn't load the package.
#'   * "installed", uses [library()] to load an installed package.
#'   * "source", uses [pkgload::load_all()] to load a source package.
#' @param wrap DEPRECATED
#' @keywords internal
#' @return A list (invisibly) containing data about the test results.
#' @inheritParams with_reporter
#' @inheritParams source_file
#' @export
test_dir <- function(path,
                     filter = NULL,
                     reporter = NULL,
                     env = NULL,
                     ...,
                     load_helpers = TRUE,
                     stop_on_failure = TRUE,
                     stop_on_warning = FALSE,
                     wrap = lifecycle::deprecated(),
                     package = NULL,
                     load_package = c("none", "installed", "source")
                     ) {
  load_package <- arg_match(load_package)

  start_first <- find_test_start_first(path, load_package, package)
  test_paths <- find_test_scripts(
    path,
    filter = filter, ...,
    full.names = FALSE,
    start_first = start_first
  )
  if (length(test_paths) == 0) {
    abort("No test files found")
  }

  if (!is_missing(wrap)) {
    lifecycle::deprecate_warn("3.0.0", "test_dir(wrap = )")
  }

  want_parallel <- find_parallel(path, load_package, package)

  if (is.null(reporter)) {
    if (want_parallel) {
      reporter <- default_parallel_reporter()
    } else {
      reporter <- default_reporter()
    }
  }
  reporter <- find_reporter(reporter)
  parallel <- want_parallel && reporter$capabilities$parallel_support

  test_files(
    test_dir = path,
    test_paths = test_paths,
    test_package = package,
    reporter = reporter,
    load_helpers = load_helpers,
    env = env,
    stop_on_failure = stop_on_failure,
    stop_on_warning = stop_on_warning,
    wrap = wrap,
    load_package = load_package,
    parallel = parallel
  )
}

#' Run all tests in a single file
#'
#' Helper, setup, and teardown files located in the same directory as the
#' test will also be run.
#'
#' @inherit test_dir return params
#' @inheritSection test_dir Special files
#' @inheritSection test_dir Environments
#' @param path Path to file.
#' @param ... Additional parameters passed on to `test_dir()`
#' @export
#' @examples
#' path <- testthat_example("success")
#' test_file(path)
#' test_file(path, reporter = "minimal")
test_file <- function(path, reporter = default_compact_reporter(), package = NULL, ...) {
  if (!file.exists(path)) {
    stop("`path` does not exist", call. = FALSE)
  }

  test_files(
    test_dir = dirname(path),
    test_package = package,
    test_paths = basename(path),
    reporter = reporter,
    ...
) } test_files <- function(test_dir, test_package, test_paths, load_helpers = TRUE, reporter = default_reporter(), env = NULL, stop_on_failure = FALSE, stop_on_warning = FALSE, wrap = TRUE, load_package = c("none", "installed", "source"), parallel = FALSE) { if (is_missing(wrap)) { wrap <- TRUE } if (!isTRUE(wrap)) { lifecycle::deprecate_warn("3.0.0", "test_dir(wrap = )") } if (parallel) { test_files <- test_files_parallel } else { test_files <- test_files_serial } test_files( test_dir = test_dir, test_package = test_package, test_paths = test_paths, load_helpers = load_helpers, reporter = reporter, env = env, stop_on_failure = stop_on_failure, stop_on_warning = stop_on_warning, wrap = wrap, load_package = load_package ) } test_files_serial <- function(test_dir, test_package, test_paths, load_helpers = TRUE, reporter = default_reporter(), env = NULL, stop_on_failure = FALSE, stop_on_warning = FALSE, wrap = TRUE, load_package = c("none", "installed", "source")) { env <- test_files_setup_env(test_package, test_dir, load_package, env) test_files_setup_state(test_dir, test_package, load_helpers, env) reporters <- test_files_reporter(reporter) with_reporter(reporters$multi, lapply(test_paths, test_one_file, env = env, wrap = wrap) ) test_files_check(reporters$list$get_results(), stop_on_failure = stop_on_failure, stop_on_warning = stop_on_warning ) } test_files_setup_env <- function(test_package, test_dir, load_package = c("none", "installed", "source"), env = NULL) { library(testthat) load_package <- arg_match(load_package) switch(load_package, none = {}, installed = library(test_package, character.only = TRUE), source = pkgload::load_all(test_dir, helpers = FALSE, quiet = TRUE) ) env %||% test_env(test_package) } test_files_setup_state <- function(test_dir, test_package, load_helpers, env, .env = parent.frame()) { # Define testing environment local_test_directory(test_dir, test_package, .env = .env) withr::local_options( topLevelEnvironment = env_parent(env), .local_envir = .env ) # Load helpers, setup, and teardown (on exit) local_teardown_env(.env) if (load_helpers) { source_test_helpers(".", env) } source_test_setup(".", env) withr::defer(withr::deferred_run(teardown_env()), .env) # new school withr::defer(source_test_teardown(".", env), .env) # old school } test_files_reporter <- function(reporter, .env = parent.frame()) { lister <- ListReporter$new() reporters <- list( find_reporter(reporter), lister, # track data local_snapshotter("_snaps", fail_on_new = FALSE, .env = .env) # for snapshots ) list( multi = MultiReporter$new(reporters = compact(reporters)), list = lister ) } test_files_check <- function(results, stop_on_failure = TRUE, stop_on_warning = FALSE) { if (stop_on_failure && !all_passed(results)) { stop("Test failures", call. = FALSE) } if (stop_on_warning && any_warnings(results)) { stop("Tests generated warnings", call. = FALSE) } invisible(results) } test_one_file <- function(path, env = test_env(), wrap = TRUE) { reporter <- get_reporter() on.exit(teardown_run(), add = TRUE) reporter$start_file(path) source_file(path, child_env(env), wrap = wrap) reporter$end_context_if_started() reporter$end_file() } # Helpers ----------------------------------------------------------------- #' Run code after all test files #' #' This environment has no purpose other than as a handle for [withr::defer()]: #' use it when you want to run code after all tests have been run. 
#' Typically, you'll use `withr::defer(cleanup(), teardown_env())` #' immediately after you've made a mess in a `setup-*.R` file. #' #' @export teardown_env <- function() { testthat_env$teardown_env } local_teardown_env <- function(env = parent.frame()) { old <- testthat_env$teardown_env testthat_env$teardown_env <- child_env(emptyenv()) withr::defer(testthat_env$teardown_env <- old, env) invisible() } #' Find test files #' #' @param path path to tests #' @param invert If `TRUE` return files which **don't** match. #' @param ... Additional arguments passed to [grepl()] to control filtering. #' @param start_first A character vector of file patterns (globs, see #' [utils::glob2rx()]). The patterns are for the file names (base names), #' not for the whole paths. testthat starts the files matching the #' first pattern first, then the ones matching the second, etc. and then #' the rest of the files, alphabetically. Parallel tests tend to finish #' quicker if you start the slowest files first. `NULL` means alphabetical #' order. #' @inheritParams test_dir #' @return A character vector of paths #' @keywords internal #' @export find_test_scripts <- function(path, filter = NULL, invert = FALSE, ..., full.names = TRUE, start_first = NULL) { files <- dir(path, "^test.*\\.[rR]$", full.names = full.names) files <- filter_test_scripts(files, filter, invert, ...) order_test_scripts(files, start_first) } filter_test_scripts <- function(files, filter = NULL, invert = FALSE, ...) { if (is.null(filter)) { return(files) } which_files <- grepl(filter, context_name(files), ...) if (isTRUE(invert)) { which_files <- !which_files } files[which_files] } find_test_start_first <- function(path, load_package, package) { # Make sure we get the local package package if not "installed" if (load_package != "installed") package <- NULL desc <- find_description(path, package) if (is.null(desc)) { return(NULL) } conf <- desc$get_field("Config/testthat/start-first", NULL) if (is.null(conf)) { return(NULL) } trimws(strsplit(conf, ",")[[1]]) } order_test_scripts <- function(paths, start_first) { if (is.null(start_first)) return(paths) filemap <- data.frame( stringsAsFactors = FALSE, base = sub("\\.[rR]$", "", sub("^test[-_\\.]?", "", basename(paths))), orig = paths ) rxs <- utils::glob2rx(start_first) mch <- lapply(rxs, function(rx) filemap$orig[grep(rx, filemap$base)]) unique(c(unlist(mch), paths)) } testthat/R/testthat-package.R0000644000176200001440000000154414165635513015704 0ustar liggesusers#' An R package to make testing fun! #' #' Try the example below. Have a look at the references and learn more #' from function documentation such as [test_that()]. #' #' @section Options: #' - `testthat.use_colours`: Should the output be coloured? (Default: `TRUE`). #' - `testthat.summary.max_reports`: The maximum number of detailed test #' reports printed for the summary reporter (default: 10). #' - `testthat.summary.omit_dots`: Omit progress dots in the summary reporter #' (default: `FALSE`). #' #' @keywords internal "_PACKAGE" #' @import rlang #' @importFrom brio writeLines readLines #' @useDynLib testthat, .registration = TRUE NULL testthat_env <- new.env(parent = emptyenv()) # The following block is used by usethis to automatically manage # roxygen namespace tags. Modify with care! ## usethis namespace: start ## usethis namespace: end NULL testthat/R/reporter-minimal.R0000644000176200001440000000134014164710002015715 0ustar liggesusers#' Test reporter: minimal. 
#' #' The minimal test reporter provides the absolutely minimum amount of #' information: whether each expectation has succeeded, failed or experienced #' an error. If you want to find out what the failures and errors actually #' were, you'll need to run a more informative test reporter. #' #' @export #' @family reporters MinimalReporter <- R6::R6Class("MinimalReporter", inherit = Reporter, public = list( initialize = function(...) { super$initialize(...) self$capabilities$parallel_support <- TRUE }, add_result = function(context, test, result) { self$cat_tight(single_letter_summary(result)) }, end_reporter = function() { self$cat_line() } ) ) testthat/R/expect-setequal.R0000644000176200001440000000632614164710002015551 0ustar liggesusers#' Does code return a vector containing the expected values? #' #' * `expect_setequal(x, y)` tests that every element of `x` occurs in `y`, #' and that every element of `y` occurs in `x`. #' * `expect_mapequal(x, y)` tests that `x` and `y` have the same names, and #' that `x[names(y)]` equals `y`. #' #' Note that `expect_setequal()` ignores names, and you will be warned if both #' `object` and `expected` have them. #' #' @inheritParams expect_equal #' @export #' @examples #' expect_setequal(letters, rev(letters)) #' show_failure(expect_setequal(letters[-1], rev(letters))) #' #' x <- list(b = 2, a = 1) #' expect_mapequal(x, list(a = 1, b = 2)) #' show_failure(expect_mapequal(x, list(a = 1))) #' show_failure(expect_mapequal(x, list(a = 1, b = "x"))) #' show_failure(expect_mapequal(x, list(a = 1, b = 2, c = 3))) expect_setequal <- function(object, expected) { act <- quasi_label(enquo(object), arg = "object") exp <- quasi_label(enquo(expected), arg = "expected") if (!is_vector(act$val) || !is_vector(exp$val)) { abort("`object` and `expected` must both be vectors") } if (!is.null(names(act$val)) && !is.null(names(exp$val))) { warn("expect_setequal() ignores names") } act_miss <- !act$val %in% exp$val if (any(act_miss)) { fail( paste0(act$lab, "[", locations(act_miss), "] absent from ", exp$lab) ) } exp_miss <- !exp$val %in% act$val if (any(exp_miss)) { fail( paste0(exp$lab, "[", locations(exp_miss), "] absent from ", act$lab) ) } if (!any(exp_miss) && !any(act_miss)) { succeed() } invisible(act$val) } is_vector <- function(x) is.list(x) || (is.atomic(x) && !is.null(x)) locations <- function(i) { loc <- which(i) if (length(loc) == 1) { return(loc) } if (length(loc) > 10) { loc <- c(loc[1:9], "...") } paste0("c(", paste0(loc, collapse = ", "), ")") } #' @export #' @rdname expect_setequal expect_mapequal <- function(object, expected) { act <- quasi_label(enquo(object), arg = "object") exp <- quasi_label(enquo(expected), arg = "expected") if (!is_vector(act$val) || !is_vector(exp$val)) { abort("`object` and `expected` must both be vectors") } # Length-0 vectors are OK whether named or unnamed. 
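  # e.g. expect_mapequal(list(), list()) passes: an empty map has no names
  # to compare.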
if (length(act$val) == 0 && length(exp$val) == 0) {
    succeed()
    return(invisible(act$val))
  }

  act_nms <- names(act$val)
  exp_nms <- names(exp$val)

  check_names_ok(act_nms, "object")
  check_names_ok(exp_nms, "expected")

  if (!setequal(act_nms, exp_nms)) {
    act_miss <- setdiff(exp_nms, act_nms)
    if (length(act_miss) > 0) {
      vals <- paste0(encodeString(act_miss, quote = '"'), collapse = ", ")
      fail(paste0("Names absent from `object`: ", vals))
    }

    exp_miss <- setdiff(act_nms, exp_nms)
    if (length(exp_miss) > 0) {
      vals <- paste0(encodeString(exp_miss, quote = '"'), collapse = ", ")
      fail(paste0("Names absent from `expected`: ", vals))
    }
  } else {
    expect_equal(act$val[exp_nms], exp$val)
  }

  invisible(act$val)
}

check_names_ok <- function(x, label) {
  if (anyDuplicated(x)) {
    stop("Duplicate names in `", label, "`: ", unique(x[duplicated(x)]))
  }
  if (any(x == "")) {
    stop("All elements in `", label, "` must be named")
  }
}
testthat/R/snapshot-reporter.R0000644000176200001440000001346114167646004016142 0ustar liggesusers
SnapshotReporter <- R6::R6Class("SnapshotReporter",
  inherit = Reporter,
  public = list(
    snap_dir = character(),
    file = NULL,
    test = NULL,
    test_file_seen = character(),
    snap_file_seen = character(),
    variants_changed = FALSE,
    fail_on_new = FALSE,
    old_snaps = NULL,
    cur_snaps = NULL,
    new_snaps = NULL,

    initialize = function(snap_dir = "_snaps", fail_on_new = FALSE) {
      self$snap_dir <- normalizePath(snap_dir, mustWork = FALSE)
      self$fail_on_new <- fail_on_new
    },

    start_file = function(path, test = NULL) {
      self$file <- context_name(path)
      self$test_file_seen <- c(self$test_file_seen, self$file)
      self$variants_changed <- character()

      self$old_snaps <- FileSnaps$new(self$snap_dir, self$file, type = "old")
      self$cur_snaps <- FileSnaps$new(self$snap_dir, self$file, type = "cur")
      self$new_snaps <- FileSnaps$new(self$snap_dir, self$file, type = "new")

      if (!is.null(test)) {
        self$start_test(NULL, test)
      }
    },
    start_test = function(context, test) {
      self$test <- test
    },

    # Called by expectation
    take_snapshot = function(value, save = identity, load = identity, ...,
                             tolerance = testthat_tolerance(), variant = NULL,
                             trace_env = NULL) {
      i <- self$new_snaps$append(self$test, variant, save(value))

      old_raw <- self$old_snaps$get(self$test, variant, i)
      if (!is.null(old_raw)) {
        self$cur_snaps$append(self$test, variant, old_raw)

        old <- load(old_raw)
        comp <- waldo_compare(
          x = old, x_arg = "old",
          y = value, y_arg = "new",
          ...,
          tolerance = tolerance
        )
        if (length(comp) > 0L) {
          self$variants_changed <- union(self$variants_changed, variant)
        } else {
          # Use the old value for the new snapshot so the snapshot remains
          # unchanged if the values compare as equal
          self$new_snaps$set(self$test, variant, i, old_raw)
        }
        comp
      } else {
        value_enc <- save(value)
        check_roundtrip(value, load(value_enc), ..., tolerance = tolerance)

        self$cur_snaps$append(self$test, variant, value_enc)

        message <- paste0(
          "Adding new snapshot",
          if (variant != "_default") paste0(" for variant '", variant, "'"),
          if (self$fail_on_new) " in CI",
          ":\n",
          value_enc
        )
        if (self$fail_on_new) {
          fail(message, trace_env = trace_env)
        } else {
          testthat_warn(message)
        }
        character()
      }
    },
    take_file_snapshot = function(name, path, file_equal, variant = NULL,
                                  trace_env = NULL) {
      self$announce_file_snapshot(name)

      if (is.null(variant)) {
        snap_dir <- file.path(self$snap_dir, self$file)
      } else {
        snap_dir <- file.path(self$snap_dir, variant, self$file)
      }
      snapshot_file_equal(
        snap_test_dir = snap_dir,
        snap_name = name,
        path = path,
        file_equal = file_equal,
        fail_on_new =
self$fail_on_new, trace_env = trace_env ) }, # Also called from announce_snapshot_file() announce_file_snapshot = function(name) { self$snap_file_seen <- c(self$snap_file_seen, file.path(self$file, name)) }, add_result = function(context, test, result) { if (is.null(self$test)) { return() } # If expectation errors or skips, need to reset remaining snapshots if (expectation_error(result) || expectation_skip(result)) { self$cur_snaps$reset(self$test, self$old_snaps) } }, end_file = function() { dir.create(self$snap_dir, showWarnings = FALSE) self$cur_snaps$write() for (variant in self$new_snaps$variants()) { if (variant %in% self$variants_changed) { self$new_snaps$write(variant) } else { self$new_snaps$delete(variant) } } }, end_reporter = function() { # clean up if we've seen all files tests <- context_name(find_test_scripts(".", full.names = FALSE)) if (!on_ci() && all(tests %in% self$test_file_seen)) { snapshot_cleanup(self$snap_dir, test_files_seen = self$test_file_seen, snap_files_seen = self$snap_file_seen ) } }, is_active = function() { !is.null(self$file) && !is.null(self$test) }, snap_files = function() { dir(self$snap_dir, recursive = TRUE) } ) ) check_roundtrip <- function(x, y, ..., tolerance = testthat_tolerance()) { check <- waldo_compare(x, y, x_arg = "value", y_arg = "roundtrip", ..., tolerance = tolerance) if (length(check) > 0) { abort(c( paste0("Serialization round-trip is not symmetric.\n\n", check, "\n"), i = "You may need to consider serialization `style`") ) } } # set/get active snapshot reporter ---------------------------------------- get_snapshotter <- function() { x <- getOption("testthat.snapshotter") if (is.null(x)) { return() } if (!x$is_active()) { return() } x } #' Instantiate local snapshotting context #' #' Needed if you want to run snapshot tests outside of the usual testthat #' framework For expert use only. #' #' @export #' @keywords internal local_snapshotter <- function(snap_dir = NULL, cleanup = FALSE, fail_on_new = FALSE, .env = parent.frame()) { snap_dir <- snap_dir %||% withr::local_tempdir(.local_envir = .env) reporter <- SnapshotReporter$new(snap_dir = snap_dir, fail_on_new = fail_on_new) if (!identical(cleanup, FALSE)) { warn("`cleanup` is deprecated") } withr::local_options( "testthat.snapshotter" = reporter, .local_envir = .env ) reporter } testthat/R/parallel-config.R0000644000176200001440000000157014164710002015473 0ustar liggesusersfind_parallel <- function(path, load_package = "source", package = NULL) { # If env var is set, then use that parenv <- Sys.getenv("TESTTHAT_PARALLEL", NA_character_) if (!is.na(parenv)) { if (toupper(parenv) == "TRUE") return(TRUE) if (toupper(parenv) == "FALSE") return(FALSE) abort("`TESTTHAT_PARALLEL` must be `TRUE` or `FALSE`") } # Make sure we get the local package package if not "installed" if (load_package != "installed") package <- NULL desc <- find_description(path, package) if (is.null(desc)) { return(FALSE) } par <- identical( toupper(desc$get_field("Config/testthat/parallel", default = "FALSE")), "TRUE" ) if (par) { ed <- as.integer(desc$get_field("Config/testthat/edition", default = 2L)) if (ed < 3) { inform("Running tests in parallel requires the 3rd edition") par <- FALSE } } par } testthat/R/expect-output.R0000644000176200001440000000340114164710002015255 0ustar liggesusers#' Does code print output to the console? #' #' Test for output produced by `print()` or `cat()`. This is best used for #' very simple output; for more complex cases use [verify_output()]. 
#' @export
#' @family expectations
#' @inheritParams expect_that
#' @param regexp Regular expression to test against.
#'   * A character vector giving a regular expression that must match the output.
#'   * If `NULL`, the default, asserts that there should be output,
#'     but doesn't check for a specific value.
#'   * If `NA`, asserts that there should be no output.
#' @inheritDotParams expect_match -object -regexp -info -label
#' @inheritParams capture_output
#' @return The first argument, invisibly.
#' @examples
#' str(mtcars)
#' expect_output(str(mtcars), "32 obs")
#' expect_output(str(mtcars), "11 variables")
#'
#' # You can use the arguments of grepl to control the matching
#' expect_output(str(mtcars), "11 VARIABLES", ignore.case = TRUE)
#' expect_output(str(mtcars), "$ mpg", fixed = TRUE)
expect_output <- function(object,
                          regexp = NULL,
                          ...,
                          info = NULL,
                          label = NULL,
                          width = 80
                          ) {
  act <- quasi_capture(enquo(object), label, capture_output, width = width)

  if (identical(regexp, NA)) {
    expect(
      identical(act$cap, ""),
      sprintf("%s produced output.\n%s", act$lab, encodeString(act$cap)),
      info = info
    )
  } else if (is.null(regexp) || identical(act$cap, "")) {
    expect(
      !identical(act$cap, ""),
      sprintf("%s produced no output", act$lab),
      info = info
    )
  } else {
    expect_match(act$cap, enc2native(regexp), ..., info = info, label = act$lab)
  }

  invisible(act$val)
}
testthat/R/snapshot-serialize.R0000644000176200001440000000324214165635513016264 0ustar liggesusers# data is list of character vectors
snap_to_md <- function(data) {
  h2 <- paste0("# ", names(data), "\n\n")
  code_block <- function(x) paste0(indent_add(x), collapse = "\n\n---\n\n")
  data <- vapply(data, code_block, character(1))

  paste0(h2, data, "\n\n", collapse = "")
}

snap_from_md <- function(lines) {
  lines <- gsub("\r", "", lines, fixed = TRUE)

  h2 <- grepl("^# ", lines)
  tests_group <- cumsum(h2)

  tests <- split(lines[!h2], tests_group[!h2])
  names(tests) <- gsub("^# ", "", lines[h2])

  split_tests <- function(lines) {
    sep <- grepl("^-{3,}", lines)
    case_group <- cumsum(sep)

    # Remove first line and last line, separator, line above and line below
    sep_loc <- which(sep)
    drop <- c(1, sep_loc, sep_loc + 1, sep_loc - 1, length(lines))
    cases <- unname(split(lines[-drop], case_group[-drop]))

    code_unblock <- function(x) paste0(indent_del(x), collapse = "\n")
    vapply(cases, code_unblock, character(1))
  }
  lapply(tests, split_tests)
}

read_snaps <- function(path) {
  if (file.exists(path)) {
    lines <- brio::read_lines(path)
    snap_from_md(lines)
  } else {
    list()
  }
}

write_snaps <- function(snaps, path, delete = FALSE) {
  snaps <- compact(snaps)

  if (length(snaps) == 0) {
    if (delete) {
      unlink(path)
    }
    return()
  }

  out <- snap_to_md(snaps)
  brio::write_file(out, path)
}

# Helpers -----------------------------------------------------------------

indent_add <- function(x, prefix = "    ") {
  paste0(prefix, gsub("\n", paste0("\n", prefix), x, fixed = TRUE))
}

indent_del <- function(x, prefix = "    ") {
  x <- gsub(paste0("^", prefix), "", x)
  x <- gsub(paste0("\n", prefix), "\n", x)
  x
}
testthat/R/snapshot.R0000644000176200001440000003366014167646004014305 0ustar liggesusers#' Snapshot testing
#'
#' @description
#' Snapshot tests (aka golden tests) are similar to unit tests except that the
#' expected result is stored in a separate file that is managed by testthat.
#' Snapshot tests are useful when the expected value is large, or when
#' the intent of the code is something that can only be verified by a human
#' (e.g. this is a useful error message). Learn more in
#' `vignette("snapshotting")`.
#'
#' * `expect_snapshot()` captures all messages, warnings, errors, and
#'   output from code.
#' * `expect_snapshot_output()` captures just output printed to the console.
#' * `expect_snapshot_error()` captures an error message and
#'   optionally checks its class.
#' * `expect_snapshot_warning()` captures a warning message and
#'   optionally checks its class.
#' * `expect_snapshot_value()` captures the return value.
#'
#' (These functions supersede [verify_output()], [expect_known_output()],
#' [expect_known_value()], and [expect_known_hash()].)
#'
#' @section Workflow:
#' The first time that you run a snapshot expectation it will run `x`,
#' capture the results, and record them in `tests/testthat/_snaps/{test}.md`.
#' Each test file gets its own snapshot file, e.g. `test-foo.R` will get
#' `_snaps/foo.md`.
#'
#' It's important to review the Markdown files and commit them to git. They are
#' designed to be human readable, and you should always review new additions
#' to ensure that the salient information has been captured. They should also
#' be carefully reviewed in pull requests, to make sure that snapshots have
#' updated in the expected way.
#'
#' On subsequent runs, the result of `x` will be compared to the value stored
#' on disk. If it's different, the expectation will fail, and a new file
#' `_snaps/{test}.new.md` will be created. If the change was deliberate,
#' you can approve the change with [snapshot_accept()] and then the tests will
#' pass the next time you run them.
#'
#' Note that snapshotting can only work when executing a complete test file
#' (with [test_file()], [test_dir()], or friends) because there's otherwise
#' no way to figure out the snapshot path. If you run snapshot tests
#' interactively, they'll just display the current value.
#'
#' @param x Code to evaluate.
#' @param cran Should these expectations be verified on CRAN? By default,
#'   they are not, because snapshot tests tend to be fragile because they
#'   often rely on minor details of dependencies.
#' @param error Do you expect the code to throw an error? The expectation
#'   will fail (even on CRAN) if an unexpected error is thrown or the
#'   expected error is not thrown.
#' @param variant `r lifecycle::badge("experimental")`
#'
#'   If not-`NULL`, results will be saved in `_snaps/{variant}/{test}.md`,
#'   so `variant` must be a single string of alphanumeric characters suitable
#'   for use as a directory name.
#'
#'   You can use variants to deal with cases where the snapshot output varies
#'   and you want to capture and test the variations. Common use cases include
#'   variations for operating system, R version, or version of key dependency.
#'   Variants are an advanced feature. When you use them, you'll need to
#'   carefully think about your testing strategy to ensure that all important
#'   variants are covered by automated tests, and ensure that you have a way
#'   to get snapshot changes out of your CI system and back into the repo.
#' @param transform Optionally, a function to scrub sensitive or stochastic
#'   text from the output. Should take a character vector of lines as input
#'   and return a modified character vector as output.
#' @param cnd_class Whether to include the class of messages,
#'   warnings, and errors in the snapshot. Only the most specific
#'   class is included, i.e. the first element of `class(cnd)`.
#' @export expect_snapshot <- function(x, cran = FALSE, error = FALSE, transform = NULL, variant = NULL, cnd_class = FALSE) { edition_require(3, "expect_snapshot()") variant <- check_variant(variant) if (!is.null(transform)) { transform <- as_function(transform) } x <- enquo0(x) # Execute code, capturing last error state <- new_environment(list(error = NULL)) replay <- function(x) { snapshot_replay( x, state, transform = transform, cnd_class = cnd_class ) } out <- verify_exec(quo_get_expr(x), quo_get_env(x), replay) # Use expect_error() machinery to confirm that error is as expected msg <- compare_condition_3e("error", state$error, quo_label(x), error) if (!is.null(msg)) { if (error) { expect(FALSE, msg, trace = state$error[["trace"]]) } else { exp_signal(expectation("error", msg, trace = state$error[["trace"]])) } return() } expect_snapshot_helper("code", out, cran = cran, save = function(x) paste0(x, collapse = "\n"), load = function(x) split_by_line(x)[[1]], variant = variant, trace_env = caller_env() ) } snapshot_replay <- function(x, state, ..., transform = NULL) { UseMethod("snapshot_replay", x) } #' @export snapshot_replay.character <- function(x, state, ..., transform = NULL) { c(snap_header(state, "Output"), snapshot_lines(x, transform)) } #' @export snapshot_replay.source <- function(x, state, ..., transform = NULL) { c(snap_header(state, "Code"), snapshot_lines(x$src)) } #' @export snapshot_replay.condition <- function(x, state, ..., transform = NULL, cnd_class = FALSE) { if (!use_rlang_1_0()) { return(snapshot_replay_condition_legacy( x, state, transform = transform )) } cnd_message <- env_get(ns_env("rlang"), "cnd_message") if (inherits(x, "message")) { msg <- cnd_message(x) type <- "Message" } else { if (inherits(x, "error")) { state$error <- x } msg <- cnd_message(x, prefix = TRUE) type <- "Condition" } if (cnd_class) { type <- paste0(type, " <", class(x)[[1]], ">") } c(snap_header(state, type), snapshot_lines(msg, transform)) } snapshot_replay_condition_legacy <- function(x, state = env(), transform = NULL) { msg <- cnd_message(x) if (inherits(x, "error")) { state$error <- x type <- "Error" msg <- add_implict_nl(msg) } else if (inherits(x, "warning")) { type <- "Warning" msg <- paste0(msg, "\n") } else if (inherits(x, "message")) { type <- "Message" } else { type <- "Condition" } class <- paste0(type, " <", class(x)[[1]], ">") c(snap_header(state, class), snapshot_lines(msg, transform)) } snapshot_lines <- function(x, transform = NULL) { x <- split_lines(x) if (!is.null(transform)) { x <- transform(x) } x <- indent(x) x } add_implict_nl <- function(x) { if (substr(x, nchar(x), nchar(x)) == "\n") { x } else { paste0(x, "\n") } } snap_header <- function(state, header) { if (!identical(state$header, header)) { state$header <- header header } } #' @export #' @rdname expect_snapshot expect_snapshot_output <- function(x, cran = FALSE, variant = NULL) { edition_require(3, "expect_snapshot_output()") variant <- check_variant(variant) lab <- quo_label(enquo(x)) val <- capture_output_lines(x, print = TRUE, width = NULL) expect_snapshot_helper(lab, val, cran = cran, save = function(x) paste0(x, collapse = "\n"), load = function(x) split_by_line(x)[[1]], variant = variant, trace_env = caller_env() ) } #' @param class Class of expected error or warning. The expectation will #' always fail (even on CRAN) if an error of this class isn't seen #' when executing `x`. 
#' @export #' @rdname expect_snapshot expect_snapshot_error <- function(x, class = "error", cran = FALSE, variant = NULL) { edition_require(3, "expect_snapshot_error()") expect_snapshot_condition( "error", {{x}}, class = class, cran = cran, variant = variant ) } #' @export #' @rdname expect_snapshot expect_snapshot_warning <- function(x, class = "warning", cran = FALSE, variant = NULL) { edition_require(3, "expect_snapshot_warning()") expect_snapshot_condition( "warning", {{x}}, class = class, cran = cran, variant = variant ) } expect_snapshot_condition <- function(base_class, x, class, cran = FALSE, variant = NULL) { variant <- check_variant(variant) lab <- quo_label(enquo(x)) val <- capture_matching_condition(x, cnd_matcher(class)) if (is.null(val)) { if (base_class == class) { fail(sprintf("%s did not generate %s", lab, base_class)) } else { fail(sprintf("%s did not generate %s with class '%s'", lab, base_class, class)) } } expect_snapshot_helper( lab, conditionMessage(val), cran = cran, variant = variant, trace_env = caller_env() ) } #' @param style Serialization style to use: #' * `json` uses [jsonlite::fromJSON()] and [jsonlite::toJSON()]. This #' produces the simplest output but only works for relatively simple #' objects. #' * `json2` uses [jsonlite::serializeJSON()] and [jsonlite::unserializeJSON()] #' which are more verbose but work for a wider range of type. #' * `deparse` uses [deparse()], which generates a depiction of the object #' using R code. #' * `serialize()` produces a binary serialization of the object using #' [serialize()]. This is all but guaranteed to work for any R object, #' but produces a completely opaque serialization. #' @param ... For `expect_snapshot_value()` only, passed on to #' [waldo::compare()] so you can control the details of the comparison. 
#' @export #' @inheritParams compare #' @rdname expect_snapshot expect_snapshot_value <- function(x, style = c("json", "json2", "deparse", "serialize"), cran = FALSE, tolerance = testthat_tolerance(), ..., variant = NULL) { edition_require(3, "expect_snapshot_value()") variant <- check_variant(variant) lab <- quo_label(enquo(x)) style <- arg_match(style) save <- switch(style, json = function(x) jsonlite::toJSON(x, auto_unbox = TRUE, pretty = TRUE), json2 = function(x) jsonlite::serializeJSON(x, pretty = TRUE), deparse = function(x) paste0(deparse(x), collapse = "\n"), serialize = function(x) jsonlite::base64_enc(serialize(x, NULL, version = 2)) ) load <- switch(style, json = function(x) jsonlite::fromJSON(x, simplifyVector = FALSE), json2 = function(x) jsonlite::unserializeJSON(x), deparse = function(x) reparse(x), serialize = function(x) unserialize(jsonlite::base64_dec(x)) ) expect_snapshot_helper(lab, x, save = save, load = load, cran = cran, ..., tolerance = tolerance, variant = variant, trace_env = caller_env() ) } # Safe environment for evaluating deparsed objects, based on inspection of # https://github.com/wch/r-source/blob/5234fe7b40aad8d3929d240c83203fa97d8c79fc/src/main/deparse.c#L845 reparse <- function(x) { env <- env(emptyenv(), `-` = `-`, c = c, list = list, quote = quote, structure = structure, expression = expression, `function` = `function`, new = methods::new, getClass = methods::getClass, pairlist = pairlist, alist = alist, as.pairlist = as.pairlist ) eval(parse(text = x), env) } expect_snapshot_helper <- function(lab, val, cran = FALSE, save = identity, load = identity, ..., tolerance = testthat_tolerance(), variant = NULL, trace_env = caller_env() ) { if (!cran && !interactive() && on_cran()) { skip("On CRAN") } snapshotter <- get_snapshotter() if (is.null(snapshotter)) { snapshot_not_available(paste0("Current value:\n", save(val))) return(invisible()) } comp <- snapshotter$take_snapshot(val, save = save, load = load, ..., tolerance = tolerance, variant = variant, trace_env = trace_env ) if (!identical(variant, "_default")) { variant_lab <- paste0(" (variant '", variant, "')") } else { variant_lab <- "" } hint <- snapshot_accept_hint(variant, snapshotter$file) expect( length(comp) == 0, sprintf( "Snapshot of %s has changed%s:\n%s\n\n%s", lab, variant_lab, paste0(comp, collapse = "\n\n"), hint ), trace_env = trace_env ) } snapshot_accept_hint <- function(variant, file) { if (is.null(variant) || variant == "_default") { name <- file } else { name <- file.path(variant, file) } paste0( "* Run `snapshot_accept('", name, "')` to accept the change\n", "* Run `snapshot_review('", name, "')` to interactively review the change" ) } snapshot_not_available <- function(message) { inform(c( crayon::bold("Can't compare snapshot to reference when testing interactively"), i = "Run `devtools::test()` or `testthat::test_file()` to see changes", i = message )) } local_snapshot_dir <- function(snap_names, .env = parent.frame()) { path <- withr::local_tempdir(.local_envir = .env) dir.create(file.path(path, "_snaps"), recursive = TRUE) dirs <- setdiff(unique(dirname(snap_names)), ".") for (dir in dirs) { dir.create(file.path(path, "_snaps", dir), recursive = TRUE, showWarnings = FALSE) } snap_paths <- file.path(path, "_snaps", snap_names) lapply(snap_paths, brio::write_lines, text = "") path } # if transform() wiped out the full message, don't indent, #1487 indent <- function(x) if (length(x)) paste0(" ", x) else x check_variant <- function(x) { if (is.null(x)) { "_default" } else if 
(is_string(x)) { x } else { abort("If supplied, `variant` must be a string") } } testthat/R/expect-length.R0000644000176200001440000000116114164710002015177 0ustar liggesusers#' Does code return a vector with the specified length? #' #' @seealso [expect_vector()] to make assertions about the "size" of a vector #' @inheritParams expect_that #' @param n Expected length. #' @family expectations #' @export #' @examples #' expect_length(1, 1) #' expect_length(1:10, 10) #' #' \dontrun{ #' expect_length(1:10, 1) #' } expect_length <- function(object, n) { stopifnot(is.numeric(n), length(n) == 1) act <- quasi_label(enquo(object), arg = "object") act$n <- length(act$val) expect( act$n == n, sprintf("%s has length %i, not length %i.", act$lab, act$n, n) ) invisible(act$val) } testthat/R/reporter-progress.R0000644000176200001440000003446414165635513016166 0ustar liggesusers#' Test reporter: interactive progress bar of errors. #' #' @description #' `ProgressReporter` is designed for interactive use. Its goal is to #' give you actionable insights to help you understand the status of your #' code. This reporter also praises you from time-to-time if all your tests #' pass. It's the default reporter for [test_dir()]. #' #' `ParallelProgressReporter` is very similar to `ProgressReporter`, but #' works better for packages that want parallel tests. #' #' `CompactProgressReporter` is a minimal version of `ProgressReporter` #' designed for use with single files. It's the default reporter for #' [test_file()]. #' #' @export #' @family reporters ProgressReporter <- R6::R6Class("ProgressReporter", inherit = Reporter, public = list( show_praise = TRUE, min_time = 0.1, start_time = NULL, last_update = NULL, update_interval = NULL, skips = NULL, max_fail = NULL, verbose_skips = NULL, n_ok = 0, n_skip = 0, n_warn = 0, n_fail = 0, frames = NULL, dynamic = FALSE, ctxt_start_time = NULL, ctxt_issues = NULL, ctxt_n = 0, ctxt_n_ok = 0, ctxt_n_skip = 0, ctxt_n_warn = 0, ctxt_n_fail = 0, ctxt_name = "", file_name = "", initialize = function(show_praise = TRUE, max_failures = testthat_max_fails(), min_time = 0.1, update_interval = 0.1, verbose_skips = getOption("testthat.progress.verbose_skips", TRUE), ...) { super$initialize(...) 
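      # Advertise capabilities after calling the parent constructor:
      # test_dir() checks reporter$capabilities$parallel_support before
      # deciding whether the test files can actually be run in parallel.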
self$capabilities$parallel_support <- TRUE self$show_praise <- show_praise self$max_fail <- max_failures self$verbose_skips <- !rlang::is_false(verbose_skips) self$min_time <- min_time self$update_interval <- update_interval self$skips <- Stack$new() self$ctxt_issues <- Stack$new() # Capture at init so not affected by test settings self$frames <- cli::get_spinner()$frames self$dynamic <- cli::is_dynamic_tty() }, is_full = function() { self$n_fail >= self$max_fail }, start_reporter = function(context) { self$start_time <- proc.time() self$show_header() }, start_file = function(file) { self$file_name <- file self$ctxt_issues <- Stack$new() self$ctxt_start_time <- proc.time() context_start_file(self$file_name) }, start_context = function(context) { self$ctxt_name <- context self$ctxt_issues <- Stack$new() self$ctxt_n <- 0L self$ctxt_n_ok <- 0L self$ctxt_n_fail <- 0L self$ctxt_n_warn <- 0L self$ctxt_n_skip <- 0L self$ctxt_start_time <- proc.time() self$show_status() }, show_header = function() { self$cat_line( colourise(cli::symbol$tick, "success"), " | ", colourise("F", "failure"), " ", colourise("W", "warning"), " ", colourise("S", "skip"), " ", colourise(" OK", "success"), " | ", "Context" ) }, status_data = function() { list( n = self$ctxt_n, n_ok = self$ctxt_n_ok, n_fail = self$ctxt_n_fail, n_warn = self$ctxt_n_warn, n_skip = self$ctxt_n_skip, name = self$ctxt_name ) }, show_status = function(complete = FALSE, time = 0, pad = FALSE) { data <- self$status_data() if (complete) { if (data$n_fail > 0) { status <- crayon::red(cli::symbol$cross) } else { status <- crayon::green(cli::symbol$tick) } } else { # Do not print if not enough time has passed since we last printed. if (!self$should_update()) { return() } status <- spinner(self$frames, data$n) if (data$n_fail > 0) { status <- colourise(status, "failure") } else if (data$n_warn > 0) { status <- colourise(status, "warning") } } col_format <- function(n, type) { if (n == 0) { " " } else { colourise(n, type) } } message <- paste0( status, " | ", col_format(data$n_fail, "fail"), " ", col_format(data$n_warn, "warn"), " ", col_format(data$n_skip, "skip"), " ", sprintf("%3d", data$n_ok), " | ", data$name ) if (complete && time > self$min_time) { message <- paste0( message, cli::col_grey(sprintf(" [%.1fs]", time)) ) } if (pad) { message <- strpad(message, self$width) message <- crayon::col_substr(message, 1, self$width) } if (!complete) { message <- strpad(message, self$width) self$cat_tight(self$cr(), message) } else { self$cat_line(self$cr(), message) } }, cr = function() { if (self$dynamic) { "\r" } else { "\n" } }, end_context = function(context) { time <- proc.time() - self$ctxt_start_time self$last_update <- NULL # context with no expectation = automatic file context in file # that also has manual contexts if (self$ctxt_n == 0) { return() } self$show_status(complete = TRUE, time = time[[3]]) self$report_issues(self$ctxt_issues) if (self$is_full()) { snapshotter <- get_snapshotter() if (!is.null(snapshotter)) { snapshotter$end_file() } stop_reporter(paste0( "Maximum number of failures exceeded; quitting at end of file.\n", "Increase this number with (e.g.) 
`Sys.setenv('TESTTHAT_MAX_FAILS' = Inf)`" )) } }, add_result = function(context, test, result) { self$ctxt_n <- self$ctxt_n + 1L if (expectation_broken(result)) { self$n_fail <- self$n_fail + 1 self$ctxt_n_fail <- self$ctxt_n_fail + 1 self$ctxt_issues$push(result) } else if (expectation_skip(result)) { self$n_skip <- self$n_skip + 1 self$ctxt_n_skip <- self$ctxt_n_skip + 1 if (self$verbose_skips) { self$ctxt_issues$push(result) } self$skips$push(result$message) } else if (expectation_warning(result)) { self$n_warn <- self$n_warn + 1 self$ctxt_n_warn <- self$ctxt_n_warn + 1 self$ctxt_issues$push(result) } else { self$n_ok <- self$n_ok + 1 self$ctxt_n_ok <- self$ctxt_n_ok + 1 } self$show_status() }, end_reporter = function() { self$cat_line() colour_if <- function(n, type) { colourise(n, if (n == 0) "success" else type) } self$rule(crayon::bold("Results"), line = 2) time <- proc.time() - self$start_time if (time[[3]] > self$min_time) { self$cat_line("Duration: ", sprintf("%.1f s", time[[3]]), col = "cyan") self$cat_line() } if (self$n_skip > 0) { self$rule("Skipped tests ", line = 1) self$cat_line(skip_bullets(self$skips$as_list())) self$cat_line() } status <- summary_line(self$n_fail, self$n_warn, self$n_skip, self$n_ok) self$cat_line(status) if (self$is_full()) { self$rule("Terminated early", line = 2) } if (!self$show_praise || runif(1) > 0.1) { return() } self$cat_line() if (self$n_fail == 0) { self$cat_line(colourise(praise(), "success")) } else { self$cat_line(colourise(encourage(), "error")) } }, report_issues = function(issues) { if (issues$size() > 0) { self$rule() issues <- issues$as_list() summary <- vapply(issues, issue_summary, FUN.VALUE = character(1)) self$cat_tight(paste(summary, collapse = "\n\n")) self$cat_line() self$rule() } }, should_update = function() { if (self$update_interval == 0) { return(TRUE) } if (identical(self$update_interval, Inf)) { return(FALSE) } time <- proc.time()[[3]] if (!is.null(self$last_update) && (time - self$last_update) < self$update_interval) { return(FALSE) } self$last_update <- time TRUE } ) ) testthat_max_fails <- function() { val <- getOption("testthat.progress.max_fails") if (is.null(val)) { env <- Sys.getenv("TESTTHAT_MAX_FAILS") val <- if (!identical(env, "")) as.numeric(env) else 10 } val } #' @export #' @rdname ProgressReporter CompactProgressReporter <- R6::R6Class("CompactProgressReporter", inherit = ProgressReporter, public = list( # Is this being run by RStudio's test file button? rstudio = FALSE, initialize = function(rstudio = FALSE, min_time = Inf, ...) { self$rstudio <- rstudio super$initialize(min_time = min_time, ...) 
}, start_file = function(name) { if (!self$rstudio) { self$cat_line() self$rule(cli::style_bold(paste0("Testing ", name)), line = 2) } super$start_file(name) }, start_reporter = function(context) { }, end_context = function(context) { if (self$ctxt_issues$size() == 0) { return() } self$cat_line() self$cat_line() issues <- self$ctxt_issues$as_list() summary <- vapply(issues, issue_summary, rule = TRUE, FUN.VALUE = character(1) ) self$cat_tight(paste(summary, collapse = "\n\n")) self$cat_line() self$cat_line() }, end_reporter = function() { if (self$n_fail > 0 || self$n_warn > 0 || self$n_skip > 0) { self$show_status() self$cat_line() } else if (self$is_full()) { self$cat_line(" Terminated early") } else if (!self$rstudio) { self$cat_line(crayon::bold(" Done!")) } }, show_status = function(complete = NULL) { self$local_user_output() status <- summary_line(self$n_fail, self$n_warn, self$n_skip, self$n_ok) self$cat_tight(self$cr(), status) } ) ) # parallel progress reporter ----------------------------------------------- #' @export #' @rdname ProgressReporter ParallelProgressReporter <- R6::R6Class("ParallelProgressReporter", inherit = ProgressReporter, public = list( files = list(), spin_frame = 0L, is_rstudio = FALSE, initialize = function(...) { super$initialize(...) self$capabilities$parallel_support <- TRUE self$capabilities$parallel_updates <- TRUE self$update_interval <- 0.05 self$is_rstudio <- Sys.getenv("RSTUDIO", "") == "1" }, start_file = function(file) { if (! file %in% names(self$files)) { self$files[[file]] <- list( issues = Stack$new(), n_fail = 0L, n_skip = 0L, n_warn = 0L, n_ok = 0L, name = context_name(file), start_time = proc.time() ) } self$file_name <- file }, start_context = function(context) { # we'll just silently ignore this }, end_context = function(context) { # we'll just silently ignore this }, end_file = function() { fsts <- self$files[[self$file_name]] time <- proc.time() - fsts$start_time # Workaround for https://github.com/rstudio/rstudio/issues/7649 if (self$is_rstudio) { self$cat_tight(strpad(self$cr(), self$width + 1)) # +1 for \r } self$show_status(complete = TRUE, time = time[[3]], pad = TRUE) self$report_issues(fsts$issues) self$files[[self$file_name]] <- NULL if (length(self$files)) self$update(force = TRUE) }, end_reporter = function() { self$cat_tight(self$cr(), strpad("", self$width)) super$end_reporter() }, show_header = function() { super$show_header() self$update(force = TRUE) }, status_data = function() { self$files[[self$file_name]] }, add_result = function(context, test, result) { self$ctxt_n <- self$ctxt_n + 1L file <- self$file_name if (expectation_broken(result)) { self$n_fail <- self$n_fail + 1 self$files[[file]]$n_fail <- self$files[[file]]$n_fail + 1L self$files[[file]]$issues$push(result) } else if (expectation_skip(result)) { self$n_skip <- self$n_skip + 1 self$files[[file]]$n_skip <- self$files[[file]]$n_skip + 1L if (self$verbose_skips) { self$files[[file]]$issues$push(result) } self$skips$push(result$message) } else if (expectation_warning(result)) { self$n_warn <- self$n_warn + 1 self$files[[file]]$n_warn <- self$files[[file]]$n_warn + 1L self$files[[file]]$issues$push(result) } else { self$n_ok <- self$n_ok + 1 self$files[[file]]$n_ok <- self$files[[file]]$n_ok + 1 } }, update = function(force = FALSE) { if (!force && !self$should_update()) return() self$spin_frame <- self$spin_frame + 1L status <- spinner(self$frames, self$spin_frame) message <- paste( status, summary_line(self$n_fail, self$n_warn, self$n_skip, self$n_ok), if
(length(self$files) > 0) "@" else "Starting up...", paste(context_name(names(self$files)), collapse = ", ") ) message <- strpad(message, self$width) message <- crayon::col_substr(message, 1, self$width) self$cat_tight(self$cr(), message) } ) ) # helpers ----------------------------------------------------------------- spinner <- function(frames, i) { frames[((i - 1) %% length(frames)) + 1] } issue_header <- function(x, pad = FALSE) { type <- expectation_type(x) if (has_colour()) { type <- colourise(first_upper(type), type) } else { type <- first_upper(type) } if (pad) { type <- strpad(type, 7) } loc <- expectation_location(x) paste0(type, " (", loc, "): ", x$test) } issue_summary <- function(x, rule = FALSE, simplify = "branch") { header <- crayon::bold(issue_header(x)) if (rule) { header <- cli::rule(header, width = max(nchar(header) + 6, 80)) } paste0(header, "\n", format(x, simplify = simplify)) } strpad <- function(x, width = cli::console_width()) { n <- pmax(0, width - crayon::col_nchar(x)) paste0(x, strrep(" ", n)) } skip_bullets <- function(skips) { skips <- unlist(skips) skips <- gsub("Reason: ", "", skips) skips <- gsub(":?\n(\n|.)+", "", skips) # only show first line tbl <- table(skips) paste0(cli::symbol$bullet, " ", names(tbl), " (", tbl, ")") } testthat/R/reporter-summary.R0000644000176200001440000001055514164710002015774 0ustar liggesusers#' Test reporter: summary of errors. #' #' This is a reporter designed for interactive usage: it lets you know which #' tests have run successfully, as well as fully reporting information about #' failures and errors. #' #' You can use the `max_reports` field to control the maximum number #' of detailed reports produced by this reporter. This is useful when running #' with [auto_test()]. #' #' As an additional benefit, this reporter will praise you from time-to-time #' if all your tests pass. #' #' @export #' @family reporters SummaryReporter <- R6::R6Class("SummaryReporter", inherit = Reporter, public = list( failures = NULL, skips = NULL, warnings = NULL, max_reports = NULL, show_praise = TRUE, omit_dots = FALSE, initialize = function(show_praise = TRUE, omit_dots = getOption("testthat.summary.omit_dots"), max_reports = getOption("testthat.summary.max_reports", 10L), ...) { super$initialize(...)
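# max_reports caps how many detailed failure write-ups are printed at the end; see cat_reports() in the private list below.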
self$capabilities$parallel_support <- TRUE self$failures <- Stack$new() self$skips <- Stack$new() self$warnings <- Stack$new() self$max_reports <- max_reports self$show_praise <- show_praise self$omit_dots <- omit_dots }, is_full = function() { self$failures$size() >= self$max_reports }, start_file = function(file) { context_start_file(file) }, start_context = function(context) { self$cat_tight(context, ": ") }, end_context = function(context) { self$cat_line() }, add_result = function(context, test, result) { if (expectation_broken(result)) { self$failures$push(result) } else if (expectation_skip(result)) { self$skips$push(result) } else if (expectation_warning(result)) { self$warnings$push(result) } else { if (isTRUE(self$omit_dots)) { return() } } self$cat_tight(private$get_summary(result)) }, end_reporter = function() { skips <- self$skips$as_list() failures <- self$failures$as_list() warnings <- self$warnings$as_list() self$cat_line() private$cat_reports("Skipped", skips, Inf, skip_summary) private$cat_reports("Warnings", warnings, Inf, skip_summary) private$cat_reports("Failed", failures, self$max_reports, failure_summary) if (self$failures$size() >= self$max_reports) { self$cat_line( "Maximum number of ", self$max_reports, " failures reached, ", "some test results may be missing." ) self$cat_line() } self$rule("DONE", line = 2) if (self$show_praise) { if (length(failures) == 0 && runif(1) < 0.1) { self$cat_line(colourise(praise(), "success")) } if (length(failures) > 0 && runif(1) < 0.25) { self$cat_line(colourise(encourage(), "error")) } } } ), private = list( get_summary = function(result) { if (expectation_broken(result)) { if (self$failures$size() <= length(labels)) { return(colourise(labels[self$failures$size()], "error")) } } single_letter_summary(result) }, cat_reports = function(header, expectations, max_n, summary_fun, collapse = "\n\n") { n <- length(expectations) if (n == 0L) { return() } self$rule(header, line = 2) if (n > max_n) { expectations <- expectations[seq_len(max_n)] } labels <- seq_along(expectations) exp_summary <- function(i) { summary_fun(expectations[[i]], labels[i]) } report_summary <- vapply(seq_along(expectations), exp_summary, character(1)) self$cat_tight(paste(report_summary, collapse = collapse)) if (n > max_n) { self$cat_line() self$cat_line(" ... and ", n - max_n, " more") } self$cat_line() self$cat_line() } ) ) labels <- c(1:9, letters, LETTERS) skip_summary <- function(x, label) { header <- paste0(label, ". ", x$test) paste0( colourise(header, "skip"), " (", expectation_location(x), ") - ", x$message ) } failure_summary <- function(x, label, width = cli::console_width()) { header <- paste0(label, ". ", issue_header(x)) paste0( cli::rule(header, col = testthat_style("error")), "\n", format(x) ) } testthat/R/local.R0000644000176200001440000001603514165635513013542 0ustar liggesusers#' Locally set options for maximal test reproducibility #' #' @description #' `local_test_context()` is run automatically by `test_that()` but you may #' want to run it yourself if you want to replicate test results interactively. #' If run inside a function, the effects are automatically reversed when the #' function exits; if running in the global environment, use #' [withr::deferred_run()] to undo. #' #' `local_reproducible_output()` is run automatically by `test_that()` in the #' 3rd edition. You might want to call it to override the default settings #' inside a test, if you want to test Unicode, coloured output, or a #' non-standard width.
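#' #' For instance, a minimal sketch of pinning the width inside a single test (the test name and message below are illustrative, not part of testthat): #' ```R #' test_that("output wraps at width 40", { #' local_reproducible_output(width = 40) #' expect_snapshot(message(strrep("a b ", 20))) #' }) #' ```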
#' #' @details #' `local_test_context()` sets `TESTTHAT = "true"`, which ensures that #' [is_testing()] returns `TRUE` and allows code to tell if it is run by #' testthat. #' #' In the third edition, `local_test_context()` also calls #' `local_reproducible_output()` which temporarily sets the following options: #' #' * `cli.dynamic = FALSE` so that tests assume that they are not run in #' a dynamic console (i.e. one where you can move the cursor around). #' * `cli.unicode` (default: `FALSE`) so that the cli package never generates #' unicode output (normally cli uses unicode on Linux/Mac but not Windows). #' Windows can't easily save unicode output to disk, so it must be set to #' false for consistency. #' * `cli.condition_width = Inf` so that new lines introduced while #' width-wrapping condition messages don't interfere with message matching. #' * `crayon.enabled` (default: `FALSE`) suppresses ANSI colours generated by #' the crayon package (normally colours are used if crayon detects that you're #' in a terminal that supports colour). #' * `cli.num_colors` (default: `1L`) Same as the crayon option. #' * `lifecycle_verbosity = "warning"` so that every lifecycle problem always #' generates a warning (otherwise deprecated functions don't generate a #' warning every time). #' * `max.print = 99999` so the same number of values are printed. #' * `OutDec = "."` so numbers always use `.` as the decimal point #' (European users sometimes set `OutDec = ","`). #' * `rlang_interactive = FALSE` so that [rlang::is_interactive()] returns #' `FALSE`, and code that uses it pretends you're in a non-interactive #' environment. #' * `useFancyQuotes = FALSE` so base R functions always use regular (straight) #' quotes (otherwise the default is locale dependent, see [sQuote()] for #' details). #' * `width` (default: 80) to control the width of printed output (usually this #' varies with the size of your console). #' #' And modifies the following env vars: #' #' * Unsets `RSTUDIO`, which ensures that RStudio is never detected as running. #' * Sets `LANGUAGE = "en"`, which ensures that no message translation occurs. #' #' Finally, it sets the collation locale to "C", which ensures that character #' sorting is the same regardless of system locale. #' #' @export #' @param .env Environment to use for scoping; expert use only. #' @examples #' local({ #' local_test_context() #' cat(crayon::blue("Text will not be colored")) #' cat(cli::symbol$ellipsis) #' cat("\n") #' }) local_test_context <- function(.env = parent.frame()) { withr::local_envvar(TESTTHAT = "true", .local_envir = .env) if (edition_get() >= 3) { local_reproducible_output(.env = .env) } } #' @export #' @param width Value of the `"width"` option. #' @param crayon Value of the `"crayon.enabled"` option. #' @param unicode Value of the `"cli.unicode"` option. #' The test is skipped if `` l10n_info()$`UTF-8` `` is `FALSE`. #' @param lang Optionally, supply a BCP47 language code to set the language #' used for translating error messages. This is a lower case two letter #' [ISO 639 language code](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes), #' optionally followed by "_" or "-" and an upper case two letter #' [ISO 3166 region code](https://en.wikipedia.org/wiki/ISO_3166-2).
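#' For example (sketch): `local_reproducible_output(lang = "fr")` makes base R #' error messages inside the test appear in French, assuming the French #' translations are installed.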
#' @rdname local_test_context #' @examples #' test_that("test ellipsis", { #' local_reproducible_output(unicode = FALSE) #' expect_equal(cli::symbol$ellipsis, "...") #' #' local_reproducible_output(unicode = TRUE) #' expect_equal(cli::symbol$ellipsis, "\u2026") #' }) local_reproducible_output <- function(width = 80, crayon = FALSE, unicode = FALSE, lang = "en", .env = parent.frame()) { if (unicode) { # If you force unicode display, you _must_ skip the test on non-utf8 # locales; otherwise it's guaranteed to fail skip_if(!l10n_info()$`UTF-8`, "non utf8 locale") } local_width(width = width, .env = .env) withr::local_options( crayon.enabled = crayon, cli.dynamic = FALSE, cli.unicode = unicode, cli.condition_width = Inf, cli.num_colors = if (crayon) 8L else 1L, useFancyQuotes = FALSE, lifecycle_verbosity = "warning", OutDec = ".", rlang_interactive = FALSE, max.print = 99999, .local_envir = .env ) withr::local_envvar(RSTUDIO = NA, .local_envir = .env) withr::local_language(lang, .local_envir = .env) withr::local_collate("C", .local_envir = .env) } waldo_compare <- function(x, y, ..., x_arg = "x", y_arg = "y") { reporter <- get_reporter() if (!is.null(reporter)) { # Need to very carefully isolate this change to this function - cannot set # in expectation functions because part of expectation handling bubbles # up through calling handlers, which are run before on.exit() reporter$local_user_output() } waldo::compare(x, y, ..., x_arg = x_arg, y_arg = y_arg) } local_width <- function(width = 80, .env = parent.frame()) { withr::local_options(width = width, cli.width = width, .local_envir = .env) withr::local_envvar(RSTUDIO_CONSOLE_WIDTH = width, .local_envir = .env) } #' Locally set test directory options #' #' For expert use only. #' #' @param path Path to directory of files #' @param package Optional package name, if known. #' @export #' @keywords internal local_test_directory <- function(path, package = NULL, .env = parent.frame()) { # Set edition before changing working directory in case path is relative local_edition(find_edition(path, package), .env = .env) rlang_dep <- find_dep_version("rlang", path, package) withr::local_options( "testthat:::rlang_dep" = rlang_dep, .local_envir = .env ) withr::local_dir( path, .local_envir = .env ) withr::local_envvar( R_TESTS = "", TESTTHAT = "true", TESTTHAT_PKG = package, .local_envir = .env ) } local_interactive_reporter <- function(.env = parent.frame()) { # Definitely not on CRAN withr::local_envvar(NOT_CRAN = "true", .local_envir = .env) withr::local_options(testthat_interactive = TRUE, .local_envir = .env) # Use edition from working directory local_edition(find_edition("."), .env = .env) # Use StopReporter reporter <- StopReporter$new() old <- set_reporter(reporter) withr::defer(reporter$stop_if_needed(), envir = .env) withr::defer(set_reporter(old), envir = .env) reporter } testthat/R/expect-condition.R0000644000176200001440000003350714167646004015726 0ustar liggesusers#' Does code throw an error, warning, message, or other condition? #' #' @description #' `expect_error()`, `expect_warning()`, `expect_message()`, and #' `expect_condition()` check that code throws an error, warning, message, #' or condition with a message that matches `regexp`, or a class that inherits #' from `class`. See below for more details. #' #' In the 3rd edition, these functions match (at most) a single condition. All #' additional and non-matching (if `regexp` or `class` are used) conditions #' will bubble up outside the expectation.
If these additional conditions #' are important you'll need to catch them with additional #' `expect_message()`/`expect_warning()` calls; if they're unimportant you #' can ignore them with [suppressMessages()]/[suppressWarnings()]. #' #' It can be tricky to test for a combination of different conditions, #' such as a message followed by an error. [expect_snapshot()] is #' often an easier alternative for these more complex cases. #' #' @section Testing `message` vs `class`: #' When checking that code generates an error, it's important to check that the #' error is the one you expect. There are two ways to do this. The first #' way is the simplest: you just provide a `regexp` that matches some fragment #' of the error message. This is easy, but fragile, because the test will #' fail if the error message changes (even if it's the same error). #' #' A more robust way is to test for the class of the error, if it has one. #' You can learn more about custom conditions at #' <https://adv-r.hadley.nz/conditions.html#custom-conditions>, but in #' short, errors are S3 classes and you can generate a custom class and check #' for it using `class` instead of `regexp`. #' #' If you are using `expect_error()` to check that an error message is #' formatted in such a way that it makes sense to a human, we recommend #' using [expect_snapshot()] instead. #' #' @export #' @family expectations #' @inheritParams expect_that #' @param regexp Regular expression to test against. #' * A character vector giving a regular expression that must match the #' error message. #' * If `NULL`, the default, asserts that there should be an error, #' but doesn't test for a specific value. #' * If `NA`, asserts that there should be no errors. #' @inheritDotParams expect_match -object -regexp -info -label -all #' @param class Instead of supplying a regular expression, you can also supply #' a class name. This is useful for "classed" conditions. #' @param inherit Whether to match `regexp` and `class` across the #' ancestry of chained errors. #' @param all *DEPRECATED* If you need to test multiple warnings/messages #' you now need to use multiple calls to `expect_message()`/ #' `expect_warning()`. #' @return If `regexp = NA`, the value of the first argument; otherwise #' the captured condition.
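#' #' @section Classed condition example: #' A minimal sketch of the class-based approach described above (`my_error()` and the `mypkg_bad_input` class are invented for illustration): #' ```R #' my_error <- function() rlang::abort("bad input", class = "mypkg_bad_input") #' expect_error(my_error(), class = "mypkg_bad_input") #' ```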
#' @examples #' # Errors ------------------------------------------------------------------ #' f <- function() stop("My error!") #' expect_error(f()) #' expect_error(f(), "My error!") #' #' # You can use the arguments of grepl to control the matching #' expect_error(f(), "my error!", ignore.case = TRUE) #' #' # Note that `expect_error()` returns the error object so you can test #' # its components if needed #' err <- expect_error(rlang::abort("a", n = 10)) #' expect_equal(err$n, 10) #' #' # Warnings ------------------------------------------------------------------ #' f <- function(x) { #' if (x < 0) { #' warning("*x* is already negative") #' return(x) #' } #' -x #' } #' expect_warning(f(-1)) #' expect_warning(f(-1), "already negative") #' expect_warning(f(1), NA) #' #' # To test message and output, store results to a variable #' expect_warning(out <- f(-1), "already negative") #' expect_equal(out, -1) #' #' # Messages ------------------------------------------------------------------ #' f <- function(x) { #' if (x < 0) { #' message("*x* is already negative") #' return(x) #' } #' #' -x #' } #' expect_message(f(-1)) #' expect_message(f(-1), "already negative") #' expect_message(f(1), NA) expect_error <- function(object, regexp = NULL, class = NULL, ..., inherit = TRUE, info = NULL, label = NULL) { if (edition_get() >= 3) { expect_condition_matching("error", {{ object }}, regexp = regexp, class = class, ..., inherit = inherit, info = info, label = label ) } else { act <- quasi_capture(enquo(object), label, capture_error, entrace = TRUE) msg <- compare_condition_2e( act$cap, act$lab, regexp = regexp, class = class, ..., inherit = inherit ) # Access error fields with `[[` rather than `$` because the # `$.Throwable` from the rJava package throws with unknown fields expect(is.null(msg), msg, info = info, trace = act$cap[["trace"]]) invisible(act$val %||% act$cap) } } #' @export #' @rdname expect_error expect_warning <- function(object, regexp = NULL, class = NULL, ..., inherit = TRUE, all = FALSE, info = NULL, label = NULL) { if (edition_get() >= 3) { if (!missing(all)) { warn("The `all` argument is deprecated") } expect_condition_matching("warning", {{ object }}, regexp = regexp, class = class, ..., inherit = inherit, info = info, label = label, trace_env = caller_env() ) } else { act <- quasi_capture(enquo(object), label, capture_warnings) msg <- compare_messages( act$cap, act$lab, regexp = regexp, all = all, ..., cond_type = "warnings" ) expect(is.null(msg), msg, info = info) invisible(act$val) } } #' @export #' @rdname expect_error expect_message <- function(object, regexp = NULL, class = NULL, ..., inherit = TRUE, all = FALSE, info = NULL, label = NULL) { if (edition_get() >= 3) { expect_condition_matching("message", {{ object }}, regexp = regexp, class = class, ..., inherit = inherit, info = info, label = label, trace_env = caller_env() ) } else { act <- quasi_capture(enquo(object), label, capture_messages) msg <- compare_messages(act$cap, act$lab, regexp = regexp, all = all, ...) 
expect(is.null(msg), msg, info = info) invisible(act$val) } } #' @export #' @rdname expect_error expect_condition <- function(object, regexp = NULL, class = NULL, ..., inherit = TRUE, info = NULL, label = NULL) { if (edition_get() >= 3) { expect_condition_matching("condition", {{ object }}, regexp = regexp, class = class, ..., inherit = inherit, info = info, label = label, trace_env = caller_env() ) } else { act <- quasi_capture(enquo(object), label, capture_condition, entrace = TRUE) msg <- compare_condition_2e( act$cap, act$lab, regexp = regexp, class = class, ..., inherit = inherit, cond_type = "condition" ) expect(is.null(msg), msg, info = info, trace = act$cap[["trace"]]) invisible(act$val %||% act$cap) } } expect_condition_matching <- function(base_class, object, regexp = NULL, class = NULL, ..., inherit = TRUE, info = NULL, label = NULL, trace_env = caller_env()) { ellipsis::check_dots_used(action = warn) matcher <- cnd_matcher( class %||% base_class, regexp, ..., inherit = inherit ) act <- quasi_capture( enquo(object), label, capture_matching_condition, matches = matcher ) expected <- !identical(regexp, NA) msg <- compare_condition_3e(base_class, act$cap, act$lab, expected) # Access error fields with `[[` rather than `$` because the # `$.Throwable` from the rJava package throws with unknown fields expect(is.null(msg), msg, info = info, trace = act$cap[["trace"]], trace_env = trace_env) # If a condition was expected, return it. Otherwise return the value # of the expression. invisible(if (expected) act$cap else act$val) } # ------------------------------------------------------------------------- cnd_matcher <- function(class, pattern = NULL, ..., inherit = TRUE) { if (!is_string(class)) { abort("`class` must be a single string") } if (!is_string(pattern) && !is.null(pattern) && !isNA(pattern)) { abort("`pattern` must be a single string, NULL, or NA") } function(cnd) { if (!inherit) { cnd$parent <- NULL } if (is.null(pattern) || isNA(pattern)) { cnd_inherits(cnd, class) } else { cnd_matches(cnd, class, pattern, ...) } } } cnd_inherits <- function(cnd, class) { cnd_some(cnd, ~ inherits(.x, class)) } cnd_matches <- function(cnd, class, pattern, ...) { cnd_some(cnd, function(x) { inherits(x, class) && grepl(pattern, conditionMessage(x), ...) }) } cnd_some <- function(.cnd, .p, ...) 
{ .p <- as_function(.p) while (is_condition(.cnd)) { if (.p(.cnd, ...)) { return(TRUE) } .cnd <- .cnd$parent } FALSE } capture_matching_condition <- function(expr, matches) { matched <- NULL tl <- current_env() withCallingHandlers(expr, condition = function(cnd) { if (!is.null(matched) || !matches(cnd)) { return() } if (can_entrace(cnd)) { cnd <- cnd_entrace(cnd) } matched <<- cnd if (inherits(cnd, "message") || inherits(cnd, "warning")) { cnd_muffle(cnd) } else if (inherits(cnd, "error") || inherits(cnd, "skip")) { return_from(tl, cnd) } }) matched } # Helpers ----------------------------------------------------------------- compare_condition_3e <- function(cond_type, cond, lab, expected) { if (expected) { if (is.null(cond)) { sprintf("%s did not throw the expected %s.", lab, cond_type) } else { NULL } } else { if (!is.null(cond)) { sprintf( "%s threw an unexpected %s.\nMessage: %s\nClass: %s", lab, cond_type, cnd_message(cond), paste(class(cond), collapse = "/") ) } else { NULL } } } compare_condition_2e <- function(cond, lab, regexp = NULL, class = NULL, ..., inherit = TRUE, cond_type = "error") { # Expecting no condition if (identical(regexp, NA)) { if (!is.null(cond)) { return(sprintf( "%s threw an %s.\nMessage: %s\nClass: %s", lab, cond_type, cnd_message(cond), paste(class(cond), collapse = "/") )) } else { return() } } # Otherwise we're definitely expecting a condition if (is.null(cond)) { return(sprintf("%s did not throw an %s.", lab, cond_type)) } matches <- cnd_matches_2e(cond, class, regexp, inherit, ...) ok_class <- matches[["class"]] ok_msg <- matches[["msg"]] # All good if (ok_msg && ok_class) { return() } problems <- c(if (!ok_class) "class", if (!ok_msg) "message") message <- cnd_message(cond) details <- c( if (!ok_class) { sprintf( "Expected class: %s\nActual class: %s\nMessage: %s", paste0(class, collapse = "/"), paste0(class(cond), collapse = "/"), message ) }, if (!ok_msg) { sprintf( "Expected match: %s\nActual message: %s", encodeString(regexp, quote = '"'), encodeString(message, quote = '"') ) } ) sprintf( "%s threw an %s with unexpected %s.\n%s", lab, cond_type, paste(problems, collapse = " and "), paste(details, collapse = "\n") ) } cnd_matches_2e <- function(cnd, class, regexp, inherit, ...) { if (!inherit) { cnd$parent <- NULL } ok_class <- is.null(class) || cnd_inherits(cnd, class) ok_msg <- is.null(regexp) || cnd_some(cnd, function(x) { any(grepl(regexp, cnd_message(x), ...)) }) c(class = ok_class, msg = ok_msg) } compare_messages <- function(messages, lab, regexp = NA, ..., all = FALSE, cond_type = "messages") { bullets <- paste0("* ", messages, collapse = "\n") # Expecting no messages if (identical(regexp, NA)) { if (length(messages) > 0) { return(sprintf("%s generated %s:\n%s", lab, cond_type, bullets)) } else { return() } } # Otherwise we're definitely expecting messages if (length(messages) == 0) { return(sprintf("%s did not produce any %s.", lab, cond_type)) } if (is.null(regexp)) { return() } matched <- grepl(regexp, messages, ...) 
# all/any ok if ((all && all(matched)) || (!all && any(matched))) { return() } sprintf( "%s produced unexpected %s.\n%s\n%s", lab, cond_type, paste0("Expected match: ", encodeString(regexp)), paste0("Actual values:\n", bullets) ) } # Disable rlang backtrace reminders so they don't interfere with # expected error messages cnd_message <- function(x) { withr::local_options(rlang_backtrace_on_error = "none") conditionMessage(x) } testthat/R/reporter-list.R0000644000176200001440000001252214164710002015246 0ustar liggesusersmethods::setOldClass("proc_time") #' List reporter: gather all test results along with elapsed time and #' file information. #' #' This reporter gathers all results, adding additional information such as #' test elapsed time, and test filename if available. Very useful for reporting. #' #' @export #' @family reporters ListReporter <- R6::R6Class("ListReporter", inherit = Reporter, public = list( current_start_time = NA, current_expectations = NULL, current_file = NULL, results = NULL, initialize = function() { super$initialize() self$capabilities$parallel_support <- TRUE self$results <- Stack$new() }, start_test = function(context, test) { self$current_expectations <- Stack$new() self$current_start_time <- proc.time() }, add_result = function(context, test, result) { if (is.null(self$current_expectations)) { # we received a result outside of a test: # could be a bare expectation or an exception/error if (!inherits(result, 'error')) { return() } self$current_expectations <- Stack$new() } self$current_expectations$push(result) }, end_test = function(context, test) { elapsed <- as.double(proc.time() - self$current_start_time) results <- list() if (!is.null(self$current_expectations)) results <- self$current_expectations$as_list() self$results$push(list( file = self$current_file %||% NA_character_, context = context, test = test, user = elapsed[1], system = elapsed[2], real = elapsed[3], results = results )) self$current_expectations <- NULL }, start_file = function(name) { self$current_file <- name }, end_file = function() { # fallback in case we have errors but no expectations self$end_context(self$current_file) }, end_context = function(context) { results <- self$current_expectations if (is.null(results)) { return() } self$current_expectations <- NULL # look for exceptions raised outside of tests # they happened just before end_context since they interrupt the test_file execution results <- results$as_list() if (length(results) == 0) return() self$results$push(list( file = self$current_file %||% NA_character_, context = context, test = NA_character_, user = NA_real_, system = NA_real_, real = NA_real_, results = results )) }, get_results = function() { testthat_results(self$results$as_list()) } ) ) #' Create a `testthat_results` object from the test results #' as stored in the ListReporter results field. #' #' @param results a list as stored in ListReporter #' @return its list argument as a `testthat_results` object #' @seealso ListReporter #' @keywords internal testthat_results <- function(results) { stopifnot(is.list(results)) structure(results, class = "testthat_results") } # return if all tests are successful w/o error all_passed <- function(res) { if (length(res) == 0) { return(TRUE) } df <- as.data.frame.testthat_results(res) sum(df$failed) == 0 && all(!df$error) } any_warnings <- function(res) { if (length(res) == 0) { return(FALSE) } df <- as.data.frame.testthat_results(res) any(df$warning > 0) } #' @export as.data.frame.testthat_results <- function(x, ...) 
{ if (length(x) == 0) { return( data.frame( file = character(0), context = character(0), test = character(0), nb = integer(0), failed = integer(0), skipped = logical(0), error = logical(0), warning = integer(0), user = numeric(0), system = numeric(0), real = numeric(0), passed = integer(0), result = list(), stringsAsFactors = FALSE ) ) } rows <- lapply(x, summarize_one_test_results) do.call(rbind, rows) } summarize_one_test_results <- function(test) { test_results <- test$results nb_tests <- length(test_results) nb_failed <- nb_skipped <- nb_warning <- nb_passed <- 0L error <- FALSE if (nb_tests > 0) { # error reports should be handled differently. # They may not correspond to an expect_that() test so remove them last_test <- test_results[[nb_tests]] error <- expectation_error(last_test) if (error) { test_results <- test_results[-nb_tests] nb_tests <- length(test_results) } nb_passed <- sum(vapply(test_results, expectation_success, logical(1))) nb_skipped <- sum(vapply(test_results, expectation_skip, logical(1))) nb_failed <- sum(vapply(test_results, expectation_failure, logical(1))) nb_warning <- sum(vapply(test_results, expectation_warning, logical(1))) } context <- if (length(test$context) > 0) test$context else "" res <- data.frame( file = test$file, context = context, test = test$test, nb = nb_tests, failed = nb_failed, skipped = as.logical(nb_skipped), error = error, warning = nb_warning, user = test$user, system = test$system, real = test$real, stringsAsFactors = FALSE ) # Added at end for backward compatibility res$passed <- nb_passed # Cannot easily add list columns in data.frame() res$result <- list(test_results) res } #' @export print.testthat_results <- function(x, ...) { print(as.data.frame(x)) } testthat/R/compare.R0000644000176200001440000002260214164710002014061 0ustar liggesusers#' Provide human-readable comparison of two objects #' #' @description #' `r lifecycle::badge("superseded")` #' #' `compare` is similar to [base::all.equal()], but somewhat buggy in its #' use of `tolerance`. Please use [waldo](https://waldo.r-lib.org/) instead. #' #' @export #' @param x,y Objects to compare #' @param ... Additional arguments used to control specifics of comparison #' @keywords internal #' @order 1 compare <- function(x, y, ...) { UseMethod("compare", x) } comparison <- function(equal = TRUE, message = "Equal") { stopifnot(is.logical(equal), length(equal) == 1) stopifnot(is.character(message)) structure( list( equal = equal, message = paste(message, collapse = "\n") ), class = "comparison" ) } difference <- function(..., fmt = "%s") { comparison(FALSE, sprintf(fmt, ...)) } no_difference <- function() { comparison() } #' @export print.comparison <- function(x, ...) { if (x$equal) { cat("Equal\n") return() } cat(x$message) } #' @export #' @rdname compare #' @order 2 compare.default <- function(x, y, ..., max_diffs = 9) { same <- all.equal(x, y, ...) if (length(same) > max_diffs) { same <- c(same[1:max_diffs], "...") } comparison(identical(same, TRUE), as.character(same)) } print_out <- function(x, ...) 
{ lines <- capture_output_lines(x, ..., print = TRUE) paste0(lines, collapse = "\n") } # Common helpers --------------------------------------------------------------- same_length <- function(x, y) length(x) == length(y) diff_length <- function(x, y) difference(fmt = "Lengths differ: %i is not %i", length(x), length(y)) same_type <- function(x, y) identical(typeof(x), typeof(y)) diff_type <- function(x, y) difference(fmt = "Types not compatible: %s is not %s", typeof(x), typeof(y)) same_class <- function(x, y) { if (!is.object(x) && !is.object(y)) { return(TRUE) } identical(class(x), class(y)) } diff_class <- function(x, y) { difference(fmt = "Classes differ: %s is not %s", format_class(class(x)), format_class(class(y))) } same_attr <- function(x, y) { is.null(attr.all.equal(x, y)) } diff_attr <- function(x, y) { out <- attr.all.equal(x, y) difference(out) } vector_equal <- function(x, y) { (is.na(x) & is.na(y)) | (!is.na(x) & !is.na(y) & x == y) } vector_equal_tol <- function(x, y, tolerance = .Machine$double.eps ^ 0.5) { (is.na(x) & is.na(y)) | (!is.na(x) & !is.na(y)) & (x == y | abs(x - y) < tolerance) } # character --------------------------------------------------------------- #' @param max_diffs Maximum number of differences to show #' @param max_lines Maximum number of lines to show from each difference #' @param check.attributes If `TRUE`, also checks values of attributes. #' @param width Width of output device #' @rdname compare #' @export #' @examples #' # Character ----------------------------------------------------------------- #' x <- c("abc", "def", "jih") #' compare(x, x) #' #' y <- paste0(x, "y") #' compare(x, y) #' #' compare(letters, paste0(letters, "-")) #' #' x <- "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis cursus #' tincidunt auctor. Vestibulum ac metus bibendum, facilisis nisi non, pulvinar #' dolor. Donec pretium iaculis nulla, ut interdum sapien ultricies a. " #' y <- "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis cursus #' tincidunt auctor. Vestibulum ac metus1 bibendum, facilisis nisi non, pulvinar #' dolor. Donec pretium iaculis nulla, ut interdum sapien ultricies a. 
" #' compare(x, y) #' compare(c(x, x), c(y, y)) #' compare.character <- function(x, y, check.attributes = TRUE, ..., max_diffs = 5, max_lines = 5, width = cli::console_width()) { if (identical(x, y)) { return(no_difference()) } if (!same_type(x, y)) { return(diff_type(x, y)) } if (!same_class(x, y)) { return(diff_class(x, y)) } if (!same_length(x, y)) { return(diff_length(x, y)) } if (check.attributes && !same_attr(x, y)) { return(diff_attr(x, y)) } diff <- !vector_equal(x, y) if (!any(diff)) { no_difference() } else { mismatches <- mismatch_character(x, y, diff) difference(format( mismatches, max_diffs = max_diffs, max_lines = max_lines, width = width )) } } mismatch_character <- function(x, y, diff = !vector_equal(x, y)) { structure( list( i = which(diff), x = x[diff], y = y[diff], n = length(diff), n_diff = sum(diff) ), class = "mismatch_character" ) } #' @export format.mismatch_character <- function(x, ..., max_diffs = 5, max_lines = 5, width = cli::console_width()) { width <- width - 6 # allocate space for labels n_show <- seq_len(min(x$n_diff, max_diffs)) encode <- function(x) encodeString(x, quote = '"') show_x <- str_trunc(encode(x$x[n_show]), width * max_lines) show_y <- str_trunc(encode(x$y[n_show]), width * max_lines) show_i <- x$i[n_show] sidebyside <- Map(function(x, y, pos) { x <- paste0("x[", pos, "]: ", str_chunk(x, width)) y <- paste0("y[", pos, "]: ", str_chunk(y, width)) paste(c(x, y), collapse = "\n") }, show_x, show_y, show_i) summary <- paste0(x$n_diff, "/", x$n, " mismatches") paste0(summary, "\n", paste0(sidebyside, collapse = "\n\n")) } #' @export print.mismatch_character <- function(x, ...) { cat(format(x, ...), "\n", sep = "") } str_trunc <- function(x, length) { too_long <- nchar(x) > length x[too_long] <- paste0(substr(x[too_long], 1, length - 3), "...") x } str_chunk <- function(x, length) { lines <- ceiling(nchar(x) / length) start <- (seq_len(lines) - 1) * length + 1 substring(x, start, start + length - 1) } # compare.numeric --------------------------------------------------------- #' @export #' @rdname compare #' @param tolerance Numerical tolerance: any differences (in the sense of #' [base::all.equal()]) smaller than this value will be ignored. #' #' The default tolerance is `sqrt(.Machine$double.eps)`, unless long doubles #' are not available, in which case the test is skipped. #' @examples #' # Numeric ------------------------------------------------------------------- #' #' x <- y <- runif(100) #' y[sample(100, 10)] <- 5 #' compare(x, y) #' #' x <- y <- 1:10 #' x[5] <- NA #' x[6] <- 6.5 #' compare(x, y) #' #' # Compare ignores minor numeric differences in the same way #' # as all.equal. #' compare(x, x + 1e-9) compare.numeric <- function(x, y, tolerance = testthat_tolerance(), check.attributes = TRUE, ..., max_diffs = 9) { all_equal <- all.equal( x, y, tolerance = tolerance, check.attributes = check.attributes, ... 
) if (isTRUE(all_equal)) { return(no_difference()) } if (!typeof(y) %in% c("integer", "double")) { return(diff_type(x, y)) } if (!same_class(x, y)) { return(diff_class(x, y)) } if (!same_length(x, y)) { return(diff_length(x, y)) } if (check.attributes && !same_attr(x, y)) { return(diff_attr(x, y)) } diff <- !vector_equal_tol(x, y, tolerance = tolerance) if (!any(diff)) { no_difference() } else { mismatches <- mismatch_numeric(x, y, diff) difference(format(mismatches, max_diffs = max_diffs)) } } #' @export #' @rdname compare testthat_tolerance <- function() { if (identical(capabilities("long.double"), FALSE)) { skip("Long doubles not available and `tolerance` not supplied") } .Machine$double.eps ^ 0.5 } mismatch_numeric <- function(x, y, diff = !vector_equal(x, y)) { structure( list( i = which(diff), x = x[diff], y = y[diff], n = length(diff), n_diff = sum(diff), mu_diff = mean(abs(x[diff] - y[diff]), na.rm = TRUE) ), class = "mismatch_numeric" ) } #' @export format.mismatch_numeric <- function(x, ..., max_diffs = 9, digits = 3) { summary <- paste0(x$n_diff, "/", x$n, " mismatches") if (x$n_diff > 1) { mu <- format(x$mu_diff, digits = digits, trim = TRUE) summary <- paste0(summary, " (average diff: ", mu, ")") } n_show <- seq_len(min(x$n_diff, max_diffs)) diffs <- paste0( format(paste0("[", x$i[n_show], "]")), " ", format(x$x[n_show], digits = digits), " - ", format(x$y[n_show], digits = digits), " == ", format(x$x[n_show] - x$y[n_show], digits = digits) ) if (x$n_diff > length(n_show)) { diffs <- c(diffs, "...") } paste0(summary, "\n", paste(diffs, collapse = "\n")) } #' @export print.mismatch_numeric <- function(x, ...) { cat(format(x, ...), "\n", sep = "") } # compare.time ------------------------------------------------------------ #' @rdname compare #' @export compare.POSIXt <- function(x, y, tolerance = 0.001, ..., max_diffs = 9) { if (!inherits(y, "POSIXt")) { return(diff_class(x, y)) } if (!same_length(x, y)) { return(diff_length(x, y)) } x <- standardise_tzone(as.POSIXct(x)) y <- standardise_tzone(as.POSIXct(y)) if (!same_attr(x, y)) { return(diff_attr(x, y)) } diff <- !vector_equal_tol(x, y, tolerance = tolerance) if (!any(diff)) { no_difference() } else { mismatches <- mismatch_numeric(x, y, diff) difference(format(mismatches, max_diffs = max_diffs)) } } standardise_tzone <- function(x) { if (is.null(attr(x, "tzone")) || identical(attr(x, "tzone"), Sys.timezone())) { attr(x, "tzone") <- "" } x } testthat/R/stack.R0000644000176200001440000000264313201340454013543 0ustar liggesusers# Source: https://github.com/rstudio/shiny/blob/master/R/stack.R # License: GPL-3 # Relicensed as MIT with permission. # A Stack object backed by a list. The backing list will grow or shrink as # the stack changes in size. Stack <- R6Class( "Stack", class = FALSE, public = list( initialize = function(init = 20L) { # init is the initial size of the list. It is also used as the minimum # size of the list as it shrinks.
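# e.g. with the default init = 20L the backing list starts as 20 NULL slots; # push() below doubles the allocation whenever the new count would exceed it # (20 -> 40 -> 80 -> ...), keeping pushes amortised O(1).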
private$stack <- vector("list", init) private$init <- init private$count <- 0L }, push = function(..., .list = NULL) { args <- c(list(...), .list) new_size <- private$count + length(args) # Grow if needed; double in size while (new_size > length(private$stack)) { private$stack[length(private$stack) * 2L] <- list(NULL) } private$stack[private$count + seq_along(args)] <- args private$count <- new_size invisible(self) }, size = function() { private$count }, # Return the entire stack as a list, where the first item in the list is the # oldest item in the stack, and the last item is the most recently added. as_list = function() { private$stack[seq_len(private$count)] } ), private = list( stack = NULL, # A list that holds the items count = 0L, # Current number of items in the stack init = 20L # Initial and minimum size of the stack ) ) testthat/R/make-expectation.R0000644000176200001440000000137513201340454015676 0ustar liggesusers#' Make an equality test. #' #' This is a convenience function to make an expectation that checks that #' input stays the same. #' #' @param x a vector of values #' @param expectation the type of equality you want to test for #' (`"equals"`, `"is_equivalent_to"`, `"is_identical_to"`) #' @export #' @keywords internal #' @examples #' x <- 1:10 #' make_expectation(x) #' #' make_expectation(mtcars$mpg) #' #' df <- data.frame(x = 2) #' make_expectation(df) make_expectation <- function(x, expectation = "equals") { obj <- substitute(x) expectation <- match.arg( expectation, c("equals", "is_equivalent_to", "is_identical_to") ) dput(substitute( expect_equal(obj, values), list(obj = obj, expectation = as.name(expectation), values = x) )) } testthat/R/skip.R0000644000176200001440000001475014165635513013414 0ustar liggesusers#' Skip a test #' #' @description #' `skip_if()` and `skip_if_not()` allow you to skip tests, immediately #' concluding a [test_that()] block without executing any further expectations. #' This allows you to skip a test without failure, if for some reason it #' can't be run (e.g. it depends on a feature of a specific operating system, #' or it requires a specific version of a package). #' #' See `vignette("skipping")` for more details. #' #' @section Helpers: #' #' * `skip_if_not_installed("pkg")` skips tests if package "pkg" is not #' installed or cannot be loaded (using `requireNamespace()`). Generally, #' you can assume that suggested packages are installed, and you do not #' need to check for them specifically, unless they are particularly #' difficult to install. #' #' * `skip_if_offline()` skips if an internet connection is not available #' (using [curl::nslookup()]) or if the test is run on CRAN. #' #' * `skip_if_translated("msg")` skips tests if the "msg" is translated. #' #' * `skip_on_bioc()` skips on Bioconductor (using the `BBS_HOME` env var). #' #' * `skip_on_cran()` skips on CRAN (using the `NOT_CRAN` env var set by #' devtools and friends). #' #' * `skip_on_covr()` skips when covr is running (using the `R_COVR` env var). #' #' * `skip_on_ci()` skips on continuous integration systems like GitHub Actions, #' Travis, and AppVeyor (using the `CI` env var). It supersedes the older #' `skip_on_travis()` and `skip_on_appveyor()` functions. #' #' * `skip_on_os()` skips on the specified operating system(s) ("windows", #' "mac", "linux", or "solaris"). #' #' @param message A message describing why the test was skipped.
#' @param host A string with a hostname to lookup #' @export #' @examples #' if (FALSE) skip("No internet connection") #' #' test_that("skip example", { #' expect_equal(1, 1L) # this expectation runs #' skip('skip') #' expect_equal(1, 2) # this one skipped #' expect_equal(1, 3) # this one is also skipped #' }) skip <- function(message) { message <- paste0(message, collapse = "\n") cond <- structure( list(message = paste0("Reason: ", message)), class = c("skip", "condition") ) stop(cond) } # Called automatically if the test contains no expectations skip_empty <- function() { cond <- structure( list(message = "Reason: empty test"), class = c("skip_empty", "skip", "condition") ) stop(cond) } #' @export #' @rdname skip #' @param condition Boolean condition to check. `skip_if_not()` will skip if #' `FALSE`, `skip_if()` will skip if `TRUE`. skip_if_not <- function(condition, message = NULL) { if (is.null(message)) { message <- paste0(deparse1(substitute(condition)), " is not TRUE") } if (!isTRUE(condition)) { skip(message) } } #' @export #' @rdname skip skip_if <- function(condition, message = NULL) { if (is.null(message)) { message <- paste0(deparse1(substitute(condition)), " is TRUE") } if (isTRUE(condition)) { skip(message) } } #' @export #' @param pkg Name of package to check for #' @param minimum_version Minimum required version for the package #' @rdname skip skip_if_not_installed <- function(pkg, minimum_version = NULL) { if (!requireNamespace(pkg, quietly = TRUE)) { skip(paste0(pkg, " cannot be loaded")) } if (!is.null(minimum_version)) { installed_version <- utils::packageVersion(pkg) if (installed_version < minimum_version) { skip(paste0( "Installed ", pkg, " is version ", installed_version, "; ", "but ", minimum_version, " is required" )) } } return(invisible(TRUE)) } #' @export #' @rdname skip skip_if_offline <- function(host = "r-project.org") { skip_on_cran() skip_if_not_installed("curl") has_internet <- !is.null(curl::nslookup(host, error = FALSE)) if (!has_internet) { skip("offline") } } #' @export #' @rdname skip skip_on_cran <- function() { skip_if(on_cran(), "On CRAN") } on_cran <- function() !identical(Sys.getenv("NOT_CRAN"), "true") #' @export #' @param os Character vector of one or more operating systems to skip on. #' Supported values are `"windows"`, `"mac"`, `"linux"`, and `"solaris"`. #' @param arch Character vector of one or more architectures to skip on. #' Common values include `"i386"` (32 bit), `"x86_64"` (64 bit), and #' `"aarch64"` (M1 mac). Supplying `arch` makes the test stricter; i.e. both #' `os` and `arch` must match in order for the test to be skipped. 
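#' For example (sketch): `skip_on_os("mac", arch = "aarch64")` skips only on #' Apple Silicon Macs, while plain `skip_on_os("mac")` skips on every Mac.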
#' @rdname skip skip_on_os <- function(os, arch = NULL) { os <- match.arg( os, choices = c("windows", "mac", "linux", "solaris"), several.ok = TRUE ) msg <- switch(system_os(), windows = if ("windows" %in% os) "On Windows", darwin = if ("mac" %in% os) "On Mac", linux = if ("linux" %in% os) "On Linux", sunos = if ("solaris" %in% os) "On Solaris" ) if (!is.null(arch) && !is.null(msg)) { if (!is.character(arch)) { abort("`arch` must be a character vector") } if (system_arch() %in% arch) { msg <- paste(msg, system_arch()) } else { msg <- NULL } } if (is.null(msg)) { invisible(TRUE) } else { skip(msg) } } system_os <- function() tolower(Sys.info()[["sysname"]]) system_arch <- function() R.version$arch #' @export #' @rdname skip skip_on_travis <- function() { if (!identical(Sys.getenv("TRAVIS"), "true")) { return(invisible(TRUE)) } skip("On Travis") } #' @export #' @rdname skip skip_on_appveyor <- function() { if (!identical(Sys.getenv("APPVEYOR"), "True")) { return() } skip("On Appveyor") } #' @export #' @rdname skip skip_on_ci <- function() { if (!on_ci()) { return(invisible(TRUE)) } skip("On CI") } on_ci <- function() { isTRUE(as.logical(Sys.getenv("CI"))) } in_covr <- function() { identical(Sys.getenv("R_COVR"), "true") } #' @export #' @rdname skip skip_on_covr <- function() { if (! in_covr()) { return(invisible(TRUE)) } skip("On covr") } #' @export #' @rdname skip skip_on_bioc <- function() { if (identical(Sys.getenv("BBS_HOME"), "")) { return(invisible(TRUE)) } skip("On Bioconductor") } #' @export #' @param msgid R message identifier used to check for translation: the default #' uses a message included in most translation packs. See the complete list in #' [`R-base.pot`](https://github.com/wch/r-source/blob/master/src/library/base/po/R-base.pot). #' @rdname skip skip_if_translated <- function(msgid = "'%s' not found") { if (gettext(msgid, domain = "R") == msgid) { return(invisible(TRUE)) } skip(paste0("\"", msgid, "\" is translated")) } testthat/R/reporter-tap.R0000644000176200001440000000304414164710002015056 0ustar liggesusers#' Test reporter: TAP format. #' #' This reporter will output results in the Test Anything Protocol (TAP), #' a simple text-based interface between testing modules in a test harness. #' For more information about TAP, see http://testanything.org #' #' @export #' @family reporters TapReporter <- R6::R6Class("TapReporter", inherit = Reporter, public = list( results = list(), n = 0L, has_tests = FALSE, contexts = NA_character_, initialize = function(...) { super$initialize(...) 
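# TAP output is order-dependent and emitted in one block from end_reporter(), so results are buffered here rather than streamed as they arrive.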
self$capabilities$parallel_support <- TRUE }, start_context = function(context) { self$contexts[self$n + 1] <- context }, add_result = function(context, test, result) { self$has_tests <- TRUE self$n <- self$n + 1L self$results[[self$n]] <- result }, end_reporter = function() { if (!self$has_tests) { return() } self$cat_line("1..", self$n) for (i in 1:self$n) { if (!is.na(self$contexts[i])) { self$cat_line("# Context ", self$contexts[i]) } result <- self$results[[i]] if (expectation_success(result)) { self$cat_line("ok ", i, " ", result$test) } else if (expectation_broken(result)) { self$cat_line("not ok ", i, " ", result$test) msg <- gsub("(^|\n)", "\\1 ", format(result)) self$cat_line(msg) } else { self$cat_line( "ok ", i, " # ", toupper(expectation_type(result)), " ", format(result) ) } } } ) ) testthat/R/edition.R0000644000176200001440000000505114164710002014065 0ustar liggesusersfind_edition <- function(path, package = NULL) { from_environment <- Sys.getenv("TESTTHAT_EDITION") if (nzchar(from_environment)) { return(as.integer(from_environment)) } desc <- find_description(path, package) if (is.null(desc)) { return(2L) } as.integer(desc$get_field("Config/testthat/edition", default = 2L)) } find_description <- function(path, package = NULL) { if (!is.null(package)) { return(desc::desc(package = package)) } else { tryCatch( pkgload::pkg_desc(path), error = function(e) NULL ) } } edition_deprecate <- function(in_edition, what, instead = NULL) { if (edition_get() < in_edition) { return() } warn(c( paste0("`", what, "` was deprecated in ", edition_name(in_edition), "."), i = instead )) } edition_require <- function(in_edition, what) { if (edition_get() >= in_edition || isTRUE(getOption("testthat.edition_ignore"))) { return() } stop(paste0("`", what, "` requires ", edition_name(in_edition), ".")) } edition_name <- function(x) { if (x == 2) { "the 2nd edition" } else if (x == 3) { "the 3rd edition" } else { paste("edition ", x) } } #' Temporarily change the active testthat edition #' #' `local_edition()` allows you to temporarily (within a single test or #' a single test file) change the active edition of testthat. #' `edition_get()` allows you to retrieve the currently active edition. #' #' @export #' @param x Edition Should be a single integer. #' @param .env Environment that controls scope of changes. For expert use only. #' @keywords internal local_edition <- function(x, .env = parent.frame()) { stopifnot(is_zap(x) || (is.numeric(x) && length(x) == 1)) old <- edition_set(x) withr::defer(edition_set(old), envir = .env) } edition_set <- function(x) { env_poke(testthat_env, "edition", x) } #' @export #' @rdname local_edition edition_get <- function() { if (env_has(testthat_env, "edition")) { env_get(testthat_env, "edition", default = 2L) } else { find_edition(".") } } find_dep_version <- function(name, path, package = NULL) { desc <- find_description(path, package) if (is.null(desc)) { return(NULL) } deps <- desc$get_deps() i <- match(name, deps[["package"]]) if (is_na(i)) { return(NULL) } dep <- deps[[i, "version"]] dep <- strsplit(dep, " ")[[1]] if (!is_character(dep, 2) && !is_string(dep[[1]], ">=")) { return(NULL) } dep[[2]] } use_rlang_1_0 <- function() { ver <- peek_option("testthat:::rlang_dep") is_string(ver) && package_version(ver) >= "0.99.0.9001" } testthat/R/reporter-junit.R0000644000176200001440000001322614165635513015444 0ustar liggesusers# To allow the Java-style class name format that Jenkins prefers, # "package_name_or_domain.ClassName", allow "."s in the class name. 
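# e.g. (illustration) classnameOK("my tests: utils") returns "my_tests_utils"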
classnameOK <- function(text) { gsub("[^._A-Za-z0-9]+", "_", text) } #' Test reporter: summary of errors in jUnit XML format. #' #' This reporter includes detailed results about each test and summaries, #' written to a file (or stdout) in jUnit XML format. This can be read by #' the Jenkins Continuous Integration System to report on a dashboard etc. #' Requires the _xml2_ package. #' #' To fit into the jUnit structure, context() becomes the `<testsuite>` #' name as well as the base of the `<testcase> classname`. The #' test_that() name becomes the rest of the `<testcase> classname`. #' The deparsed expect_that() call becomes the `<testcase>` name. #' On failure, the message goes into the `<failure>` node message #' argument (first line only) and into its text content (full message). #' #' Execution time and some other details are also recorded. #' #' References for the jUnit XML format: #' \url{http://llg.cubic.org/docs/junit/} #' #' @export #' @family reporters JunitReporter <- R6::R6Class("JunitReporter", inherit = Reporter, public = list( results = NULL, timer = NULL, doc = NULL, errors = NULL, failures = NULL, skipped = NULL, tests = NULL, root = NULL, suite = NULL, suite_time = NULL, file_name = NULL, elapsed_time = function() { time <- (private$proctime() - self$timer)[["elapsed"]] self$timer <- private$proctime() time }, reset_suite = function() { self$errors <- 0 self$failures <- 0 self$skipped <- 0 self$tests <- 0 self$suite_time <- 0 }, start_reporter = function() { check_installed("xml2", "JunitReporter") self$timer <- private$proctime() self$doc <- xml2::xml_new_document() self$root <- xml2::xml_add_child(self$doc, "testsuites") self$reset_suite() }, start_file = function(file) { self$file_name <- file }, start_test = function(context, test) { if (is.null(context)) { context_start_file(self$file_name) } }, start_context = function(context) { self$suite <- xml2::xml_add_child( self$root, "testsuite", name = context, timestamp = private$timestamp(), hostname = private$hostname() ) }, end_context = function(context) { xml2::xml_attr(self$suite, "tests") <- as.character(self$tests) xml2::xml_attr(self$suite, "skipped") <- as.character(self$skipped) xml2::xml_attr(self$suite, "failures") <- as.character(self$failures) xml2::xml_attr(self$suite, "errors") <- as.character(self$errors) # jenkins junit plugin requires time has at most 3 digits xml2::xml_attr(self$suite, "time") <- as.character(round(self$suite_time, 3)) self$reset_suite() }, add_result = function(context, test, result) { self$tests <- self$tests + 1 time <- self$elapsed_time() self$suite_time <- self$suite_time + time # XML node for test case name <- test %||% "(unnamed)" testcase <- xml2::xml_add_child( self$suite, "testcase", time = toString(time), classname = classnameOK(context), name = classnameOK(name) ) first_line <- function(x) { loc <- expectation_location(x) paste0(strsplit(x$message, split = "\n")[[1]][1], " (", loc, ")") } # add an extra XML child node if not a success if (expectation_error(result)) { # "type" in Java is the exception class error <- xml2::xml_add_child(testcase, "error", type = "error", message = first_line(result)) xml2::xml_text(error) <- crayon::strip_style(format(result)) self$errors <- self$errors + 1 } else if (expectation_failure(result)) { # "type" in Java is the type of assertion that failed failure <- xml2::xml_add_child(testcase, "failure", type = "failure", message = first_line(result)) xml2::xml_text(failure) <- crayon::strip_style(format(result)) self$failures <- self$failures + 1 } else if (expectation_skip(result)) {
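# skips map to jUnit's <skipped> element so dashboards can distinguish them from failures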
        xml2::xml_add_child(testcase, "skipped", message = first_line(result))
        self$skipped <- self$skipped + 1
      }
    },

    end_reporter = function() {
      if (is.character(self$out)) {
        xml2::write_xml(self$doc, self$out, format = TRUE)
      } else if (inherits(self$out, "connection")) {
        file <- tempfile()
        xml2::write_xml(self$doc, file, format = TRUE)
        cat(brio::read_file(file), file = self$out)
      } else {
        stop("unsupported output type: ", toString(self$out))
      }
    } # end_reporter
  ), # public

  private = list(
    proctime = function() {
      proc.time()
    },
    timestamp = function() {
      strftime(Sys.time(), "%Y-%m-%dT%H:%M:%SZ", tz = "UTC")
    },
    hostname = function() {
      Sys.info()[["nodename"]]
    }
  ) # private
)

# Fix components of JunitReporter that otherwise vary from run-to-run
#
# The following functions need to be mocked out to run a unit test
# against static contents of reporters/junit.txt:
# - proctime  - originally wrapper for proc.time()
# - timestamp - originally wrapper for toString(Sys.time())
# - hostname  - originally wrapper for Sys.info()[["nodename"]]
#
JunitReporterMock <- R6::R6Class("JunitReporterMock",
  inherit = JunitReporter,
  public = list(),
  private = list(
    proctime = function() {
      c(user = 0, system = 0, elapsed = 0)
    },
    timestamp = function() {
      "1999:12:31 23:59:59"
    },
    hostname = function() {
      "nodename"
    }
  )
)
testthat/NEWS.md0000644000176200001440000017374514172347333013224 0ustar liggesusers
# testthat 3.1.2

* testthat now uses brio for all reading and writing (#1120). This ensures
  that snapshots always use "\n" to separate lines (#1516).

* `expect_snapshot()` no longer inadvertently trims trailing new lines off of
  errors and messages (#1509).

* If `expect_snapshot()` generates a snapshot with a different value that
  still compares as equal (e.g. because you've set a numeric tolerance), the
  saved values no longer update if another snapshot in the same file changes.

* `expect_snapshot()` now only adds a `.new` file for the variants that
  actually changed, not all variants, and `expect_snapshot_file()` with
  variants no longer immediately deletes `.new` files (#1468).

* `expect_snapshot_file()` gains a `transform` argument to match
  `expect_snapshot()` (#1474). `compare` now defaults to `NULL`,
  automatically guessing the comparison type based on the extension.

* `expect_snapshot_file()` now errors if the file being snapshotted does not
  exist; `SnapshotReporter` also now treats the file directory as an absolute
  path (#1476, @malcolmbarrett).

* New `expect_snapshot_warning()` to match `expect_snapshot_error()` (#1532).

* `JUnitReporter` now includes skip messages/reasons (@rfineman, #1507).

* `local_reproducible_output()` gains a `lang` argument so that you can
  optionally override the language used to translate error messages (#1483).
  It also sets the global option `cli.num_colors` in addition to
  `crayon.enabled`.

* `test_that()` no longer inappropriately skips when calling `expect_equal()`
  when you've temporarily set the locale to non-UTF-8 (#1285).

* `skip_if_offline()` now automatically calls `skip_on_cran()` (#1479).

* `snapshot_accept()` and `snapshot_review()` now work with exactly the same
  file specification, which can be a snapshot name, a file name, or a
  directory (#1546). They both work better with variants (#1508). Snapshot
  cleanup also removes all empty directories (#1457).

* When a snapshot changes the hint also mentions that you can use
  `snapshot_review()` (#1500, @DanChaltiel) and the message tells you what
  variant is active (#1540).
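A minimal sketch of the new snapshot expectation described above
(`deprecated_fn()` is a hypothetical stand-in for any code that signals a
warning):

```R
test_that("deprecation warning is stable", {
  # expect_snapshot_warning() records the warning message in _snaps/,
  # just as expect_snapshot_error() does for errors
  expect_snapshot_warning(deprecated_fn())
})
```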
# testthat 3.1.1

* Condition expectations like `expect_error()` now match across the ancestry
  of chained errors (#1493). You can disable this by setting the new
  `inherit` argument to `FALSE`.

* Added preliminary support for rlang 1.0 errors. It is disabled by default
  for the time being. To activate it, specify `rlang (>= 1.0.0)` in your
  `DESCRIPTION` file (or `>= 0.99.0.9001` if you're using the dev version).

  Once activated, snapshots will now use rlang to print error and warning
  messages, including the `Error:` and `Warning:` prefixes. This means the
  `call` field of conditions is now displayed in snapshots if present. Parent
  error messages are also displayed.

  Following this change, all snapshots including error and warning messages
  need to be revalidated. We will enable the new rlang 1.0 output
  unconditionally in a future release.

* `expect_snapshot()` gains a new argument `cnd_class` to control whether to
  show the class of errors, warnings, and messages. The default is currently
  unchanged so that condition classes keep being included in snapshots.
  However, we plan to change the default to `FALSE` in an upcoming release to
  prevent distracting snapshot diffing as upstream packages add error
  classes. For instance, the development version of R is currently adding
  classes to basic errors, which causes spurious snapshot changes when
  testing against R-devel on CI. If you depend on rlang 1.0 (see above), the
  default is already set to `FALSE`.

* `expect_snapshot()` no longer processes rlang injection operators like `!!`.

* Fixed bug in expectations with long inputs that use `::` (#1472).

# testthat 3.1.0

## Snapshot tests

* `expect_snapshot()` is no longer experimental.

* `expect_snapshot()` and friends get an experimental new `variant` argument
  which causes the snapshot to be saved in `_snaps/{variant}/{test}.md`
  instead of `_snaps/{test}.md`. This allows you to generate (and compare)
  unique snapshots for different scenarios like operating system or R version
  (#1143).

* `expect_snapshot()` gains a `transform` argument, which should be a
  function that takes a character vector of lines and returns a modified
  character vector of lines. This makes it easy to remove sensitive data
  (e.g. API keys) or stochastic output (e.g. random temporary directory
  names) from snapshot output (#1345).

* `expect_snapshot_file()` now replaces previous `.new` snapshot if code
  fails again with a different value.

* `expect_snapshot_value()` now has an explicit `tolerance` argument which
  uses the testthat default, thus making it more like `expect_equal()` rather
  than `expect_identical()`. Set it to `NULL` if you want precise comparisons
  (#1309). `expect_snapshot_value(style = "deparse")` now works with negative
  values (#1342).

* If a test containing multiple snapshots fails (or skips) in between
  snapshots, the later snapshots are now silently restored. (Previously this
  warned and reset all snapshots, not just later snapshots.)

* If you have multiple tests with the same name that use snapshots (not a
  good idea), you will no longer get a warning. Instead the snapshots will be
  aggregated across the tests.

## Breaking changes

* Condition expectations now consistently return the expected condition
  instead of the return value (#1371). Previously, they would only return the
  condition if the return value was `NULL`, leading to inconsistent
  behaviour. This is a breaking change to the 3rd edition.
  Where you could previously do:

  ```
  expect_equal(expect_warning(f(), "warning"), "value")
  ```

  You must now use condition expectations on the outside:

  ```
  expect_warning(expect_equal(f(), "value"), "warning")

  # Equivalently, save the value before inspection
  expect_warning(value <- f(), "warning")
  expect_equal(value, "value")
  ```

  This breaking change makes testthat more consistent. It also makes it
  possible to inspect both the value and the warning, which would otherwise
  require additional tools.

## Minor improvements and bug fixes

* Errors in test blocks now display the call if stored in the condition
  object (#1418). Uncaught errors now show their class (#1426).

* Multi-line skips only show the first line in the skip summary.

* `expr_label()`, which is used to concisely describe expressions used in
  expectations, now does a better job of summarising infix functions (#1442).

* `local_reproducible_output()` now sets the `max.print` option to 99999 (the
  default), so your tests are unaffected by any changes you might've made in
  your `.Rprofile` (#1367).

* `ProgressReporter` (the default only) now stops at the end of a file; this
  ensures that you see the results of all related tests, and ensures that
  snapshots are handled consistently (#1402).

* `ProgressReporter` now uses an env var to adjust the maximum number of
  failures. This makes it easier to adjust when the tests are run in a
  subprocess, as is common when using RStudio (#1450).

* `skip_on_os()` gains an `arch` argument so you can also choose to skip
  selected architectures (#1421).

* `test_that()` now correctly errors when an expectation fails when run
  interactively (#1430).

* `test_that()` now automatically and correctly generates an "empty test"
  skip if it only generates warnings or messages (and doesn't contain any
  expectations).

* `testthat_tolerance()` no longer has an unused argument.

# testthat 3.0.4

* The vendored Catch code used for `use_catch()` now uses a constant value
  for the stack size rather than relying on SIGSTKSZ. This fixes
  compatibility for recent glibc versions where SIGSTKSZ is no longer a
  constant.

* Fixed an issue that caused errors and early termination of tests on
  R <= 3.6 when a failing condition expectation was signalled inside a
  snapshot.

# testthat 3.0.3

* `expect_snapshot_file()` gains a `compare` argument (#1378, @nbenn). This
  is a customisation point for how to compare old and new snapshot files. The
  functions `compare_file_binary()` and `compare_file_text()` are now
  exported from testthat to be supplied as `compare` argument. These
  implement the same behaviour as the old `binary` argument, which is now
  deprecated.

* `expect_snapshot()` no longer deletes snapshots when an unexpected error
  occurs.

* New `announce_snapshot_file()` function for developers of testthat
  extensions. Announcing a snapshot file allows testthat to preserve files
  that were not generated because of an unexpected error or a `skip()`
  (#1393). Unannounced files are automatically deleted during cleanup if the
  generating code isn't called.

* New expectation: `expect_no_match()`. It complements `expect_match()` by
  checking if a string **doesn't match** a regular expression
  (@michaelquinn32, #1381).

* Support setting the testthat edition via an environment variable
  (`TESTTHAT_EDITION`) as well (@michaelquinn32, #1386).

# testthat 3.0.2

* Failing expectations now include a backtrace when they're not called
  directly from within `test_that()` but are instead wrapped in some helper
  function (#1307).
* `CheckReporter` now only records warnings when not on CRAN. Otherwise
  failed CRAN revdep checks tend to be cluttered up with warnings (#1300). It
  automatically cleans up `testthat-problems.rds` left over from previous
  runs if the latest run is successful (#1314).

* `expect_s3_class()` and `expect_s4_class()` can now check that an object
  _isn't_ an S3 or S4 object by supplying `NA` to the second argument (#1321).

* `expect_s3_class()` and `expect_s4_class()` format class names in a less
  confusing way (#1322).

* `expect_snapshot()` collapses multiple adjacent headings of the same type,
  so that, e.g., if you have multiple lines of code in a row, you'll only see
  one "Code:" heading (#1311).

# testthat 3.0.1

* New `testthat.progress.verbose_skips` option. Set to `FALSE` to stop
  reporting skips as they occur; they will still appear in the summary
  (#1209, @krlmlr).

* `CheckReporter` results have been tweaked based on experiences from running
  R CMD check on many packages. Hopefully it should now be easier to see the
  biggest problems (i.e. failures and errors) while still having skips and
  warnings available to check if needed (#1274). And now the full test name
  is always shown, no matter how long (#1268).

* Catch C++ tests are no longer reported multiple times (#1237) and are
  automatically skipped on Solaris since Catch is not supported (#1257).
  `use_catch()` makes it more clear that your package needs to suggest xml2
  (#1235).

* `auto_test_package()` works once again (@mbojan, #1211, #1214).

* `expect_snapshot()` gains a new `error` argument which controls whether or
  not an error is expected. If an unexpected error is thrown, or an expected
  error is not thrown, `expect_snapshot()` will fail (even on CRAN) (#1200).

* `expect_snapshot_value(style = "deparse")` handles more common R data
  structures.

* `expect_snapshot_value()` now passes `...` on to `waldo::compare()` (#1222).

* `expect_snapshot_file()` gives a hint as to next steps when a failure
  occurs in non-interactive environments (with help from @maelle, #1179).
  `expect_snapshot_*()` gives a more informative hint when you're running
  tests interactively (#1226).

* `expect_snapshot_*()` automatically removes the `_snaps` directory if it's
  empty (#1180). It also warns if snapshots are discarded because tests have
  duplicated names (#1278, @krlmlr).

* `local_reproducible_output()` now sets the LANGUAGE env var to "en". This
  matches the behaviour of R CMD check in interactive settings (#1213). It
  also now unsets the RSTUDIO env var, instead of setting it to 0 (#1225).

* `RstudioReporter` has been renamed to `RStudioReporter`.

* `skip_if_not()` no longer appends "is not TRUE" to custom messages
  (@dpprdan, #1247).

* `test_that()` now warns (3e only) if code doesn't have braces, since that
  makes it hard to track the source of an error (#1280, @krlmlr).

# testthat 3.0.0

## 3rd edition

testthat 3.0.0 brings with it a 3rd edition that makes a number of breaking
changes in order to clean up the interface and help you use our latest
recommendations. To opt-in to the 3rd edition for your package, set
`Config/testthat/edition: 3` in your `DESCRIPTION` or use `local_edition(3)`
in individual tests. You can retrieve the active edition with
`edition_get()`. Learn more in `vignette("third-edition")`.

* `context()` is deprecated.

* `expect_identical()` and `expect_equal()` use `waldo::compare()` to compare
  actual and expected results.
  This mostly yields much more informative output when the actual and
  expected values are different, but while writing it uncovered some bugs in
  the existing comparison code.

* `expect_error()`, `expect_warning()`, `expect_message()`, and
  `expect_condition()` now all use the same underlying logic: they capture
  the first condition that matches `class`/`regexp` and allow anything else
  to bubble up (#998/#1052). They also warn if there are unexpected arguments
  that are never used.

* The `all` argument to `expect_message()` and `expect_warning()` is now
  deprecated. It was never a particularly good idea or well documented, and
  is now superseded by the new condition capturing behaviour.

* `expect_equivalent()`, `expect_reference()`, `expect_is()` and
  `expect_that()` are deprecated.

* Messages are no longer automatically silenced. Either use
  `suppressMessages()` to hide unimportant messages, or `expect_message()` to
  catch important messages (#1095).

* `setup()` and `teardown()` are deprecated in favour of test fixtures. See
  `vignette("test-fixtures")` for more details.

* `expect_known_output()`, `expect_known_value()`, `expect_known_hash()`, and
  `expect_equal_to_reference()` are all deprecated in favour of
  `expect_snapshot_output()` and `expect_snapshot_value()`.

* `test_that()` now sets a number of options and env vars to make output as
  reproducible as possible (#1044). Many of these options were previously set
  in various places (in `devtools::test()`, `test_dir()`, `test_file()`, or
  `verify_output()`) but they have now been centralised. You can use them in
  your own code, or when debugging tests interactively with
  `local_test_context()`.

* `with_mock()` and `local_mock()` are deprecated; please use the mockr or
  mockery packages instead (#1099).

## Snapshot testing

New family of snapshot expectations (`expect_snapshot()`,
`expect_snapshot_output()`, `expect_snapshot_error()`, and
`expect_snapshot_value()`) provide "snapshot" tests, where the expected
results are stored in separate files in `test/testthat/_snaps`. They're
useful whenever it's painful to store expected results directly in the test
files.

`expect_snapshot_file()` along with `snapshot_review()` help snapshot more
complex data, with initial support for text files, images, and data frames
(#1050). See `vignette("snapshotting")` for more details.

## Reporters

* `CheckReporter` (used inside R CMD check) now prints out all problems (i.e.
  errors, failures, warnings and skips; and not just the first 10), lists
  skip types, and records problems in machine readable format in
  `tests/testthat-problems.rds` (#1075).

* New `CompactProgressReporter` tweaks the output of `ProgressReporter` for
  use with a single file, as in `devtools::test_file()`. You can pick a
  different default by setting `testthat.default_compact_reporter` to the
  name of a reporter.

* `ProgressReporter` (the default reporter) now keeps the stack traces of
  errors that happen before the first test, making problems substantially
  easier to track down (#1004). It checks if you've exceeded the maximum
  number of failures (from option `testthat.progress.max_fails`) after each
  expectation, rather than at the end of each file (#967). It also gains new
  random praise options that use emoji, and lists skipped tests by type
  (#1028).

* `StopReporter` adds random praise emoji when a single test passes (#1094).
  It has more refined display of failures, now using the same style as
  `CompactProgressReporter` and `ProgressReporter`.

* `SummaryReporter` now records file start, not just context start.
  This makes it more compatible with modern style, which does not use
  `context()` (#1089).

* All reporters now use exactly the same format when reporting the location
  of an expectation.

* Warnings now include a backtrace, making it easier to figure out where they
  came from.

* Catch C++ tests now provide detailed results for each test. To upgrade
  existing code, re-run `testthat::use_catch()` (#1008).

* Many reporters (e.g. the check reporter) no longer raise an error when any
  tests fail. Use the `stop_on_failure` argument to `devtools::test()` and
  `testthat::test_dir()` if your code relies on this. Alternatively, use
  `reporter = c("check", "fail")` to e.g. create a failing check reporter.

## Fixtures

* New `vignette("test-fixtures")` describes test fixtures; i.e. how to
  temporarily and cleanly change global state in order to test parts of your
  code that otherwise would be hard to run (#1042). `setup()` and
  `teardown()` are superseded in favour of test fixtures.

* New `teardown_env()` for use with `withr::defer()`. This allows you to run
  code after all other tests have been run.

## Skips

* New `vignette("skipping")` gives more general information on skipping
  tests, including some basics on testing skipping helpers (#1060).

* `ProgressReporter()` and `CheckReporter()` list the number of skipped tests
  by reason at the end of the reporter. This makes it easier to check that
  you're not skipping the wrong tests, particularly on CI services (#1028).

## Test running

* `test_that()` no longer triggers an error when run outside of tests;
  instead it produces a more informative summary of all failures, errors,
  warnings, and skips that occurred inside the test.

* `test_that()` now errors if `desc` is not a string (#1161).

* `test_file()` now runs helper, setup, and teardown code, and has the same
  arguments as `test_dir()` (#968). The long-deprecated `encoding` argument
  has been removed.

* `test_dir()` now defaults `stop_on_failure` to `TRUE` for consistency with
  other `test_` functions. The `wrap` argument has been deprecated; it's not
  clear that it should ever have been exposed.

* New `test_local()` tests a local source package directory. It's equivalent
  to `devtools::test()` but doesn't require devtools and all its dependencies
  to be installed (#1030).

## Minor improvements and bug fixes

* testthat no longer supports tests stored in `inst/tests`. This has been
  deprecated since testthat 0.11.0 (released in 2015). `test_package()`
  (previously used for running tests in R CMD check) will fail silently if no
  tests are found to avoid breaking old packages on CRAN (#1149).

* `capture_output()` and `verify_output()` use a new `testthat_print()`
  generic. This allows you to control the printed representation of your
  object specifically for tests (i.e. if your usual print method shows data
  that varies in a way that you don't care about for tests) (#1056).

* `context_start_file()` is now exported for external reporters
  (#983, #1082). It now only strips the first instance of the prefix/suffix
  (#1041, @stufield).

* `expect_error()` no longer encourages you to use `class`. This advice
  removes one type of fragility at the expense of creating a different type
  (#1013).

* `expect_known_failure()` has been removed. As far as I can tell it was only
  ever used by testthat, and is rather fragile.

* `expect_true()`, `expect_false()`, and `expect_null()` now use waldo to
  produce more informative failures.

* `verify_output()` no longer always fails if output contains a carriage
  return character ("\r") (#1048).
  It uses the `pdf()` device instead of `png()` so it works on systems
  without X11 (#1011). And it uses `waldo::compare()` to give more
  informative failures.

# testthat 2.3.2

* Fix R CMD check issues

# testthat 2.3.1

* The last version of testthat introduced a performance regression in error
  assertions (#963). To fix it, you need to install rlang 0.4.2.

* Fixed error assertions with rJava errors (#964).

* Fixed issue where error and warning messages were not retrieved with
  `conditionMessage()` under certain circumstances.

# testthat 2.3.0

## Conditions

This release mostly focusses on an overhaul of how testthat works with
conditions (i.e. errors, warnings and messages). There are relatively few
user-facing changes, although you should now see more informative backtraces
from errors and failures.

* Unexpected errors are now printed with a simplified backtrace.

* `expect_error()` and `expect_condition()` now display a backtrace when the
  error doesn't conform to expectations (#729).

* `expect_error()`, `expect_warning()` and `expect_message()` now call
  `conditionMessage()` to get the condition message. This generic makes it
  possible to generate messages at print-time rather than signal-time.

* `expect_error()` gets a better warning message when you test for a custom
  error class with `regexp`.

* New `exp_signal()` function is a condition signaller that implements the
  testthat protocol (signal with `stop()` if the expectation is broken, with
  a `continue_test` restart).

* Existence of restarts is first checked before invocation. This makes it
  possible to signal warnings or messages with a different condition
  signaller (#874).

* `ListReporter` now tracks expectations and errors, even when they occur
  outside of tests. This ensures that `stop_on_failure` matches the results
  displayed by the reporter (#936).

* You can silence warnings about untested error classes by implementing a
  method for `is_informative_error()`. This method should be lazily
  registered, e.g. with `vctrs::s3_register()`. This is useful for
  introducing an experimental error class without encouraging users to
  depend on the class in their tests.

* Respect options(warn = -1) to ignore all warnings (@jeroen #958).

## Expectations

* Expectations can now be explicitly subclassed with `new_expectation()`.
  This constructor follows our new conventions for S3 classes and takes an
  optional subclass and optional attributes.

* Unquoted inputs no longer potentially generate multiple test messages
  (#929).

* `verify_output()` no longer uses quasiquotation, which fixes issues when
  verifying the output of tidy eval functions (#945).

* `verify_output()` gains a `unicode` parameter to turn on or off the use of
  Unicode characters by the cli package. It is disabled by default to prevent
  the tests from failing on platforms like Windows that don't support UTF-8
  (which could be your contributors' or your CI machines).

* `verify_output()` now correctly handles multi-line condition messages.

* `verify_output()` now adds spacing after condition messages, consistent
  with the spacing added after normal output.

* `verify_output()` has a new syntax for inserting headers in output files:
  insert a `"# Header"` string (starting with `#` as in Markdown) to add a
  header to a set of outputs.

## Other minor improvements and bug fixes

* `compare.numeric()` uses a more sophisticated default tolerance that will
  automatically skip tests that rely on numeric tolerance if long doubles
  are not available (#940).
* `JunitReporter` now reports tests in ISO 8601 format in the UTC timezone
  and uses the maximum precision of 3 decimal places (#923).

# testthat 2.2.1

* Repair regression in `test_rd()` and add a couple of tests to hopefully
  detect the problem earlier in the future.

# testthat 2.2.0

## New features

* New `verify_output()` is designed for testing output aimed at humans (most
  commonly print methods and error messages). It is a regression test that
  saves output in a way that makes it easy to review. It is automatically
  skipped on CRAN (#782, #834).

## Minor improvements and bug fixes

* `as.data.frame.testthat_results()` now always returns a data frame with 13
  columns (@jozefhajnala, #887).

* `auto_test_package()` now correctly handles helper files
  (`tests/testthat/helper-*.R`), automatically reloading all code and
  rerunning all tests (@CorradoLanera, #376, #896).

* `expect_match()` now displays `info` even when match length is 0 (#867).

* `expect_s3_class()` gains a new `exact` argument that allows you to check
  for an exact class match, not just inheritance (#885).

* `fail()` and `succeed()` gain an `info` argument, which is passed along to
  `expect()`.

* `test_examples()` gets some minor fixes: it now returns the results
  invisibly, doesn't assume that examples should contain tests, and documents
  that you shouldn't be using it routinely (#841).

* `test_file()` only calls `Reporter$end_context()` if a context was started,
  fixing an error in `TeamcityReporter` (@atheriel, #883).

* `skip()` now reports reason for skipping as: `Reason: {skip condition}`
  (@patr1ckm, #868).

* `skip_if()` and `skip_if_not()` now report `Reason: {skip condition} is
  TRUE` and `Reason: {skip condition} is not TRUE` respectively
  (@patr1ckm, #868).

* `skip_if_translated()` now tests for translation of a specific message.
  This is more robust than the previous approach because translation happens
  message-by-message, not necessarily for the entire session (#879) (and in
  general, it's impossible to determine what language R is currently using).

* `skip_on_covr()` allows you to skip tests when covr is running
  (@ianmcook, #895).

* `expect_known_value()` gains a new serialisation `version` argument,
  defaulting to 2. Prevents the `.rds` files created to hold reference
  objects from making a package appear to require R >= 3.5 (#888, @jennybc).

# testthat 2.1.1

* Fix test failures in strict latin1 locale

# testthat 2.1.0

## New expectations

* New `expect_visible()` and `expect_invisible()` make it easier to check if
  a function call returns its result visibly or invisibly (#719).

* New `expect_mapequal(x, y)` checks that `x` and `y` have the same names,
  and the same value associated with each name (i.e. they compare the values
  of the vector standardising the order of the names) (#863).

* New `expect_vector()` is a wrapper around `vctrs::vec_assert()` making it
  easy to test against the vctrs definitions of prototype and size (#846).
  (Currently requires development version of vctrs.)

## Improvements to existing expectations

* All expectations give clearer error messages if you forget the `object` or
  `expected` arguments (#743).

* `expect_equal()` now correctly compares infinite values (#789).

* In `expect_equal_to_reference()`, the default value for `update` is now
  `FALSE` (@BrodieG, #683).

* `expect_error()` now returns the error object as documented (#724). It also
  now warns if you're using a classed expectation and you're not using the
  `class` argument.
  This is good practice as it decouples the error object (which tends to be
  stable) from its rendering to the user (which tends to be fragile) (#816).

* `expect_identical()` gains a `...` argument to pass additional arguments
  down to `identical()` (#714).

* `expect_lt()`, `expect_lte()`, `expect_gt()` and `expect_gte()` now handle
  `Inf` and `NA` arguments appropriately (#732), and no longer require the
  inputs to be numeric.

* `expect_output()` gains a `width` argument, allowing you to control the
  output width. This does not inherit from `getOption("width")`, ensuring
  that tests return the same results regardless of environment (#805).

* `expect_setequal()` now works with more vector types (including lists),
  because it uses `%in%`, rather than `sort()`. It also warns if the inputs
  are named, as this suggests that your mental model of how
  `expect_setequal()` works is wrong (#750).

* `is_true()` and `is_false()` have been deprecated because they conflict
  with other functions in the tidyverse.

## Reporters

* Reporter documentation has been considerably improved (#657).

* `CheckReporter`, used by R CMD check, now includes a count of warnings.

* `JUnitReporter` no longer replaces `.` in class names (#753), and creates
  output that should be more compatible with Jenkins (#806, @comicfans).

* `ListReporter` now records number of passed tests and original results in
  new columns (#675).

* `ProgressReporter`, the default reporter, now:

  * Automatically generates a context from the file name. We no longer
    recommend the use of `context()` and instead encourage you to delete it,
    allowing the context to be autogenerated from the file name. This also
    eliminates the error that occurred if tests came before the first
    `context()` (#700, #705).

  * Gains an `update_interval` parameter to control how often updates are
    printed (default 0.1 s). This prevents large printing overhead for very
    fast tests. (#701, @jimhester)

  * Uses a 3 character wide column to display test successes, so up to 999
    successful tests can be displayed without changing the alignment (#712).

* `reporter$end_reporter()` is now only called when testing completes
  successfully. This ensures that you don't get unnecessary output when the
  test fails partway through (#727).

## Skips

* `skip_if_offline()` skips tests if an internet connection is not available
  (#685).

* `skip_on_ci()` skips tests on continuous integration systems (@mbjoseph,
  #825) by looking for a `CI` env var.

## Other new features

* New `testthat_examples()` and `testthat_example()` make it easy to access
  new test files bundled with the package. These are used in various examples
  to make it easier to understand how to use the package.

* New `local_mock()` which allows you to mock a function without having to
  add an additional layer of indentation as with `with_mock()` (#856).

## Other minor improvements and bug fixes

* `auto_test_package()` works better with recent devtools and also watches
  `src/` for changes (#809).

* `expect_s3_class()` now works with unquoting (@jalsalam, #771).

* `expectation` objects now contain the failure message, even when
  successful (#836).

* `devtools::test()` no longer fails if run multiple times within the same R
  session for a package containing Catch tests.
  ([devtools #1832](https://github.com/r-lib/devtools/issues/1832))

* New `testing_package()` retrieves the name of the package currently being
  tested (#699).

* `run_testthat_tests` C entrypoint is registered more robustly.
* `skip()` now always produces a `message` of length 1, as expected elsewhere
  in testthat (#791).

* Warnings are passed through even when `options(warn = 2)` is set
  (@yutannihilation, #721).

# testthat 2.0.1

* Fix failing tests with devtools 2.0.0

# testthat 2.0.0

## Breaking API changes

* "Can't mock functions in base packages": You can no longer use
  `with_mock()` to mock functions in base packages, because this no longer
  works in R-devel due to changes with the byte code compiler. I recommend
  using [mockery](https://github.com/r-lib/mockery) or
  [mockr](https://github.com/krlmlr/mockr) instead.

* The order of arguments to `expect_equivalent()` and `expect_error()` has
  changed slightly as both now pass `...` on to another function. This
  reveals itself with a number of different errors, like:

  * 'what' must be a character vector
  * 'check.attributes' must be logical
  * 'tolerance' should be numeric
  * argument is not interpretable as logical
  * threw an error with unexpected class
  * argument "quo" is missing, with no default
  * argument is missing, with no default

  If you see one of these errors, check the number, order, and names of
  arguments to the expectation.

* "Failure: (unknown)". The last release mistakenly failed to test bare
  expectations not wrapped inside `test_that()`. If you see "(unknown)" in a
  failure message, this is a failing expectation that you previously weren't
  seeing. As well as fixing the failure, please also wrap it inside a
  `test_that()` with an informative name.

* "Error: the argument has already been evaluated": the way in which
  expectations need to create labels has changed, which caused a couple of
  failures with unusual usage when combined with `Reduce`, `lapply()`, and
  `Map()`. Avoid these functions in favour of for loops. I also recommend
  reading the section below on quasiquotation support in order to create
  more informative failure messages.

## Expectations

### New and improved expectations

* `expect_condition()` works like `expect_error()` but captures any
  condition, not just error conditions (#621).

* `expect_error()` gains a `class` argument that allows you to make an
  assertion about the class of the error object (#530).

* `expect_reference()` checks if two names point to the same object (#622).

* `expect_setequal()` compares two sets (stored in vectors), ignoring
  duplicates and differences in order (#528).

### New and improved skips

* `skip_if()` makes it easy to skip a test when a condition is true (#571).
  For example, use `skip_if(getRversion() <= 3.1)` to skip a test in older R
  versions.

* `skip_if_translated()` skips tests if you're running in a locale where
  translations are likely to occur (#565). Use this to avoid spurious
  failures when checking the text of error messages in non-English locales.

* `skip_if_not_installed()` gains new `minimum_version` argument (#487, #499).

### Known good values

We have identified a useful family of expectations that compares the results
of an expression to a known good value stored in a file. They are designed to
be used in conjunction with git so that you can see what precisely has
changed, and revert it if needed.

* `expect_known_output()` replaces `expect_output_file()`, which has been
  soft-deprecated. It now defaults to `update = TRUE` and warns, rather than
  failing, on the first run. It gains a `print` argument to automatically
  print the input (#627).
  It also sets the width option to 80 to ensure consistent output across
  environments (#514).

* `expect_known_value()` replaces `expect_equal_to_reference()`, which has
  been soft-deprecated. It gains an update argument defaulting to `TRUE`.
  This changes behaviour from the previous version, and soft-deprecated
  `expect_equal_to_reference()` gets `update = FALSE`.

* `expect_known_failure()` stores and compares the failure message from an
  expectation. It's a useful regression test when developing informative
  failure messages for your own expectations.

### Quasiquotation support

All expectations can now use unquoting (#626). This makes it much easier to
generate informative failure messages when running tests in a for loop. For
example take this test:

```R
f <- function(i) if (i > 3) i * 9 else i * 10

for (i in 1:5) {
  expect_equal(f(i), i * 10)
}
```

When it fails, you'll see the message ``Error: `f(i)` not equal to
`i * 10` ``. That's hard to diagnose because you don't know which iteration
caused the problem!

```R
for (i in 1:5) {
  expect_equal(f(!!i), !!(i * 10))
}
```

If you unquote the values using `!!`, you get the failure message
`` `f(4L)` not equal to 40.``. This is much easier to diagnose! See
`?quasi_label()` for more details. (Note that this is not tidy evaluation per
se, but is closely related. At this time you can not unquote quosures.)

## New features

### Setup and teardown

* New `setup()` and `teardown()` functions allow you to run code at the start
  and end of each test file. This is useful if you want to pair cleanup code
  with the code that messes up state (#536).

* Two new prefixes are recognised in the `test/` directory. Files starting
  with `setup` are run before tests (but unlike `helpers` are not run in
  `devtools::load_all()`). Files starting with `teardown` are run after all
  tests are completed (#589).

### Other new features

* All files are now read and written as UTF-8 (#510, #605).

* `is_testing()` allows you to tell if your code is being run inside a
  testing environment (#631). Rather than taking a run-time dependency on
  testthat you may want to inline the function into your own package:

  ```R
  is_testing <- function() {
    identical(Sys.getenv("TESTTHAT"), "true")
  }
  ```

  It's frequently useful to combine with `interactive()`.

### New default reporter

A new default reporter, `ProgressReporter`, produces more aesthetically
pleasing output and makes the most important information available upfront
(#529). You can return to the previous default by setting
`options(testthat.default_reporter = "summary")`.

### Reporters

* Output colours have been tweaked to be consistent with clang: warnings are
  now in magenta, and skips in blue.

* New `default_reporter()` and `check_reporter()` which return the default
  reporters for interactive and check environments (#504).

* New `DebugReporter` that calls a better version of `recover()` in case of
  failures, errors, or warnings (#360, #470).

* New `JunitReporter` generates reports in JUnit compatible format.
  (#481, @lbartnik; #640, @nealrichardson; #575)

* New `LocationReporter` which just prints the location of every expectation.
  This is useful for locating segfaults and C/C++ breakpoints (#551).

* `SummaryReporter` received a number of smaller tweaks:

  * Aborts testing as soon as the limit given by the option
    `testthat.summary.max_reports` (default 10) is reached (#520).

  * New option `testthat.summary.omit_dots = TRUE` hides the progress dots,
    speeding up tests by a small amount (#502).
* Bring back random praise and encouragement which I accidentally dropped
  (#478).

* New option `testthat.default_check_reporter`, defaults to `"check"`.
  Continuous integration systems can set this option before evaluating
  package test sources in order to direct test result details to a known
  location.

* All reporters now accept a `file` argument on initialization. If provided,
  reporters will write the test results to that path. This output destination
  can also be controlled with the option `testthat.output_file`
  (#635, @nealrichardson).

## Deprecated functions

* `is_null()` and `matches()` have been deprecated because they conflict with
  other functions in the tidyverse (#523).

## Minor improvements and bug fixes

* Updated Catch to 1.9.6. `testthat` now understands and makes use of the
  package routine registration mechanism required by CRAN with R >= 3.4.0.
  (@kevinushey)

* Better reporting for deeply nested failures, limiting the stack trace to
  the first and last 10 entries (#474).

* Bare expectations notify the reporter once again. This is achieved by
  running all tests inside `test_code()` by default (#427, #498). This
  behaviour can be overridden by setting `wrap = FALSE` in `test_dir()` and
  friends (#586).

* `auto_test()` and `auto_test_package()` provide `hash` parameter to enable
  switching to faster, time-stamp-based modification detection (#598,
  @katrinleinweber). `auto_test_package()` works correctly on Windows (#465).

* `capture_output_lines()` is now exported (#504).

* `compare.character()` works correctly for vectors of length > 5
  (#513, @brodieG).

* `compare.default()` gains a `max_diffs` argument and defaults to printing
  out only the first 9 differences (#538).

* `compare.numeric()` respects `check.attributes()` so `expect_equivalent()`
  correctly ignores attributes of numeric vectors (#485).

* Output expectations (`expect_output()`, `expect_message()`,
  `expect_warning()`, and `expect_silent()`) all invisibly return the first
  argument to be consistent with the other expectations (#615).

* `expect_length()` works with any object that has a `length` method, not
  just vectors (#564, @nealrichardson).

* `expect_match()` now accepts explicit `perl` and `fixed` arguments, and
  adapts the failure message to the value of `fixed`. This also affects other
  expectations that forward to `expect_match()`, like `expect_output()`,
  `expect_message()`, `expect_warning()`, and `expect_error()`.

* `expect_match()` escapes special regular expression characters when
  printing (#522, @jimhester).

* `expect_message()`, `expect_warning()` and `expect_error()` produce clearer
  failure messages.

* `find_test_scripts()` only looks for `\.[rR]` in the extension
  (#492, @brodieG).

* `test_dir()`, `test_package()`, `test_check()` unset the `R_TESTS` env var
  (#603).

* `test_examples()` now works with installed packages as well as source
  packages (@jimhester, #532).

* `test_dir()`, `test_package()`, and `test_check()` gain `stop_on_failure`
  and `stop_on_warning` arguments that control whether or not an error is
  signalled if any tests fail or generate warnings (#609, #619).

* `test_file()` now triggers a `gc()` after tests are run. This helps to
  ensure that finalisers are run earlier (#535).

* `test_path()` now generates correct path when called from within
  `tools::testInstalledPackage()` (#542).

* `test_path()` no longer assumes that the path exists (#448).

* `test_that()` calls without any expectations generate a default `skip()`
  (#413).

* `test_dir()` gains `load_helpers` argument (#505).
* `show_failures()` simply prints a failure if it occurs. This makes it
  easier to show failures in examples.

* `with_mock()` disallows mocking of functions in base packages, because this
  doesn't work with the current development version of R (#553).

# testthat 1.0.2

* Ensure `std::logic_error()` constructed with `std::string()` argument, to
  avoid build errors on Solaris.

# testthat 1.0.1

* New `expect_output_file()` to compare output of a function with a text
  file, and optionally update it (#443, @krlmlr).

* Properly scoped use + compilation of C++ unit testing code using Catch to
  `gcc` and `clang` only, as Catch includes code that does not strictly
  conform to the C++98 standard. (@kevinushey)

* Fixed an out-of-bounds memory access when routing Catch output through
  `Rprintf()`. (@kevinushey)

* Ensure that unit tests run on R-oldrel (remove use of `dir.exists()`).
  (@kevinushey)

* Improved overriding of calls to `exit()` within Catch, to ensure
  compatibility with GCC 6.0. (@krlmlr)

* Hardened formatting of difference messages; previously the presence of `%`
  characters could affect the output (#446, @krlmlr).

* Fixed errors in `expect_equal()` when comparing numeric vectors with and
  without attributes (#453, @krlmlr).

* `auto_test()` and `auto_test_package()` show only the results of the
  current test run and not of previously failed runs (#456, @krlmlr).

# testthat 1.0.0

## Breaking changes

The `expectation()` function now expects an expectation type (one of
"success", "failure", "error", "skip", "warning") as its first argument. If
you're creating your own expectations, you'll need to use `expect()` instead
(#437).

## New expectations

The expectation system got a thorough overhaul (#217). This primarily makes
it easier to add new expectations in the future, but also included a thorough
review of the documentation, ensuring that related expectations are
documented together, and have evocative names.

One useful change is that most expectations invisibly return the input
`object`. This makes it possible to chain together expectations with
magrittr:

```R
factor("a") %>%
  expect_type("integer") %>%
  expect_s3_class("factor") %>%
  expect_length(1)
```

(And to make this style even easier, testthat now re-exports the pipe, #412).
A number of older features have been deprecated: * `expect_more_than()` and `expect_less_than()` have been deprecated. Please use `expect_gt()` and `expect_lt()` instead. * `takes_less_than()` has been deprecated. * `not()` has been deprecated. Please use the explicit individual forms `expect_error(..., NA)` , `expect_warning(.., NA)` and so on. ## Expectations are conditions Now all expectations are also conditions, and R's condition system is used to signal failures and successes (#360, @krlmlr). All known conditions (currently, "error", "warning", "message", "failure", and "success") are converted to expectations using the new `as.expectation()`. This allows third-party test packages (such as `assertthat`, `testit`, `ensurer`, `checkmate`, `assertive`) to seamlessly establish `testthat` compatibility by issuing custom error conditions (e.g., `structure(list(message = "Error message"), class = c("customError", "error", "condition"))`) and then implementing `as.expectation.customError()`. The `assertthat` package contains an example. ## Reporters The reporters system class has been considerably refactored to make existing reporters simpler and to make it easier to write new reporters. There are two main changes: * Reporters classes are now R6 classes instead of Reference Classes. * Each callbacks receive the full context: * `add_results()` is passed context and test as well as the expectation. * `test_start()` and `test_end()` both get the context and test. * `context_start()` and `context_end()` get the context. * Warnings are now captured and reported in most reporters. * The reporter output goes to the original standard output and is not affected by `sink()` and `expect_output()` (#420, @krlmlr). * The default summary reporter lists all warnings (#310), and all skipped tests (@krlmlr, #343). New option `testthat.summary.max_reports` limits the number of reports printed by the summary reporter. The default is 15 (@krlmlr, #354). * `MinimalReporter` correct labels errors with E and failures with F (#311). * New `FailReporter` to stop in case of failures or errors after all tests (#308, @krlmlr). ## Other * New functions `capture_output()`, `capture_message()`, and `capture_warnings()` selectively capture function output. These are used in `expect_output()`, `expect_message()` and `expect_warning()` to allow other types out output to percolate up (#410). * `try_again()` allows you to retry code multiple times until it succeeds (#240). * `test_file()`, `test_check()`, and `test_package()` now attach testthat so all testing functions are available. * `source_test_helpers()` gets a useful default path: the testthat tests directory. It defaults to the `test_env()` to be consistent with the other source functions (#415). * `test_file()` now loads helpers in the test directory before running the tests (#350). * `test_path()` makes it possible to create paths to files in `tests/testthat` that work interactively and when called from tests (#345). * Add `skip_if_not()` helper. * Add `skip_on_bioc()` helper (@thomasp85). * `make_expectation()` uses `expect_equal()`. * `setup_test_dir()` has been removed. If you used it previously, instead use `source_test_helpers()` and `find_test_scripts()`. * `source_file()` exports the function testthat uses to load files from disk. * `test_that()` returns a `logical` that indicates if all tests were successful (#360, @krlmlr). * `find_reporter()` (and also all high-level testing functions) support a vector of reporters. 
For more than one reporter, a `MultiReporter` is created (#307, @krlmlr). * `with_reporter()` is used internally and gains new argument `start_end_reporter = TRUE` (@krlmlr, 355). * `set_reporter()` returns old reporter invisibly (#358, @krlmlr). * Comparing integers to non-numbers doesn't raise errors anymore, and falls back to string comparison if objects have different lengths. Complex numbers are compared using the same routine (#309, @krlmlr). * `compare.numeric()` and `compare.character()` received another overhaul. This should improve behaviour of edge cases, and provides a strong foundation for further work. Added `compare.POSIXt()` for better reporting of datetime differences. * `expect_identical()` and `is_identical_to()` now use `compare()` for more detailed output of differences (#319, @krlmlr). * Added [Catch](https://github.com/catchorg/Catch2) v1.2.1 for unit testing of C++ code. See `?use_catch()` for more details. (@kevinushey) # testthat 0.11.0 * Handle skipped tests in the TAP reporter (#262). * New `expect_silent()` ensures that code produces no output, messages, or warnings (#261). * New `expect_lt()`, `expect_lte()`, `expect_gt()` and `expect_gte()` for comparison with or without equality (#305, @krlmlr). * `expect_output()`, `expect_message()`, `expect_warning()`, and `expect_error()` now accept `NA` as the second argument to indicate that output, messages, warnings, and errors should be absent (#219). * Praise gets more diverse thanks to the praise package, and you'll now get random encouragment if your tests don't pass. * testthat no longer muffles warning messages. If you don't want to see them in your output, you need to explicitly quiet them, or use an expectation that captures them (e.g. `expect_warning()`). (#254) * Use tests in `inst/tests` is formally deprecated. Please move them into `tests/testthat` instead (#231). * `expect_match()` now encodes the match, as well as the output, in the expectation message (#232). * `expect_is()` gives better failure message when testing multiple inheritance, e.g. `expect_is(1:10, c("glm", "lm"))` (#293). * Corrected argument order in `compare.numeric()` (#294). * `comparison()` constructure now checks its arguments are the correct type and length. This bugs a bug where tests failed with an error like "values must be length 1, but FUN(X[[1]]) result is length 2" (#279). * Added `skip_on_os()`, to skip tests on specified operating systems (@kevinushey). * Skip test that depends on `devtools` if it is not installed (#247, @krlmlr) * Added `skip_on_appveyor()` to skip tests on Appveyor (@lmullen). * `compare()` shows detailed output of differences for character vectors of different length (#274, @krlmlr). * Detailed output from `expect_equal()` doesn't confuse expected and actual values anymore (#274, @krlmlr). # testthat 0.10.0 * Failure locations are now formated as R error locations. * Add an 'invert' argument to `find_tests_scripts()`. This allows one to select only tests which do _not_ match a pattern. (#239, @jimhester). * Deprecated `library_if_available()` has been removed. * test (`test_dir()`, `test_file()`, `test_package()`, `test_check()`) functions now return a `testthat_results` object that contains all results, and can be printed or converted to data frame. * `test_dir()`, `test_package()`, and `test_check()` have an added `...` argument that allows filtering of test files using, e.g., Perl-style regular expressions,or `fixed` character filtering. Arguments in `...` are passed to `grepl()` (@leeper). 
* `test_check()` uses a new reporter specifically designed for `R CMD check`. It displays a summary at the end of the tests, designed to be <13 lines long so test failures in `R CMD check` display something more useful. This will hopefully stop BDR from calling testthat a "test obfuscation suite" (#201). * `compare()` is now documented and exported. Added a numeric method so when long numeric vectors don't match you'll see some examples of where the problem is (#177). The line spacing in `compare.character()` was tweaked. * `skip_if_not_installed()` skips tests if a package isn't installed (#192). * `expect_that(a, equals(b))` style of testing has been soft-deprecated. It will keep working, but it's no longer demonstrated any where, and new expectations will only be available in `expect_equal(a, b)` style. (#172) * Once again, testthat suppresses messages and warnings in tests (#189) * New `test_examples()` lets you run package examples as tests. Each example counts as one expectation and it succeeds if the code runs without errors (#204). * New `succeed()` expectation always succeeds. * `skip_on_travis()` allows you to skip tests when run on Travis CI. (Thanks to @mllg) * `colourise()` was removed. (Colour is still supported, via the `crayon` package.) * Mocks can now access values local to the call of `with_mock` (#193, @krlmlr). * All equality expectations are now documented together (#173); all matching expectations are also documented together. # testthat 0.9.1 * Bump R version dependency # testthat 0.9 ## New features * BDD: testhat now comes with an initial behaviour driven development (BDD) interface. The language is similiar to RSpec for Ruby or Mocha for JavaScript. BDD tests read like sentences, so they should make it easier to understand the specification of a function. See `?describe()` for further information and examples. * It's now possible to `skip()` a test with an informative message - this is useful when tests are only available under certain conditions, as when not on CRAN, or when an internet connection is available (#141). * `skip_on_cran()` allows you to skip tests when run on CRAN. To take advantage of this code, you'll need either to use devtools, or run `Sys.setenv(NOT_CRAN = "true"))` * Simple mocking: `with_mock()` makes it easy to temporarily replace functions defined in packages. This is useful for testing code that relies on functions that are slow, have unintended side effects or access resources that may not be available when testing (#159, @krlmlr). * A new expectation, `expect_equal_to_reference()` has been added. It tests for equality to a reference value stored in a file (#148, @jonclayden). ## Minor improvements and bug fixes * `auto_test_package()` works once more, and now uses `devtools::load_all()` for higher fidelity loading (#138, #151). * Bug in `compare.character()` fixed, as reported by Georgi Boshnakov. * `colourise()` now uses option `testthat.use_colours` (default: `TRUE`). If it is `FALSE`, output is not colourised (#153, @mbojan). * `is_identical_to()` only calls `all.equal()` to generate an informative error message if the two objects are not identical (#165). * `safe_digest()` uses a better strategy, and returns NA for directories (#138, #146). * Random praise is renabled by default (again!) (#164). * Teamcity reporter now correctly escapes output messages (#150, @windelinckx). It also uses nested suites to include test names. ## Deprecated functions * `library_if_available()` has been deprecated. 
# testthat 0.8.1 * Better default environment for `test_check()` and `test_package()` which allows S4 class creation in tests * `compare.character()` no longer fails when one value is missing. # testthat 0.8 testthat 0.8 comes with a new recommended structure for storing your tests. To better meet CRAN recommended practices, testthat now recommend that you to put your tests in `tests/testthat`, instead of `inst/tests` (this makes it possible for users to choose whether or not to install tests). With this new structure, you'll need to use `test_check()` instead of `test_packages()` in the test file (usually `tests/testthat.R`) that runs all testthat unit tests. The other big improvement to usability comes from @kforner, who contributed code to allow the default results (i.e. those produced by `SummaryReporter`) to include source references so you can see exactly where failures occured. ## New reporters * `MultiReporter`, which combines several reporters into one. (Thanks to @kforner) * `ListReporter`, which captures all test results with their file, context, test and elapsed time. `test_dir`, `test_file`, `test_package` and `test_check` now use the `ListReporter` to invisibly return a summary of the tests as a data frame. (Thanks to @kforner) * `TeamCityReporter` to produce output compatible with the TeamCity continuous integration environment. (Thanks to @windelinckx) * `SilentReporter` so that `testthat` can test calls to `test_that`. (Thanks to @craigcitro, #83) ## New expectations * `expect_null()` and `is_null` to check if an object is NULL (#78) * `expect_named()` and `has_names()` to check the names of a vector (#79) * `expect_more_than()`, `is_more_than()`, `expect_less_than()`, `is_less_than()` to check values above or below a threshold. (#77, thanks to @jknowles) ## Minor improvements and bug fixes * `expect_that()` (and thus all `expect_*` functions) now invisibly return the expectation result, and stops if info or label arguments have length > 1 (thanks to @kforner) * fixed two bugs with source_dir(): it did not look for the source scripts at the right place, and it did not use its `chdir` argument. * When using `expect_equal()` to compare strings, the default output for failure provides a lot more information, which should hopefully help make finding string mismatches easier. * `SummaryReporter` has a `max_reports` option to limit the number of detailed failure reports to show. (Thanks to @crowding) * Tracebacks will now also contain information about where the functions came from (where that information is available). * `matches` and `expect_match` now pass additional arguments on to `grepl` so that you can use `fixed = TRUE`, `perl = TRUE` or `ignore.case = TRUE` to control details of the match. `expect_match` now correctly fails to match NULL. (#100) * `expect_output`, `expect_message`, `expect_warning` and `expect_error` also pass ... on to `grepl`, so that you can use `fixed = TRUE`, `perl = TRUE` or `ignore.case = TRUE` * Removed `stringr` and `evaluate` dependencies. * The `not()` function makes it possible to negate tests. For example, `expect_that(f(), not(throws_error()))` asserts that `f()` does not throw an error. * Make `dir_state` less race-y. 
(Thanks to @craigcitro, #80) * `auto_test` now pays attention to its 'reporter' argument (Thanks to @crowding, #81) * `get_reporter()`, `set_reporter()` and `with_reporter()` are now exported (#102) # testthat 0.7.1 * Ignore attributes in `is_true` and `is_false` (#49) * `make_expectation` works for more types of input (#52) * Now works better with evaluate 0.4.3. * new `fail()` function always forces a failure in a test. Suggested by Richie Cotton (#47) * Added `TapReporter` to produce output compatible with the "test anything protocol". Contributed by Dan Keshet. * Fixed where `auto_test` would identify the wrong files as having changed. (Thanks to Peter Meilstrup) # testthat 0.7 * `SummaryReporter`: still return informative messages even if no tests defined (just bare expectations). (Fixes #31) * Improvements to reference classes (Thanks to John Chambers) * Bug fixes for when nothing was generated in `gives_warning` / `shows_message`. (Thanks to Bernd Bischl) * New `make_expectation` function to programmatically generate an equality expectation. (Fixes #24) * `SummaryReporter`: You don't get praise until you have some tests. * Depend on `methods` rather than requiring it so that testthat works when run from `Rscript` * `auto_test` now normalises paths to enable better identification of file changes, and fixes bug in instantiating new reporter object. # testthat 0.6 * All `mutatr` classes have been replaced with ReferenceClasses. * Better documentation for short-hand expectations. * `test_dir` and `test_package` gain new `filter` argument which allows you to restrict which tests are run. # testthat 0.5 * bare expectations now correctly throw errors again # testthat 0.4 * autotest correctly loads code and executes tests in same environment * contexts are never closed before they are opened, and always closed at the end of file * fixed small bug in `test_dir` where each test was not given its own environment * all `expect_*` short cut functions gain a label argument, thanks to Steve Lianoglou # testthat 0.3 * all expectations now have a shortcut form, so instead of expect_that(a, is_identical_to(b)) you can do expect_identical(a, b) * new shows_message and gives_warning expectations to test warnings and messages * expect_that, equals, is_identical_to and is_equivalent to now have additional label argument which allows you to control the appearance of the text used for the expected object (for expect_that) and actual object (for all other functions) in failure messages. This is useful when you have loops that run tests as otherwise all the variable names are identical, and it's difficult to tell which iteration caused the failure. * executing bare tests gives nicer output * all expectations now give more information on failure to make it easier to track down the problem. * test_file and test_dir now run in code in separate environment to avoid pollution of global environment. They also temporary change the working directory so tests can use relative paths. * test_package makes it easier to run all tests in an installed package. Code run in this manner has access to non-exported functions and objects. If any errors or failures occur, test_package will throw an error, making it suitable for use with R CMD check. 
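For reference, here is a minimal sketch of the shortcut form introduced in
0.3 and the `not()` helper introduced in 0.8, using the API of that era:

```r
# shortcut form: equivalent to expect_that(1 + 1, equals(2))
expect_equal(1 + 1, 2)

# negation: assert that sqrt(4) does not throw an error
expect_that(sqrt(4), not(throws_error()))
```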
# testthat 0.2 * colourise also works in screen terminal * equals expectation provides more information about failure * expect_that has extra info argument to allow you to pass in any extra information you'd like included in the message - this is very helpful if you're using a loop to run tests * is_equivalent_to: new expectation that tests for equality ignoring attributes * library_if_available now works! (thanks to report and fix from Felix Andrews) * specify larger width and join pieces back together whenever deparse used (thanks to report and fix from Felix Andrews) * test_dir now looks for any files starting with test (not test- as before) testthat/MD50000644000176200001440000006521214172362302012430 0ustar liggesusersd01fbf45c685b29d8bd4965e0546da6a *DESCRIPTION 41b239837eef8d4c02659f86d53fe802 *LICENSE cdb7844c4d75c2817015047f38eeb641 *NAMESPACE 65fc4a81de8fea1ffbf1009db512df0a *NEWS.md 802e34f0c6eadc2ad197a17bc4888644 *R/auto-test.R dc353df66e35837d5d65de01144c7d7e *R/capture-output.R a12d0207725ffcd7e675deb942f57324 *R/colour-text.R fd44ce3405c5638877d39d724fba60d1 *R/compare.R 26530fa797e63d4c873c36cab56e7fb5 *R/context.R 849b2307f8932899a06330c426727c95 *R/deprec-condition.R 8a97ca63fd55a7c18e38fcdca2ca8b7d *R/describe.R 7cfd5fb94e0c6792bf17de9be05fd5d4 *R/edition.R 98381f10294371d3bca8f237bcbfb199 *R/evaluate-promise.R 4c97d19a013de45f294d5ad2d40d4a5d *R/example.R 88b25caf06a59477ffc0d8c670f1bfba *R/expect-comparison.R fced675ac8f6e85354c968de657f1b41 *R/expect-condition.R f92ff89c64ed26d748e40b9646fe847f *R/expect-constant.R 4878a51766c0a697157fa049ded641e7 *R/expect-equality.R cb497c06ca66ce9237c790e73a12cd98 *R/expect-inheritance.R bf48973d315d23b0db3fab0b48f641b3 *R/expect-invisible.R bb47012be49be6422b588e9d9e4a534e *R/expect-known.R 1e582c03f050febf82c740f09525fad1 *R/expect-length.R d11ce4b1435c6ee404638e9c505a9039 *R/expect-named.R 4a2f95afe32eb552a7b7cb53241948b2 *R/expect-output.R 7501e56a3ee11fa5ec7454fdec855dd1 *R/expect-self-test.R d63315e9ba5731a39ccc33b4036d9911 *R/expect-setequal.R 89b7b4fdf3b4905ce50c0dd676c7f36b *R/expect-silent.R 4fa3fc1ea7ef0c298c5d651b3f9a4087 *R/expect-that.R 301538ff815446ae531a59a633ad522a *R/expect-vector.R 664cbf2e3fac3349a0538a43a2130dc1 *R/expectation.R ac0cffc560449410bd5c39702e771df8 *R/expectations-matches.R 7cb45eb61c44fc09cc0f5d4740ee272e *R/local.R a2a562b4b752424a7a66c1bd11e8d718 *R/make-expectation.R 15e48ef68158308afcd3397859152d6f *R/mock.R 974bcc65f9893c76bb5a178ea10d2af4 *R/old-school.R 8416d7964f2ebb76f0c52403ba0e4f69 *R/parallel-config.R 89fff1763d6bfe3aa187b24664ff56af *R/parallel-taskq.R f4e48a6a063cdaa70d3aefc4d74f7577 *R/parallel.R ac67b133b202448bdaec11a774aa886d *R/praise.R 96bf8fd477ae03415a136fcbca68115f *R/quasi-label.R 599909386289589db2612368a8b85415 *R/reporter-check.R f17210d92a3e6987b9849f6626ba6389 *R/reporter-debug.R 758b2751f8e124b79ba99365f23a6d47 *R/reporter-fail.R bae3ee2b0d4c98be3232c1c3548e6dc6 *R/reporter-junit.R a7bd75128e704dffeed4b300de5b27e9 *R/reporter-list.R 6ab3fa8206a16bdd6ca543adf952f5a2 *R/reporter-location.R 30edcf9243f5e474f62db9194e6a26ef *R/reporter-minimal.R 1fc8bbb63823173620e69158fe9763c2 *R/reporter-multi.R 0b63b02811744a07031288e6da8fa0f7 *R/reporter-progress.R ecf71d4fb8b77a9c41c15105559578e8 *R/reporter-rstudio.R 5febcb7776b44069599f7058216cb156 *R/reporter-silent.R 815862d98e9faebb50ccd126bf2c3849 *R/reporter-stop.R 687977929a977a62f24e1cc326e6f900 *R/reporter-summary.R ab483ded469442eb3e7de3c5abf1bdad *R/reporter-tap.R f169ef41d98b616eab94aafd3acbf567 *R/reporter-teamcity.R 
d379da0a76cba131735f2f8247fc5803 *R/reporter-zzz.R e128cdad1ed36062302b0d8bfbe8e755 *R/reporter.R 869b1b97d288424e5c16b0809babd7c9 *R/skip.R 5534cd15f2b19208970321faa9097500 *R/snapshot-cleanup.R 31a174d884515f8b6aff3a193658db9e *R/snapshot-file-snaps.R 9c270807fb2f669273c02935bb3b4a11 *R/snapshot-file.R 4306c2812290d26405c6e4979b1bb247 *R/snapshot-manage.R cc792e7be896480f7a0754446c1758fd *R/snapshot-reporter.R 86e7cb85a36cc8e8806721e172b14802 *R/snapshot-serialize.R 674656ccd0887404ddfd674a75b70827 *R/snapshot.R 9a50be19263b0cf9c858b6859521beb6 *R/source.R 0e8e617e8f105c742610b1146e5c71d9 *R/stack.R ac841635d360c58c3f2bf2b9536a6d35 *R/teardown.R e459bee96ad0c9c7883a34ea0a52917a *R/test-compiled-code.R 74e9d311d857b816bd903c1436b97c2a *R/test-env.R 2aeabfb6235389e20670f4e77671dc00 *R/test-example.R 7f4561d017ff9181fa5e59033927d635 *R/test-files.R 2bdba0e767cb91043e99c5e94826f1f4 *R/test-package.R f58cc700df3c4be03fd84842a94f71a9 *R/test-path.R d54b7d3b7aeb83d2240d67c946d4e937 *R/test-that.R 4a91c1fdf03ff55aca69da38192bb696 *R/testthat-package.R 04e13da826a1f40d2e85fabe87e934de *R/try-again.R e346d876f90915153e09188c60b13c4d *R/utils.R 6ebc7079ae9dc89ef10619f7f39ac928 *R/verify-output.R c8e8ce0679bbce63f99c4ae3ec3ba6ad *R/watcher.R 9f765246e4f0dd178f534f1650543739 *README.md 97135503d4b58252649cbd8b455c36de *build/vignette.rds cd1f5828e307a856f2844936aea84ef9 *inst/CITATION dd843ceb5a1b0bdde93647b4659fe17a *inst/doc/custom-expectation.R f1f467f495c3579da4b55332fa07a8c3 *inst/doc/custom-expectation.Rmd cb81951fd67735210e9683b3fd012aff *inst/doc/custom-expectation.html d76c7a669f99acd6eb21ddc5529934fe *inst/doc/parallel.R 7e876d3d1dd249fa7452e42f93ce6238 *inst/doc/parallel.Rmd 3f1680685192e55011ba378a5a538336 *inst/doc/parallel.html d9085ca920d4ec3da61eaa3c6857f0da *inst/doc/skipping.R 712386791b14344a3ab7b2c5a0da63a9 *inst/doc/skipping.Rmd a183f5560603c40d02dcb4a036f1588c *inst/doc/skipping.html 7b03807b6a08f41652f0024b2000f7c4 *inst/doc/snapshotting.R 7d89879aac94338a894440abe4a2a4ae *inst/doc/snapshotting.Rmd a61edf10d02ca372ccd62b0f1c1ef12e *inst/doc/snapshotting.html c4f4dda62cac797bd42dd69449245cba *inst/doc/test-fixtures.R 269cedcd7be1757537bd025225e7d340 *inst/doc/test-fixtures.Rmd 256e12c61b8e6963b6b801d6d1c4670f *inst/doc/test-fixtures.html 48e38f687ff75264ab8a0f8deb0fe230 *inst/doc/third-edition.R 3103ff73d02560f0d4fdff7f3408df9f *inst/doc/third-edition.Rmd 5399d5170c38a2add81e429eea6f537d *inst/doc/third-edition.html 1ece0759f60e98193d53545648d19146 *inst/examples/test-failure.R 522cf134c79624117d579f2c4d18f42b *inst/examples/test-success.R 543392950ccc39ed50ebf26c75de0910 *inst/include/testthat.h 6fedbe5ace0d8e58a00b296d75b4c83f *inst/include/testthat/testthat.h ed1eab7fc6a36a53aa5170c5b5923e2e *inst/include/testthat/vendor/catch.h f481e7613929d4968a4a4c73031f7967 *inst/resources/catch-routine-registration.R f35a879e88834e75aee99fc9817547a0 *inst/resources/test-cpp.R 3332a4affe5b211b37b65a3d3311d9ba *inst/resources/test-example.cpp c2cb0c30e10da611f9fbe56aea52f7a9 *inst/resources/test-runner.cpp d4e373bac6ea29bee356469a5bbaebb2 *man/CheckReporter.Rd edc5d3802e3076e4606f28f09b5b1ce2 *man/DebugReporter.Rd 9881fb8c1dd20268d378f9e1bd548b5a *man/FailReporter.Rd 876c86281574b5eee4831552be70dc4d *man/JunitReporter.Rd fe5a3b0d86c3e7cdec477325324562c2 *man/ListReporter.Rd 3548290db7cf9df21daae636d875297c *man/LocationReporter.Rd 5687abe769ec8edd9eeb63bf9c1fca67 *man/MinimalReporter.Rd 5db0f624d2f94d9c3724ce737cc8c816 *man/MultiReporter.Rd bca0db0f1a10adc2c4829a0d9f9e32e2 
*man/ProgressReporter.Rd 1c32e4cee3fdb817981c9988d65bcbf5 *man/RStudioReporter.Rd be876d6eb0600f4f477c7f03a74dba27 *man/Reporter.Rd 2d2ee7460e12c84af9a43a2ba3757e46 *man/SilentReporter.Rd a5b2ccf9a024a8e28ffb41e966a8191c *man/StopReporter.Rd cf04a9521c40ba782a47e0042ab6ab79 *man/SummaryReporter.Rd f97d284533b5adb0f3c81cbb2896eefe *man/TapReporter.Rd a1561afe69be72d8a002b6500089ac1b *man/TeamcityReporter.Rd 2de182af781dd89758b736e0d56700eb *man/auto_test.Rd 01158709d49a260cf79cd8613aa57a1e *man/auto_test_package.Rd 8f0558d9d3215c196560727c4137f095 *man/capture_condition.Rd 6d79d954a3742bf9c545015fdc66ba43 *man/capture_output.Rd b1592f1159ecf2c61ef06d67ba19adeb *man/compare.Rd ea1c54d24c6395c0f988d14b0ae40956 *man/compare_state.Rd 13e06eba0d99b89ceccefa897893d91e *man/comparison-expectations.Rd 814661cc0285f178d3e01259f3e4ce8f *man/context.Rd 3ffd0eedcd434635ea708cb81e763fe1 *man/context_start_file.Rd b87e21bc33b7f60643433a5bb46b1086 *man/default_reporter.Rd cd824d50d591bff1b124200fbdf85353 *man/describe.Rd f2b7ae4da7936bcf6a6d13a161c451ae *man/dir_state.Rd 0287dd5313a8591b8e487aa2fcd2e7eb *man/equality-expectations.Rd 83120acf2b4d08657f53db8ec927e470 *man/evaluate_promise.Rd 8cd508c23aa5bd6f4b341856ee58abcb *man/expect.Rd e21560c182e3f4ab215740ae8f73dc3e *man/expect_equivalent.Rd 85a69f065e4854b12c2d2fbccc4f1c5f *man/expect_error.Rd fe667304a801324eb42f05a3cd466e2a *man/expect_invisible.Rd de2914b6db8a2ef5214e56fd2ceb011a *man/expect_is.Rd 906962cc8a7e40311b55c871ed44ec42 *man/expect_known_output.Rd 0110276629d570a21ab5592e591c3c53 *man/expect_length.Rd c56df08d2028bd283bc3aa4fec3263e7 *man/expect_less_than.Rd 95579b49e810d8b29c450a0a574be349 *man/expect_match.Rd 81546f172945741b9c53d5db74a38034 *man/expect_named.Rd 7a77995480ea2766d2bf066a9b43a9b0 *man/expect_null.Rd cfa5b7125b388a54c89a8f55f63def01 *man/expect_output.Rd 1b59635d836afff547df6f8945629bff *man/expect_output_file.Rd 09fc963aa28bf093a57be94f9b42763d *man/expect_reference.Rd 6b86d50f9260fb216c136ff3cc6cd25b *man/expect_setequal.Rd e0802bb2288cbe59c92777e14d1243fc *man/expect_silent.Rd e4ff84e88523912e422a1b705fa8ee50 *man/expect_snapshot.Rd c97b01ce91c16f9b2756faa373047181 *man/expect_snapshot_file.Rd 3e1c9da323c0d49d8a8ae8fd351d7b83 *man/expect_success.Rd 6967739c8594696c2551a0f636dacd8c *man/expect_that.Rd da32dd115d1683204617fd1751860fef *man/expect_vector.Rd f04aa37cff268d7bcd1512f47e57acf5 *man/expectation.Rd fe3daf4355432a6dd7038a4ffd55aa48 *man/fail.Rd cb1e46f469cfbbbde29c8b5113e1d789 *man/figures/lifecycle-archived.svg c0d2e5a54f1fa4ff02bf9533079dd1f7 *man/figures/lifecycle-defunct.svg a1b8c987c676c16af790f563f96cbb1f *man/figures/lifecycle-deprecated.svg c3978703d8f40f2679795335715e98f4 *man/figures/lifecycle-experimental.svg 952b59dc07b171b97d5d982924244f61 *man/figures/lifecycle-maturing.svg 27b879bf3677ea76e3991d56ab324081 *man/figures/lifecycle-questioning.svg 6902bbfaf963fbc4ed98b86bda80caa2 *man/figures/lifecycle-soft-deprecated.svg 53b3f893324260b737b3c46ed2a0e643 *man/figures/lifecycle-stable.svg 1c1fe7a759b86dc6dbcbe7797ab8246c *man/figures/lifecycle-superseded.svg b7e68745669ec4141b7940f4dbebc48f *man/figures/logo.png bb0e79b8bdc83b519a64be149499c48c *man/find_reporter.Rd e92deedee075ce21672aa7c6e81e3f5b *man/find_test_scripts.Rd 71241dcc6074871b9302cf525f0b1970 *man/inheritance-expectations.Rd 1bec47b13d94110b9b4f18e0684a9597 *man/is_informative_error.Rd c24fefed7ac5160091ad4d80619530d2 *man/is_testing.Rd d9f231b910a533f0602f635f1365dc5e *man/local_edition.Rd 35a701ec78c0f4afd9dc295060cad41f 
*man/local_snapshotter.Rd 642600684ba47d52d012eb9980552399 *man/local_test_context.Rd 72081c5d88493c1808f7ac0ebf6b46f0 *man/local_test_directory.Rd bc75702b5d5b045e3c356935f8765be2 *man/logical-expectations.Rd 8aa87707c6a53a51dcc2a913a93154bd *man/make_expectation.Rd 970f42fa63244262e70100f5f8304614 *man/not.Rd aab6f1c7aa7616c8e4a66cd0e714e959 *man/oldskool.Rd 9f3353edc3135d9b1238e80d36835de9 *man/quasi_label.Rd b8f8a1cbf3a5afc81d1df65fa2f2df03 *man/reexports.Rd efdd9f36891d19bf8d083d569c6ab8d6 *man/reporter-accessors.Rd 99a203507a521441bd629594b424c0c2 *man/run_cpp_tests.Rd ae91c9a61217c2f9da9633d63c74b405 *man/safe_digest.Rd d337b306fad605e6872b8226af56987f *man/skip.Rd 8e5371e5ea33d6764735d58e37d516f8 *man/snapshot_accept.Rd f6c624697cf5d3815493375df55e1474 *man/source_file.Rd 8b7e53a2eb81a7448c7fc7d4e26c1623 *man/takes_less_than.Rd dfe3cdc79671f3b5f56be60ddf09fb35 *man/teardown.Rd da7c3d8b9499eba0e5b074d16d91cd07 *man/teardown_env.Rd 003d8dd0a4193fd3720528778380c51e *man/test_dir.Rd 1069e33073c62ed24b08bd0f30dff578 *man/test_env.Rd 44da3653452f5d2f415cbe2d50366bca *man/test_examples.Rd 3198358b835bf690025e23cc4d9c8d8e *man/test_file.Rd ddf48075ae98c7b06df82c6029565510 *man/test_package.Rd 042d98b34dd7977b9a62aafcf9e6609d *man/test_path.Rd fa315565b0e708e735bcf28468672b47 *man/test_that.Rd 150931ae2c30e4d787aee58a664fdec9 *man/testthat-package.Rd d8686384001c8ebc03b84dada1fa43f4 *man/testthat_examples.Rd 79ee52da3640f0c553c36954613485f7 *man/testthat_results.Rd ea0d54d0e935a91b3b1ec9eb4cc1c792 *man/try_again.Rd 4763664af48c4e1008c47f7fd2d14808 *man/use_catch.Rd 60f4ba97516a0fc94acd5ddf56321e6b *man/verify_output.Rd 54995c6ac4ddd10f9cdc2266214921de *man/watch.Rd 39c10b4769f2514e407b2ce1631e4400 *man/with_mock.Rd d09410b8ca2729ce83dca3d0d4359f3e *src/Makevars d09410b8ca2729ce83dca3d0d4359f3e *src/Makevars.win 992cdc3ae7ebba7ba239d56091c5e079 *src/init.c 8d71e14ae7b661a36b31c729f0b5a994 *src/reassign.c 69850efbfde51c34daccd28048c41e87 *src/test-catch.cpp 3332a4affe5b211b37b65a3d3311d9ba *src/test-example.cpp c2cb0c30e10da611f9fbe56aea52f7a9 *src/test-runner.cpp 286b1785c90d8f8bc951c4e044b53fc5 *tests/test-catch.R 0eb375d3daee2350d0f7f39aa5b0f1a6 *tests/testthat.R c1c38a2c4f64bce28da3c95e8d4dca2f *tests/testthat/_snaps/R4.0/snapshot-file/nickname.txt 5b7cdc91495760247b7d59cd5d5f8419 *tests/testthat/_snaps/R4.0/snapshot.md a6f9aaf0f0295a151cfcb17a220e93f6 *tests/testthat/_snaps/R4.1/snapshot-file/nickname.txt 92a9a42959a92937d78a744b05a40cf7 *tests/testthat/_snaps/R4.1/snapshot.md f3e0d4e81d23f7f66caf5ae62568e398 *tests/testthat/_snaps/edition.md e0b1b11ff01a3ba6dcfa0241af42e614 *tests/testthat/_snaps/expect-condition.md ff901ca601d27afc930ec05b6e1ae18f *tests/testthat/_snaps/expect-constant.md 4859969895edcbdb11d67a6f1343d056 *tests/testthat/_snaps/expect-equality.md 034c5ca1f9c51882894d71b9a67686a6 *tests/testthat/_snaps/expect-inheritance.md 0a60cb0224c171b937ba5b0b79b5c485 *tests/testthat/_snaps/quasi-label.md 8413ae700235cfde6d11c7172910d4ef *tests/testthat/_snaps/reporter-check.md 415d35b8e7a20e47ff95cc5cb14f6336 *tests/testthat/_snaps/reporter-debug.md c4e9df91875fe129901efe4190b7d2d2 *tests/testthat/_snaps/reporter-junit.md 29bc99ee7861078312ab8e6977cf712d *tests/testthat/_snaps/reporter-location.md d18dd86f32e3226cfe5a295c88d1893c *tests/testthat/_snaps/reporter-minimal.md a7743f86e8dc9048dd1b41c29e6046ad *tests/testthat/_snaps/reporter-progress.md 4a719b76196ff128e80b4d004f0212fe *tests/testthat/_snaps/reporter-rstudio.md d135a16952dcec53f28dbf91c25cb396 
*tests/testthat/_snaps/reporter-silent.md b12c91936f2477ebd6b40664ade9bae3 *tests/testthat/_snaps/reporter-stop.md 043415632bc60f06721d5d263e684f29 *tests/testthat/_snaps/reporter-summary.md c5ef70921e74caac60fe1394e335eac0 *tests/testthat/_snaps/reporter-tap.md 128133af55e47a20882ed2cde579f28c *tests/testthat/_snaps/reporter-teamcity.md cbe208856d70c9f36ebd7778ec14fe22 *tests/testthat/_snaps/reporter.md aa9250a2b33cdcc1940aff557de6fb51 *tests/testthat/_snaps/rlang-1.0/snapshot.md 9a6793f060a49e7f97b25abc99a701cd *tests/testthat/_snaps/rlang-pre-1.0/snapshot.md 97deb866b09b5f103648e98d3186b61e *tests/testthat/_snaps/skip.md 7bd5f05b86a4188dab4dec126d3a9ff1 *tests/testthat/_snaps/snapshot-cleanup.md f779bd54a3df2fe2dd4d78c3c232b970 *tests/testthat/_snaps/snapshot-file.md 933222b19ff3e7ea5f65517ea1f7d57e *tests/testthat/_snaps/snapshot-file/a.txt 6463474bfe6973a81dc7cbc4a71e8dd1 *tests/testthat/_snaps/snapshot-file/foo.csv 241e8f7ec356602f89a684b4bbf36456 *tests/testthat/_snaps/snapshot-file/foo.png b7fdd99fac291c4bbf958d9aee731951 *tests/testthat/_snaps/snapshot-file/foo.r dd7194eb0b78cf0086ed31cc60e473a3 *tests/testthat/_snaps/snapshot-file/secret.txt dfbf5d7e932ab579f13420db4f2f1289 *tests/testthat/_snaps/snapshot-manage.md 56a92c16c95b14892dcfba38097dfa8c *tests/testthat/_snaps/snapshot.md 64b9df146b6649dbb61cf8f5953beba2 *tests/testthat/_snaps/test-files.md e371718f8ce69ca45ae1ec0cb3436348 *tests/testthat/context.R 7340014f5a98c1db4a52789a689ee123 *tests/testthat/helper-assign.R 58f2df05f4c5b14dc005ac4db3aa352c *tests/testthat/helper-testthat.R cfde242811815c10019817cd919d7719 *tests/testthat/one.rds fe065203804974da89a96058ef154eaf *tests/testthat/reporters/backtraces.R b57f2dd170cc6d984479cb92587cae53 *tests/testthat/reporters/context.R 652090f643ef38fb1ed64cc2a37587b0 *tests/testthat/reporters/error-setup.R ccdc6f8bd60642b51e8a68e397189484 *tests/testthat/reporters/fail-many.R a058feb5736127ca5180330c3674864c *tests/testthat/reporters/fail.R fc7290dc83a3ce9be1ccbcaf21fa953c *tests/testthat/reporters/long-test.R e5da2b4d0dd836996ff542cf90e16dc9 *tests/testthat/reporters/skips.R 6067370f05fc90a582ec2034b5d7ce45 *tests/testthat/reporters/successes.R 9f58005dcfbe8b9d1ae60630a801f59a *tests/testthat/reporters/tests.R cb65e68c4e96571239640fba1d557511 *tests/testthat/setup.R bf15d5dd3f1110e097d830715000eeed *tests/testthat/teardown.R 6b43c8a6b6a93727a1d9d1ffb87a416c *tests/testthat/test-bare.R d27753448fec04085e147158cf972f8e *tests/testthat/test-catch.R e7424efdfd996568c24e494faefdfc08 *tests/testthat/test-colour.R 9f67151bae4bf0ba114983b97f47a437 *tests/testthat/test-compare.R 544d6c83622dd48f81a0fe304d63db5f *tests/testthat/test-context.R b504f51639651d3ace94dc2ed7fa2930 *tests/testthat/test-deprec-condition.R fd8f5eebfc9276a1486c518c56233ea8 *tests/testthat/test-describe.R ee7d5651d5f7166fae6b1b0ca284a063 *tests/testthat/test-edition.R ef67f70c07f7152e1e6c2ded78e0e530 *tests/testthat/test-error/test-error.R 1319379d822056d441babfc1d1eaf10c *tests/testthat/test-evaluate-promise.R c1df8464c5185fb393443b935a6b98db *tests/testthat/test-examples.R 4acf95068d86e780ff2c2bc25bd99321 *tests/testthat/test-expect-comparison.R a7874e69ca701b728220f78597b4e1db *tests/testthat/test-expect-condition.R d236231f0cfc21771f38d319f4d6a7e5 *tests/testthat/test-expect-constant.R 3c3d19844d34e8ba693b399915d8093b *tests/testthat/test-expect-equality.R f08b227e0f9de2394f4e2d7fc6a4227b *tests/testthat/test-expect-inheritance.R 62eeef5caaf6c04fdb851387d6a606cd *tests/testthat/test-expect-invisible.R 
d41d8cd98f00b204e9800998ecf8427e *tests/testthat/test-expect-known-hash.R 236e190fb803158e038a3d756384d97c *tests/testthat/test-expect-known.R 668d3c8b65fac6f1be4e1c9db3ab05da *tests/testthat/test-expect-known.txt 01f5516789b88009a1a8974acad4b8b2 *tests/testthat/test-expect-length.R fb4bc5b30b834af8dde1e244ef74c135 *tests/testthat/test-expect-match.R 9dc68f259d6c188a8b8655f39a9c7374 *tests/testthat/test-expect-named.R 5c82785a60440d0453aefabe39886670 *tests/testthat/test-expect-output.R ae08f96cafe24d57519a37f81119ea7a *tests/testthat/test-expect-reference.R a73c4849e3bfb2dfe8587cf3e74e5a7c *tests/testthat/test-expect-self-test.R 7e8bd7ece9dba98a6b5e085b50435abf *tests/testthat/test-expect-setequal.R 74d6b5f91689b82365595a5cfb6c06c6 *tests/testthat/test-expect-silent.R effdd09400ca0cfe2bb8ff77e98411cd *tests/testthat/test-expect-vector.R 36979f047b3d34dc153218c84000cf6a *tests/testthat/test-expect_that.R 792017f0158d2305c77b3c68998eaf27 *tests/testthat/test-expectation.R dca9d21a10fa57c3ab3d08b240855623 *tests/testthat/test-helpers.R 9516ad5a07a2871d72f2226e1ac40996 *tests/testthat/test-label.R 1ef4f0238860c46c6486f32722c24171 *tests/testthat/test-list-reporter/test-bare-expectations.R 0c8496c069c63c0ef11ee19faa7ad845 *tests/testthat/test-list-reporter/test-exception-outside-tests.R 3161831aded4882822106344d35eed7e *tests/testthat/test-list-reporter/test-exercise-list-reporter.R aaa1e95b5eaa73ec22fa705f9013a3e6 *tests/testthat/test-list-reporter/test-only-error.R bfcec6eccc3982df96149128f3051b32 *tests/testthat/test-local.R 80f9ec8717b0d42b02dc49422f715f63 *tests/testthat/test-make-expectation.R b2c43bf1921166d9d94816e75edb56f5 *tests/testthat/test-mock.R b132e74e85ed7c07b7d2db188d7dd175 *tests/testthat/test-old-school.R a71af7f5125c81451629647d025ceb07 *tests/testthat/test-parallel-crash.R 6c5d413cfb42174258f529f64f974184 *tests/testthat/test-parallel-outside.R daae1f7a2a401900239f71196b6c6c62 *tests/testthat/test-parallel-setup.R 3e37ffdd5d85280ccd67fde69b1a33be *tests/testthat/test-parallel-startup.R 38a2c7013b5eff26083cd181dd01d093 *tests/testthat/test-parallel-teardown.R 8dfaa1ec213eb5358b50799da42cd184 *tests/testthat/test-parallel.R d1ee75fad079f010da5bfb604b84a52c *tests/testthat/test-parallel/crash/DESCRIPTION dc21c19f0d6968ee25d441b2cf46017d *tests/testthat/test-parallel/crash/NAMESPACE 3d72cef70a441a8260ce53dfc26a04d1 *tests/testthat/test-parallel/crash/tests/testthat.R 39813170f6986b5cd601116137d89168 *tests/testthat/test-parallel/crash/tests/testthat/test-crash-1.R 6eb2187975cc0ccd1b73b80b5cbab548 *tests/testthat/test-parallel/crash/tests/testthat/test-crash-2.R e434fde02430d748399de92d7565b53c *tests/testthat/test-parallel/crash/tests/testthat/test-crash-3.R d1ee75fad079f010da5bfb604b84a52c *tests/testthat/test-parallel/ok/DESCRIPTION dc21c19f0d6968ee25d441b2cf46017d *tests/testthat/test-parallel/ok/NAMESPACE 3d72cef70a441a8260ce53dfc26a04d1 *tests/testthat/test-parallel/ok/tests/testthat.R 39813170f6986b5cd601116137d89168 *tests/testthat/test-parallel/ok/tests/testthat/test-ok-1.R 6eb2187975cc0ccd1b73b80b5cbab548 *tests/testthat/test-parallel/ok/tests/testthat/test-ok-2.R b1871f1f6b5cfb6f63680e3d3c1aa0fa *tests/testthat/test-parallel/ok/tests/testthat/test-ok-3.R a33379cc3428a98003d46198e7074e32 *tests/testthat/test-parallel/outside/DESCRIPTION dc21c19f0d6968ee25d441b2cf46017d *tests/testthat/test-parallel/outside/NAMESPACE 3d72cef70a441a8260ce53dfc26a04d1 *tests/testthat/test-parallel/outside/tests/testthat.R 39813170f6986b5cd601116137d89168 
*tests/testthat/test-parallel/outside/tests/testthat/test-outside-1.R 33dc2f3ae68023ba7e1b2f5e68bc0fb8 *tests/testthat/test-parallel/outside/tests/testthat/test-outside-2.R b1871f1f6b5cfb6f63680e3d3c1aa0fa *tests/testthat/test-parallel/outside/tests/testthat/test-outside-3.R 9ec1263eb2ad029be653e7ed104c1a5d *tests/testthat/test-parallel/setup/DESCRIPTION dc21c19f0d6968ee25d441b2cf46017d *tests/testthat/test-parallel/setup/NAMESPACE 3d72cef70a441a8260ce53dfc26a04d1 *tests/testthat/test-parallel/setup/tests/testthat.R e4f8942fe5ff380578890459dd5149cc *tests/testthat/test-parallel/setup/tests/testthat/setup-bad.R 39813170f6986b5cd601116137d89168 *tests/testthat/test-parallel/setup/tests/testthat/test-setup-1.R 6eb2187975cc0ccd1b73b80b5cbab548 *tests/testthat/test-parallel/setup/tests/testthat/test-setup-2.R b1871f1f6b5cfb6f63680e3d3c1aa0fa *tests/testthat/test-parallel/setup/tests/testthat/test-setup-3.R d1ee75fad079f010da5bfb604b84a52c *tests/testthat/test-parallel/startup/DESCRIPTION dc21c19f0d6968ee25d441b2cf46017d *tests/testthat/test-parallel/startup/NAMESPACE e6a9b063ac42301110ff53aa24715b23 *tests/testthat/test-parallel/startup/R/fail.R 3d72cef70a441a8260ce53dfc26a04d1 *tests/testthat/test-parallel/startup/tests/testthat.R 39813170f6986b5cd601116137d89168 *tests/testthat/test-parallel/startup/tests/testthat/test-startup-1.R 71ef3908290c54ef643890998d61099b *tests/testthat/test-parallel/teardown/DESCRIPTION dc21c19f0d6968ee25d441b2cf46017d *tests/testthat/test-parallel/teardown/NAMESPACE 3d72cef70a441a8260ce53dfc26a04d1 *tests/testthat/test-parallel/teardown/tests/testthat.R cda7cbaf6630da439cc5f74a16dc53ad *tests/testthat/test-parallel/teardown/tests/testthat/teardown-bad.R 39813170f6986b5cd601116137d89168 *tests/testthat/test-parallel/teardown/tests/testthat/test-teardown-1.R d41d8cd98f00b204e9800998ecf8427e *tests/testthat/test-path-installed/testthat-tests/testthat/empty d41d8cd98f00b204e9800998ecf8427e *tests/testthat/test-path-missing/empty d41d8cd98f00b204e9800998ecf8427e *tests/testthat/test-path-present/tests/testthat/empty d8a356c9b4ad1db5b1e952836cfa5d6a *tests/testthat/test-quasi-label.R 83e096791fddb7fd68fade67e4d8333b *tests/testthat/test-reporter-check.R a3dce6b660f9019d0cc3791e32500273 *tests/testthat/test-reporter-debug.R d3b38c4215e3f39665d392a13fde91d8 *tests/testthat/test-reporter-junit.R f8d3371d060087a3421a70679c509211 *tests/testthat/test-reporter-list.R 3d307310e7394ec031092bf46ffca5f6 *tests/testthat/test-reporter-location.R 7884937e8a588fd8dbb0dc7470478be8 *tests/testthat/test-reporter-minimal.R e966ab94f3dfb4422545f93ad5272ed2 *tests/testthat/test-reporter-multi.R e6cd7d988614c8cfb793879f969cf839 *tests/testthat/test-reporter-progress.R b97f40768cbb38cdd8672c483342c041 *tests/testthat/test-reporter-rstudio.R 67f9cf6d77482b5e4f97bd6eac82a141 *tests/testthat/test-reporter-silent.R 281963bf1731e5766f2448b9c758c068 *tests/testthat/test-reporter-stop.R dc9273fcbec26a40eeab4c5a12c9e57c *tests/testthat/test-reporter-summary.R 19081ae794f7447f93376eb59b4080de *tests/testthat/test-reporter-tap.R 7e7a59a45234dbf25710470819604770 *tests/testthat/test-reporter-teamcity.R a59a0a456397a6f888e602e70ca15b94 *tests/testthat/test-reporter-zzz.R 3515de5444f5209ad6be06812a53bd66 *tests/testthat/test-reporter.R 9947fe8dc66e3e39681d043dc66694ae *tests/testthat/test-skip.R b37fd261db81d69032da921e1092da3c *tests/testthat/test-snapshot-cleanup.R 4cc0e6cdce17502b95fc89eb16bae733 *tests/testthat/test-snapshot-file-snaps.R 653f213c2a1b9e3eeea62e63a3143c85 
*tests/testthat/test-snapshot-file.R 8bbeae167ab960bd86e1cdb9e548f162 *tests/testthat/test-snapshot-manage.R 5bfb8370598fca931fb64270351444cc *tests/testthat/test-snapshot-reporter.R aaf361e1b153030224f357d9f294cf91 *tests/testthat/test-snapshot-serialize.R f4caa429dadfb0f1015e9c56849dfbbe *tests/testthat/test-snapshot.R 5d4b8e95ddde21af7c298512f1012c35 *tests/testthat/test-snapshot/_snaps/snapshot.md 797943912637f1bcd858ab419f74e152 *tests/testthat/test-snapshot/test-expect-condition.R 2bb5aca20c88188e301a72c26d6ef087 *tests/testthat/test-snapshot/test-snapshot.R c4a62cd162daf6fccc348f9cbe7d7486 *tests/testthat/test-source.R 6addb8da099d18560d99764513cb07a5 *tests/testthat/test-source_dir.R a7af0f0871cdbebbac0049dd5d293914 *tests/testthat/test-teardown.R c8376dc1c47a51eb9c76f248406a0a03 *tests/testthat/test-teardown/test-teardown.R b966a510407e764ec554e9c768b9a8fd *tests/testthat/test-test-env.R 3e00745173d6a97f092f657129334ac9 *tests/testthat/test-test-example.R 714b46fd2a84853e2db38f788a47b23f *tests/testthat/test-test-files.R 365120709f80d07c3b29e1ab9e9d304d *tests/testthat/test-test-path.R 3d7cc5f8f0456186f87654b29aa4bd0c *tests/testthat/test-test-that.R 8180c3cea7b02ab0f28567b25655bee0 *tests/testthat/test-try-again.R d057f919f3b2a3290f5fffac03562c85 *tests/testthat/test-verify-conditions-cr.txt 665a6dd827f27542fe0d19d50954d315 *tests/testthat/test-verify-conditions-lines.txt 2ced23f41abd70581eef5cd593c49a56 *tests/testthat/test-verify-conditions.txt 116a95120141a2554ce41fd7ca35cc6b *tests/testthat/test-verify-constructed-calls.txt eb291cb4ef31de56569971459166ef54 *tests/testthat/test-verify-output.R 825064675efaad62e7b9e69653bd169c *tests/testthat/test-verify-output.txt e20a8def27befc0c9f779683b97c8d4a *tests/testthat/test-verify-unicode-false.txt 28606b7dd874612a7e8ed0c1e0b0ab4d *tests/testthat/test-verify-unicode-true.txt f5af1f7fab9ca0f74cabd7e5eb913cb7 *tests/testthat/test-warning/test-warning.R a1b399327ceb9912efd8d151d9149fb7 *tests/testthat/test-watcher.R a6efbc07fba2e30ed4da6b6806873548 *tests/testthat/test_dir/helper_hello.R 78eae8b642ede48d5023c6e6abe463fc *tests/testthat/test_dir/test-bare-expectations.R ba645a6fc60ca2944390c7503cdffcfb *tests/testthat/test_dir/test-basic.R e9e88c0f432cda4fcd7b18029a976d08 *tests/testthat/test_dir/test-empty.R 6af43cbf46ca3b343269f62408f86b28 *tests/testthat/test_dir/test-errors.R 293aeab21289b01d552cfee428197607 *tests/testthat/test_dir/test-failures.R be7cdfaf36144fd3faf4b5ecd6590673 *tests/testthat/test_dir/test-helper.R d60e93fdd0bbe19790181b8b4cd75b16 *tests/testthat/test_dir/test-skip.R 703ea03463d04859caee9ec7be0ebfed *tests/testthat/too-many-failures.R 51e992e553c268305920c6522b0b284e *tests/testthat/utf8.R 9674460b911257f8a2de15ed1a900b14 *tests/testthat/width-80.txt f1f467f495c3579da4b55332fa07a8c3 *vignettes/custom-expectation.Rmd 7e876d3d1dd249fa7452e42f93ce6238 *vignettes/parallel.Rmd d67a684934b84d11edf5e52ad8fa4fe2 *vignettes/review-image.png b8d2d88c269e3edce142c51baf7af114 *vignettes/review-text.png 712386791b14344a3ab7b2c5a0da63a9 *vignettes/skipping.Rmd 7d89879aac94338a894440abe4a2a4ae *vignettes/snapshotting.Rmd 269cedcd7be1757537bd025225e7d340 *vignettes/test-fixtures.Rmd 3103ff73d02560f0d4fdff7f3408df9f *vignettes/third-edition.Rmd testthat/inst/0000755000176200001440000000000014172347710013075 5ustar liggesuserstestthat/inst/examples/0000755000176200001440000000000014164710002014700 5ustar liggesuserstestthat/inst/examples/test-failure.R0000644000176200001440000000025014164710002017424 0ustar liggesusersplus <- 
function(x, y) 1 + 1

test_that("one plus one is two", {
  expect_equal(plus(1, 1), 2)
})

test_that("two plus two is four", {
  expect_equal(plus(2, 2), 4)
})
testthat/inst/examples/test-success.R0000644000176200001440000000051414164710002017450 0ustar liggesusers
test_that("one plus one is two", {
  expect_equal(1 + 1, 2)
})

test_that("you can skip tests if needed", {
  skip("This test hasn't been written yet")
})

test_that("some tests have warnings", {
  expect_equal(log(-1), NaN)
})

test_that("some more successes just to pad things out", {
  expect_true(TRUE)
  expect_false(FALSE)
})
testthat/inst/doc/0000755000176200001440000000000014172347705013646 5ustar liggesusers
testthat/inst/doc/snapshotting.Rmd0000644000176200001440000002434214164723442017035 0ustar liggesusers
---
title: "Snapshot tests"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Snapshot tests}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
set.seed(1014)
```

The goal of a unit test is to record the expected output of a function using code. This is a powerful technique because not only does it ensure that code doesn't change unexpectedly, it also expresses the desired behaviour in a way that a human can understand.

However, it's not always convenient to record the expected behaviour with code. Some challenges include:

- Text output that includes many characters like quotes and newlines that require special handling in a string.

- Output that is large, making it painful to define the reference output, bloating the size of the test file, and making it hard to navigate.

- Binary formats like plots or images, which are very difficult to describe in code: i.e. the plot looks right, the error message is useful to a human, the print method uses colour effectively.

For these situations, testthat provides an alternative mechanism: snapshot tests. Instead of using code to describe expected output, snapshot tests (also known as [golden tests](https://ro-che.info/articles/2017-12-04-golden-tests)) record results in a separate human readable file. Snapshot tests in testthat are inspired primarily by [Jest](https://jestjs.io/docs/en/snapshot-testing), thanks to a number of very useful discussions with Joe Cheng.

```{r setup}
library(testthat)
```

```{r include = FALSE}
snapper <- local_snapshotter()
snapper$start_file("snapshotting.Rmd", "test")
```

## Basic workflow

We'll illustrate the basic workflow with a simple function that generates an HTML bulleted list. It can optionally include an `id` attribute, which allows you to construct a link directly to that list.

```{r}
bullets <- function(text, id = NULL) {
  paste0(
    "<ul", if (!is.null(id)) paste0(" id=\"", id, "\""), ">\n",
    paste0("  <li>", text, "</li>\n", collapse = ""),
    "</ul>\n"
  )
}
cat(bullets("a", id = "x"))
```

Testing this simple function is relatively painful. To write the test you have to carefully escape the newlines and quotes. And then when you re-read the test in the future, all that escaping makes it hard to tell exactly what it's supposed to return.

```{r}
test_that("bullets", {
  expect_equal(bullets("a"), "<ul>\n  <li>a</li>\n</ul>\n")
  expect_equal(bullets("a", id = "x"), "<ul id=\"x\">\n  <li>a</li>\n</ul>\n")
})
```

This is a great place to use snapshot testing. To do this we make two changes to our code:

- We use `expect_snapshot()` instead of `expect_equal()`.

- We wrap the call in `cat()` (to avoid `[1]` in the output, like in my first interactive example).

This yields the following test:

```{r}
test_that("bullets", {
  expect_snapshot(cat(bullets("a")))
  expect_snapshot(cat(bullets("a", "b")))
})
```

```{r, include = FALSE}
# Reset snapshot test
snapper$end_file()
snapper$start_file("snapshotting.Rmd", "test")
```

When we run the test for the first time, it automatically generates reference output, and prints it, so that you can visually confirm that it's correct. The output is automatically saved in `_snaps/{name}.md`. The name of the snapshot matches your test file name --- e.g. if your test is `test-pizza.R` then your snapshot will be saved in `tests/testthat/_snaps/pizza.md`. As the file name suggests, this is a markdown file, which I'll explain shortly.

If you run the test again, it'll succeed:

```{r}
test_that("bullets", {
  expect_snapshot(cat(bullets("a")))
  expect_snapshot(cat(bullets("a", "b")))
})
```

```{r, include = FALSE}
# Reset snapshot test
snapper$end_file()
snapper$start_file("snapshotting.Rmd", "test")
```

But if you change the underlying code, say to tweak the indenting, the test will fail:

```{r, error = TRUE}
bullets <- function(text, id = NULL) {
  paste0(
    "<ul", if (!is.null(id)) paste0(" id=\"", id, "\""), ">\n",
    paste0("<li>", text, "</li>\n", collapse = ""),
    "</ul>\n"
  )
}

test_that("bullets", {
  expect_snapshot(cat(bullets("a")))
  expect_snapshot(cat(bullets("a", "b")))
})
```

If this is a deliberate change, you can follow the advice in the message and update the snapshots for that file by running `snapshot_accept("pizza")`; otherwise you can fix the bug and your tests will pass once more. (You can also accept snapshots for all files with `snapshot_accept()`.)

### Snapshot format

Snapshots are recorded using a subset of markdown. You might wonder why we use markdown. It's important that snapshots be readable by humans, because humans have to look at them during code review. Reviewers often don't run your code but still want to understand the changes.

Here's the snapshot file generated by the test above:

``` md
# bullets

    <ul>
      <li>a</li>
    </ul>

    ---

    <ul id="b">
      <li>a</li>
    </ul>
```

Each test starts with `# {test name}`, a level 1 heading. Within a test, each snapshot expectation is indented by four spaces (i.e. as code), and snapshots are separated by `---`, a horizontal rule.

### Interactive usage

Because the snapshot output uses the name of the current test file and the current test, snapshot expectations don't really work when run interactively at the console. Since they can't automatically find the reference output, they instead just print the current value for manual inspection.

## Other types of output

So far we've focussed on snapshot tests for output printed to the console. But `expect_snapshot()` also captures messages, errors, and warnings. The following function generates some output, a message, and a warning:

```{r}
f <- function() {
  print("Hello")
  message("Hi!")
  warning("How are you?")
}
```

And `expect_snapshot()` captures them all:

```{r}
test_that("f() makes lots of noise", {
  expect_snapshot(f())
})
```

Capturing errors is *slightly* more difficult because `expect_snapshot()` will fail when there's an error:

```{r, error = TRUE}
test_that("you can't add a number and a letter", {
  expect_snapshot(1 + "a")
})
```

This is a safety valve that ensures that you don't accidentally write broken code. To deliberately snapshot an error, you'll have to specifically request it with `error = TRUE`:

```{r}
test_that("you can't add a number and a letter", {
  expect_snapshot(1 + "a", error = TRUE)
})
```

When the code gets longer, I like to put `error = TRUE` up front so it's a little more obvious:

```{r}
test_that("you can't add weird things", {
  expect_snapshot(error = TRUE, {
    1 + "a"
    mtcars + iris
    mean + sum
  })
})
```

## Other types of snapshot

`expect_snapshot()` is the most used snapshot function because it records everything: the code you run, printed output, messages, warnings, and errors. But sometimes you just want to capture the output or the error, in which case you can use `expect_snapshot_output()` or `expect_snapshot_error()`. Or rather than caring about side effects, you may want to check that the value of an R object stays the same. In this case, you can use `expect_snapshot_value()`, which offers a number of serialisation approaches that provide a tradeoff between accuracy and human readability.

## Whole file snapshotting

`expect_snapshot()`, `expect_snapshot_output()`, `expect_snapshot_error()`, and `expect_snapshot_value()` all store their snapshots in a single file per test file. But that doesn't work for all file types --- for example, what happens if you want to snapshot an image? `expect_snapshot_file()` provides an alternative workflow that generates one snapshot file per expectation, rather than one file per test file. Assuming you're in `test-burger.R`, the snapshot created by `expect_snapshot_file(code_that_returns_path_to_file(), "toppings.png")` would be saved in `tests/testthat/_snaps/burger/toppings.png`. If a future change in the code creates a different file, it will be saved in `tests/testthat/_snaps/burger/toppings.new.png`.

Unlike `expect_snapshot()` and friends, `expect_snapshot_file()` can't provide an automatic diff when the test fails. Instead you'll need to call `snapshot_review()`. This launches a Shiny app that allows you to visually review each change and approve it if it's deliberate:

![](review-image.png)

![](review-text.png)

The display varies based on the file type (currently text files, common image files, and CSV files are supported).
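To make this concrete, here is a minimal sketch of a whole-file snapshot test. `save_png()` is a hypothetical helper, written just for this illustration, that writes a plot to a temporary file and returns its path:

``` {.r}
save_png <- function(code, width = 400, height = 400) {
  path <- tempfile(fileext = ".png")
  grDevices::png(path, width = width, height = height)
  on.exit(grDevices::dev.off())
  code # evaluate the plotting code while the device is open
  path
}

test_that("mpg histogram looks right", {
  # saved as tests/testthat/_snaps/{file}/mpg-histogram.png
  expect_snapshot_file(save_png(hist(mtcars$mpg)), "mpg-histogram.png")
})
```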
Sometimes the failure occurs in a non-interactive environment where you can't run `snapshot_review()`, e.g. in `R CMD check`. In this case, the easiest fix is to retrieve the `.new` file, copy it into the appropriate directory, then run `snapshot_review()` locally. If your code was run on a CI platform, you'll need to start by downloading the run "artifact", which contains the check folder.

In most cases, we don't expect you to use `expect_snapshot_file()` directly. Instead, you'll use it via a wrapper that does its best to gracefully skip tests when differences in platform or package versions make it unlikely to generate perfectly reproducible output.

## Previous work

This is not the first time that testthat has attempted to provide snapshot testing (although it's the first time I knew what other languages called them). This section describes some of the previous attempts and why we believe the new approach is better.

- `verify_output()` has three main drawbacks:

  - You have to supply a path where the output will be saved. This seems like a small issue, but thinking of a good name, and managing the difference between interactive and test-time paths, introduces a surprising amount of friction.

  - It always overwrites the previous result, automatically assuming that the changes are correct. That means you have to use it with git and it's easy to accidentally accept unwanted changes.

  - It's relatively coarse-grained, which means tests that use it tend to keep growing and growing.

- `expect_known_output()` is a finer-grained version of `verify_output()` that captures output from a single function. The requirement to produce a path for each individual expectation makes it even more painful to use.

- `expect_known_value()` and `expect_known_hash()` have all the disadvantages of `expect_known_output()`, but also produce binary output, meaning that you can't easily review test differences in pull requests.
testthat/inst/doc/parallel.Rmd0000644000176200001440000002135514164710003016076 0ustar liggesusers
---
title: "Running tests in parallel"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Running tests in parallel}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
editor_options:
  markdown:
    wrap: sentence
---

```{r setup, include = FALSE}
library(testthat)
knitr::opts_chunk$set(collapse = TRUE, comment = "#>")
```

To take advantage of parallel tests, add the following line to the `DESCRIPTION`:

    Config/testthat/parallel: true

You'll also need to be using the 3rd edition:

    Config/testthat/edition: 3

## Basic operation

Starting a new R process is relatively expensive, so testthat begins by creating a pool of workers. The size of the pool is determined first by `getOption("Ncpus")`, then by the `TESTTHAT_CPUS` environment variable. If neither is set, then two processes are started. In any case, testthat will never start more subprocesses than test files.

Each worker begins by loading testthat and the package being tested. It then runs any setup files (so if you have existing setup files you'll need to make sure they work when executed in parallel).

testthat runs test *files* in parallel. Once the worker pool is initialized, testthat then starts sending test files to workers, by default in alphabetical order: as soon as a subprocess has finished, it receives another file, until all files are done. This means that state is persisted across test files: options are *not* reset, loaded packages are *not* unloaded, the global environment is *not* cleared, etc.
You are responsible for making sure each file leaves the world as it finds it.

Because files are run in alphabetical order, you may want to rename your slowest test files so that they start first, e.g. `test-1-slowest.R`, `test-2-next-slowest.R`, etc.

## Common problems

- If tests fail stochastically (i.e. they sometimes work and sometimes fail) you may have accidentally introduced a dependency between your test files. This sort of dependency is hard to track down due to its random nature, and you'll need to check all tests to make sure that they're not accidentally changing global state.

- If you use [package-scoped test fixtures](https://testthat.r-lib.org/articles/test-fixtures.html#package), you'll need to review them to make sure that they work in parallel. For example, if you were previously creating a temporary database in the test directory, you'd need to instead create it in the session temporary directory so that each process gets its own independent version.

## Performance

There is some overhead associated with running tests in parallel:

- Startup cost is linear in the number of subprocesses, because we need to create them in a loop. This is about 50ms on my laptop. Each subprocess needs to load testthat and the tested package; this happens in parallel, and there is not much we can do about it.

- Cleanup time is again linear in the number of subprocesses, at about 80ms per subprocess on my laptop.

- Sending a message (i.e. a passing or failing expectation) currently takes about 2ms. This is the total cost that includes sending the message, receiving it, and relaying it to a non-parallel reporter.

This overhead generally means that if you have many test files that take a short amount of time, you're unlikely to see a huge benefit by using parallel tests. For example, testthat itself takes about 10s to run tests in serial, and 8s to run the tests in parallel.

### Changing the order of the test files

By default testthat starts the test files in alphabetical order. If you have a few test files that take longer than the rest, then this might not be the best order. Ideally the slow files would start first, as the whole test suite will take at least as much time as its slowest test file. You can change the order with the `Config/testthat/start-first` option in `DESCRIPTION`. For example, testthat currently has:

    Config/testthat/start-first: watcher, parallel*

The format is a comma-separated list of glob patterns, see `?utils::glob2rx`. The matching test files will start first. (The `test-` prefix is ignored.)

## Reporters

### Default reporters

See `default_reporter()` for how testthat selects the default reporter for `devtools::test()` and `testthat::test_local()`. In short, testthat selects `ProgressReporter` for non-parallel and `ParallelProgressReporter` for parallel tests by default. (Other testthat test functions, like `test_check()`, `test_file()`, etc. select different reporters by default.)

### Parallel support

Most reporters support parallel tests. If a reporter is passed to `devtools::test()`, `testthat::test_dir()`, etc. directly, and it does not support parallel tests, then testthat runs the test files sequentially. Currently the following reporters *don't* support parallel tests:

- `DebugReporter`, because it is not currently possible to debug subprocesses.

- `JunitReporter`, because this reporter records timing information for each test block, and this is currently only available for reporters that support multiple active test files.
(See "Writing parallel reporters" below.) - `LocationReporter` because testthat currently does not include location information for successful tests when running in parallel, to minimize messaging between the processes. - `StopReporter`, as this is a reporter that testthat uses for interactive `expect_that()` calls. The other built-in reporters all support parallel tests, with some subtle differences: - Reporters that stop after a certain number of failures can only stop at the end of a test file. - Reporters report all information about a file at once, unless they support *parallel updates*. E.g. `ProgressReporter` does not update its display until a test file is complete. - The standard output and standard error, i.e. `print()`, `cat()`, `message()`, etc. output from the test files are lost currently. If you want to use `cat()` or `message()` for print-debugging test cases, then the best is to temporarily run tests sequentially, by changing the `Config` entry in `DESCRIPTION` or selecting a non-parallel reporter, e.g. the `CheckReporter`: ``` {.r} devtools::test(filter = "badtest", reporter = "check") ``` ### Writing parallel reporters To support parallel tests, a reporter must be able to function when the test files run in a subprocess. For example `DebugReporter` does not support parallel tests, because it requires direct interaction with the frames in the subprocess. When running in parallel, testthat does not provide location information (source references) for test successes. To support parallel tests, a reporter must set `self$capabilities$parallel_support` to `TRUE` in its `initialize()` method: ``` {.r} ... initialize = function(...) { super$initialize(...) self$capabilities$parallel_support <- TRUE ... } ... ``` When running in parallel, testthat runs the reporter in the main process, and relays information between the reporter and the test code transparently. (Currently the reporter does not even know that the tests are running in parallel.) If a reporter does not support parallel updates (see below), then testthat internally caches all calls to the reporter methods from subprocesses, until a test file is complete. This is because these reporters are not prepared for running multiple test files concurrently. Once a test file is complete, testthat calls the reporter's `$start_file()` method, relays all `$start_test()` , `$end_test()`, `$add_result()`, etc. calls in the order they came in from the subprocess, and calls `$end_file()` . ### Parallel updates The `ParallelProgressReporter` supports parallel updates. This means that once a message from a subprocess comes in, the reporter is updated immediately. For this to work, a reporter must be able to handle multiple test files concurrently. A reporter declares parallel update support by setting `self$capabilities$parallel_updates` to `TRUE`: ``` {.r} ... initialize = function(...) { super$initialize(...) self$capabilities$parallel_support <- TRUE self$capabilities$parallel_updates <- TRUE ... } ... ``` For these reporters, testthat does not cache the messages from the subprocesses. Instead, when a message comes in: - It calls the `$start_file()` method, letting the reporter know which file the following calls apply to. This means that the reporter can receive multiple `$start_file()` calls for the same file. - Then relays the message from the subprocess, calling the appropriate `$start_test()` , `$add_result()`, etc. method. 
testthat also calls the new `$update()` method of the reporter regularly, even if it does not receive any messages from the subprocess. (It currently aims to do this every 100ms, but there are no guarantees.) The `$update()` method may implement a spinner to let the user know that the tests are running.
testthat/inst/doc/test-fixtures.html0000644000176200001440000012677714172347704017371 0ustar liggesusers

    Test fixtures

    Test hygiene

    Take nothing but memories, leave nothing but footprints.

    ― Chief Si’ahl

    Ideally, a test should leave the world exactly as it found it. But you often need to make some changes in order to exercise every part of your code:

    • Create a file or directory
    • Create a resource on an external system
    • Set an R option
    • Set an environment variable
    • Change working directory
    • Change an aspect of the tested package’s state

    How can you clean up these changes to get back to a clean slate? Scrupulous attention to cleanup is more than just courtesy or being fastidious. It is also self-serving. The state of the world after test i is the starting state for test i + 1. Tests that change state willy-nilly eventually end up interfering with each other in ways that can be very difficult to debug.

    Most tests are written with an implicit assumption about the starting state, usually whatever tabula rasa means for the target domain of your package. If you accumulate enough sloppy tests, you will eventually find yourself asking the programming equivalent of questions like “Who forgot to turn off the oven?” and “Who didn’t clean up after the dog?”.

    It’s also important that your setup and cleanup are easy to use when working interactively. When a test fails, you want to be able to quickly recreate the exact environment in which the test is run so you can interactively experiment to figure out what went wrong.

    This article introduces a powerful technique that allows you to solve both problems: test fixtures. We’ll begin with an introduction to the tools that make fixtures possible, then talk about exactly what a test fixture is, and show a few examples.

    Much of this vignette is derived from https://www.tidyverse.org/blog/2020/04/self-cleaning-test-fixtures/; if this is your first encounter with on.exit() or withr::defer(), I’d recommend starting with that blog as it gives a gentler introduction. This vignette moves a little faster since it’s designed as more of a reference doc.

    library(testthat)

    Foundations

    Before we can talk about test fixtures, we need to lay some foundations to help you understand how they work. We’ll motivate the discussion with a sloppy() function that prints a number with a specific number of significant digits by adjusting an R option:

    sloppy <- function(x, sig_digits) {
      options(digits = sig_digits)
      print(x)
    }
    
    pi
    #> [1] 3.141593
    sloppy(pi, 2)
    #> [1] 3.1
    pi
    #> [1] 3.1

    Notice how pi prints differently before and after the call to sloppy(). Calling sloppy() has a side effect: it changes the digits option globally, not just within its own scope of operations. This is what we want to avoid¹.

    on.exit()

    The first function you need to know about is base R’s on.exit(). on.exit() calls the code supplied in its first argument when the current function exits, regardless of whether it returns a value or errors. You can use on.exit() to clean up after yourself by ensuring that every mess-making function call is paired with an on.exit() call that cleans up.

    We can use this idea to turn sloppy() into neat():

    neat <- function(x, sig_digits) {
      op <- options(digits = sig_digits)
      on.exit(options(op), add = TRUE, after = FALSE)
      print(x)
    }
    
    pi
    #> [1] 3.141593
    neat(pi, 2)
    #> [1] 3.1
    pi
    #> [1] 3.141593

    Here we make use of a useful pattern options() implements: when you call options(digits = sig_digits) it both sets the digits option and (invisibly) returns the previous value of digits. We can then use that value to restore the previous options.
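
    For example (a small sketch, assuming a fresh session where the digits option is still at its default of 7):

    op <- options(digits = 3)
    op
    #> $digits
    #> [1] 7
    getOption("digits")
    #> [1] 3
    options(op) # restore the previous value
    getOption("digits")
    #> [1] 7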

    on.exit() also works in tests:

    test_that("can print one digit of pi", {
      op <- options(digits = 1)
      on.exit(options(op), add = TRUE, after = FALSE)
      
      expect_output(print(pi), "3")
    })
    #> Test passed 🌈
    pi
    #> [1] 3.141593

    There are three main drawbacks to on.exit():

    • You should always call it with add = TRUE and after = FALSE. These ensure that the call is added to the list of deferred tasks (instead of replacing it) and is added to the front of the stack (not the back, so that cleanup occurs in reverse order to setup). These arguments only matter if you’re using multiple on.exit() calls, but it’s a good habit to always use them to avoid potential problems down the road (see the small sketch after this list).

    • It doesn’t work outside a function or test. If you run the following code in the global environment, you won’t get an error, but the cleanup code will never be run:

      op <- options(digits = 1)
      on.exit(options(op), add = TRUE, after = FALSE)

      This is annoying when you are running tests interactively.

    • You can’t program with it; on.exit() always works inside the current function so you can’t wrap up repeated on.exit() code in a helper function.
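
    To see how add = TRUE and after = FALSE behave together, here is a small sketch: each on.exit() call is added (rather than replacing earlier ones) at the front of the stack, so cleanup runs in the reverse order of registration.

    messy <- function() {
      on.exit(print("cleanup 1"), add = TRUE, after = FALSE)
      on.exit(print("cleanup 2"), add = TRUE, after = FALSE)
      invisible()
    }
    messy()
    #> [1] "cleanup 2"
    #> [1] "cleanup 1"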

    To resolve these drawbacks, we use withr::defer().

    withr::defer()

    withr::defer() resolves the main drawbacks of on.exit(). First, it has the behaviour we want by default; no extra arguments needed:

    neat <- function(x, sig_digits) {
      op <- options(digits = sig_digits)
      withr::defer(options(op))
      print(x)
    }

    Second, it works when called in the global environment. Since the global environment isn’t perishable, like a test environment is, you have to call deferred_run() explicitly to execute the deferred events. You can also clear them, without running, with deferred_clear().

    withr::defer(print("hi"))
    #> Setting deferred event(s) on global environment.
    #>   * Execute (and clear) with `deferred_run()`.
    #>   * Clear (without executing) with `deferred_clear()`.
    
    withr::deferred_run()
    #> [1] "hi"

    Finally, withr::defer() lets you pick which function to bind the cleanup behaviour to. This makes it possible to create helper functions.

    “Local” helpers

    Imagine we have many functions where we want to temporarily set the digits option. Wouldn’t it be nice if we could write a helper function to automate this? Unfortunately we can’t write such a helper with on.exit():

    local_digits <- function(sig_digits) {
      op <- options(digits = sig_digits)
      on.exit(options(op), add = TRUE, after = FALSE)
    }
    neater <- function(x, sig_digits) {
      local_digits(1)
      print(x)
    }
    neater(pi)
    #> [1] 3.141593

    This code doesn’t work because the cleanup happens too soon, when local_digits() exits, not when neater() finishes.

    Fortunately, withr::defer() allows us to solve this problem by providing an envir argument that allows you to control when cleanup occurs. The exact details of how this works are rather complicated, but fortunately there’s a common pattern you can use without understanding all the details. Your helper function should always have an env argument that defaults to parent.frame(), which you pass to the second argument of defer():

    local_digits <- function(sig_digits, env = parent.frame()) {
      op <- options(digits = sig_digits)
      withr::defer(options(op), env)
    }
    
    neater(pi)
    #> [1] 3

    Just like on.exit() and defer(), our helper also works within tests:

    test_that("withr lets us write custom helpers for local state manipulation", {
      local_digits(1)
      expect_output(print(exp(1)), "3")
      
      local_digits(3)
      expect_output(print(exp(1)), "2.72")
    })
    #> Test passed 🎊
    
    print(exp(1))
    #> [1] 2.718282

    We always give these helper functions names starting with local_; “local” here refers to the fact that the state change persists only locally, for the lifetime of the associated function or test.

    Pre-existing helpers

    But before you write your own helper function, make sure to check out the wide range of local functions already provided by withr:

    Do / undo this                withr function
    ───────────────────────────   ─────────────────
    Create a file                 local_file()
    Set an R option               local_options()
    Set an environment variable   local_envvar()
    Change working directory      local_dir()

    We can use withr::local_options() to write yet another version of neater():

    neatest <- function(x, sig_digits) {
      withr::local_options(list(digits = sig_digits))
      print(x)
    }
    neatest(pi, 3)
    #> [1] 3.14

    Each local_*() function has a companion with_*() function, which is a nod to with(), and the inspiration for withr’s name. We won’t use the with_*() functions much here, but you can learn more about them at withr.r-lib.org.
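    For a quick flavour, a with_*() function scopes the change to a single block of code:

    withr::with_options(list(digits = 3), print(pi))
    #> [1] 3.14
    print(pi)
    #> [1] 3.141593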

    Test fixtures

    Testing is often demonstrated with cute little tests and functions where all the inputs and expected results can be inlined. But in real packages, things aren’t always so simple and functions often depend on other global state. For example, take this variant on message() that only shows a message if the verbose option is TRUE. How would you test that setting the option does indeed silence the message?

    message2 <- function(...) {
      if (!isTRUE(getOption("verbose"))) {
        return()
      }
      message(...)
    }

    In some cases, it’s possible to make the global state an explicit argument to the function. For example, we could refactor message2() to make the verbosity an explicit argument:

    message3 <- function(..., verbose = getOption("verbose")) {
      if (!isTRUE(verbose)) {
        return()
      }
      message(...)
    }
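    With the state as an explicit argument, we can test message3() without touching global state at all; a minimal sketch:

    test_that("message3() respects the verbose argument", {
      expect_message(message3("Hi!", verbose = TRUE))
      expect_message(message3("Hi!", verbose = FALSE), NA)
    })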

    Making external state explicit is often worthwhile, because it makes it clearer exactly which inputs determine the outputs of your function. But in many cases that’s simply not possible. That’s where test fixtures come in: they allow you to temporarily change global state in order to test your function. Test fixture is a pre-existing term in the software engineering world (and beyond):

    A test fixture is something used to consistently test some item, device, or piece of software.

    Wikipedia

    A test fixture is just a local_ function that you use to change state in such a way that you can reach inside and test parts of your code that would otherwise be challenging. For example, here’s how you could use withr::local_options() as a test fixture to test message2():

    test_that("message2() output depends on verbose option", {
      withr::local_options(verbose = TRUE)
      expect_message(message2("Hi!"))
      
      withr::local_options(verbose = FALSE)
      expect_message(message2("Hi!"), NA)
    })
    #> Test passed 🥇

    Case study: usethis

    One place that we use test fixtures extensively is in the usethis package (usethis.r-lib.org), which provides functions for looking after the files and folders in R projects, especially packages. Many of these functions only make sense in the context of a package, which means to test them, we also have to be working inside an R package. We need a way to quickly spin up a minimal package in a temporary directory, then test some functions against it, then destroy it.

    To solve this problem we create a test fixture, which we place in R/test-helpers.R so that it’s available for both testing and interactive experimentation:

    local_create_package <- function(dir = file_temp(), env = parent.frame()) {
      old_project <- proj_get_()
      
      # create new folder and package
      create_package(dir, open = FALSE) # A
      withr::defer(fs::dir_delete(dir), envir = env) # -A
      
      # change working directory
      setwd(dir) # B
      withr::defer(setwd(old_project), envir = env) # -B
      
      # switch to new usethis project
      proj_set(dir) # C
      withr::defer(proj_set(old_project, force = TRUE), envir = env) # -C
      
      dir
    }

    Note that the cleanup automatically unfolds in the opposite order from the setup. Setup is A, then B, then C; cleanup is -C, then -B, then -A. This is important because we must create directory dir before we can make it the working directory; and we must restore the original working directory before we can delete dir; we can’t delete dir while it’s still the working directory!
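    You can see this last-in, first-out behaviour in a minimal example:

    demo_order <- function() {
      withr::defer(print("cleanup A"))  # registered first, runs last
      withr::defer(print("cleanup B"))  # registered second, runs first
      print("setup done")
    }
    demo_order()
    #> [1] "setup done"
    #> [1] "cleanup B"
    #> [1] "cleanup A"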

    local_create_package() is used in over 170 tests. Here’s one example that checks that usethis::use_roxygen_md() does the setup necessary to use roxygen2 in a package, with markdown support turned on. All 3 expectations consult the DESCRIPTION file, directly or indirectly. So it’s very convenient that local_create_package() creates a minimal package, with a valid DESCRIPTION file, for us to test against. And when the test is done — poof! — the package is gone.

    test_that("use_roxygen_md() adds DESCRIPTION fields", {
      pkg <- local_create_package()
      use_roxygen_md()
      
      expect_true(uses_roxygen_md())
      expect_equal(desc::desc_get("Roxygen", pkg)[[1]], "list(markdown = TRUE)")
      expect_true(desc::desc_has_fields("RoxygenNote", pkg))
    })

    Scope

    So far we have applied our test fixture to individual tests, but it’s also possible to apply them to a file or package.

    File

    If you move the local_() call outside of a test_that() block, it will affect all tests that come after it. This means that by calling the test fixture at the top of the file you can change the behaviour for all tests. This has both advantages and disadvantages:

    • If you would otherwise have called the fixture in every test, you’ve saved yourself a bunch of work and duplicate code.

    • But on the downside, if a test fails and you want to recreate the failure in an interactive environment so you can debug, you need to remember to run all the setup code at the top of the file first.
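    To make the file-level pattern concrete, here’s a sketch that reuses the local_digits() helper from above:

    # tests/testthat/test-neat.R
    local_digits(1)  # applies to every test in this file
    
    test_that("pi prints with one digit", {
      expect_output(print(pi), "3")
    })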

    Generally, I think it’s better to copy and paste test fixtures across many tests — sure, it adds some duplication to your code, but it makes debugging test failures so much easier.

    Package

    To run code before any test is run, you can create a file called tests/testthat/setup.R. If the code in this file needs cleanup, you can use the special teardown_env():

    # Run before any test
    write.csv("mtcars.csv", mtcars)
    
    # Run after all tests
    withr::defer(unlink("mtcars.csv"), teardown_env())

    Setup code is typically best used to create external resources that are needed by many tests. It’s best kept to a minimum because you will have to manually run it before interactively debugging tests.

    Other challenges

    A collection of miscellaneous problems that I don’t know where else to describe:

    • There are a few base functions that are hard to test because they depend on state that you can’t control. One such example is interactive(): there’s no way to write a test fixture that allows you to pretend that interactive() is either TRUE or FALSE. So we now usually use rlang::is_interactive(), which can be controlled by the rlang_interactive option (see the sketch after this list).

    • If you’re using a test fixture in a function, be careful about what you return. For example, if you write a function that does dir <- local_create_package() you shouldn’t return dir, because after the function returns, the directory will no longer exist.
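    Here’s the promised sketch: a minimal test that controls rlang::is_interactive() via that option (the function under test is assumed to consult it):

    test_that("is_interactive() can be controlled from tests", {
      withr::local_options(rlang_interactive = FALSE)
      expect_false(rlang::is_interactive())
    
      withr::local_options(rlang_interactive = TRUE)
      expect_true(rlang::is_interactive())
    })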


    1. Don’t worry, I’m restoring global state (specifically, the digits option) behind the scenes here.

    testthat/inst/doc/snapshotting.html0000644000176200001440000075072514172347704017274 0ustar liggesusers Snapshot tests

    Snapshot tests

    The goal of a unit test is to record the expected output of a function using code. This is a powerful technique because not only does it ensure that code doesn’t change unexpectedly, it also expresses the desired behaviour in a way that a human can understand.

    However, it’s not always convenient to record the expected behaviour with code. Some challenges include:

    • Text output that includes many characters like quotes and newlines that require special handling in a string.

    • Output that is large, making it painful to define the reference output, and bloating the size of the test file and making it hard to navigate.

    • Binary formats like plots or images, and qualities that are very difficult to describe in code: e.g. the plot looks right, the error message is useful to a human, the print method uses colour effectively.

    For these situations, testthat provides an alternative mechanism: snapshot tests. Instead of using code to describe expected output, snapshot tests (also known as golden tests) record results in a separate human-readable file. Snapshot tests in testthat are inspired primarily by Jest, thanks to a number of very useful discussions with Joe Cheng.

    library(testthat)

    Basic workflow

    We’ll illustrate the basic workflow with a simple function that generates an HTML bulleted list. It can optionally include an id attribute, which allows you to construct a link directly to that list.

    bullets <- function(text, id = NULL) {
      paste0(
        "<ul", if (!is.null(id)) paste0(" id=\"", id, "\""), ">\n", 
        paste0("  <li>", text, "</li>\n", collapse = ""),
        "</ul>\n"
      )
    }
    cat(bullets("a", id = "x"))
    #> <ul id="x">
    #>   <li>a</li>
    #> </ul>

    Testing this simple function is relatively painful. To write the test you have to carefully escape the newlines and quotes. And then when you re-read the test in the future, all that escaping makes it hard to tell exactly what it’s supposed to return.

    test_that("bullets", {
      expect_equal(bullets("a"), "<ul>\n  <li>a</li>\n</ul>\n")
      expect_equal(bullets("a", id = "x"), "<ul id=\"x\">\n  <li>a</li>\n</ul>\n")
    })
    #> Test passed 🥇

    This is a great place to use snapshot testing. To do this we make two changes to our code:

    • We use expect_snapshot() instead of expect_equal()

    • We wrap the call in cat() (to avoid [1] in the output, like in my first interactive example).

    This yields the following test:

    test_that("bullets", {
      expect_snapshot(cat(bullets("a")))
      expect_snapshot(cat(bullets("a", "b")))
    })
    #> ── Warning (<text>:2:3): bullets ───────────────────────────────────────────────
    #> Adding new snapshot:
    #> Code
    #>   cat(bullets("a"))
    #> Output
    #>   <ul>
    #>     <li>a</li>
    #>   </ul>
    #> 
    #> ── Warning (<text>:3:3): bullets ───────────────────────────────────────────────
    #> Adding new snapshot:
    #> Code
    #>   cat(bullets("a", "b"))
    #> Output
    #>   <ul id="b">
    #>     <li>a</li>
    #>   </ul>

    When we run the test for the first time, it automatically generates reference output, and prints it, so that you can visually confirm that it’s correct. The output is automatically saved in _snaps/{name}.md. The name of the snapshot matches your test file name — e.g. if your test is test-pizza.R then your snapshot will be saved in tests/testthat/_snaps/pizza.md. As the file name suggests, this is a markdown file, which I’ll explain shortly.

    If you run the test again, it’ll succeed:

    test_that("bullets", {
      expect_snapshot(cat(bullets("a")))
      expect_snapshot(cat(bullets("a", "b")))
    })
    #> Test passed 🎊

    But if you change the underlying code, say to tweak the indenting, the test will fail:

    bullets <- function(text, id = NULL) {
      paste0(
        "<ul", if (!is.null(id)) paste0(" id=\"", id, "\""), ">\n", 
        paste0("<li>", text, "</li>\n", collapse = ""),
        "</ul>\n"
      )
    }
    test_that("bullets", {
      expect_snapshot(cat(bullets("a")))
      expect_snapshot(cat(bullets("a", "b")))
    })
    #> ── Failure (<text>:9:3): bullets ───────────────────────────────────────────────
    #> Snapshot of code has changed:
    #> `old[2:6]`: "  cat(bullets(\"a\"))" "Output" "  <ul>" "    <li>a</li>" "  </ul>"
    #> `new[2:6]`: "  cat(bullets(\"a\"))" "Output" "  <ul>" "  <li>a</li>"   "  </ul>"
    #> 
    #> * Run `snapshot_accept('snapshotting.Rmd')` to accept the change
    #> * Run `snapshot_review('snapshotting.Rmd')` to interactively review the change
    #> 
    #> ── Failure (<text>:10:3): bullets ──────────────────────────────────────────────
    #> Snapshot of code has changed:
    #>     old                            | new                               
    #> [2] "  cat(bullets(\"a\", \"b\"))" | "  cat(bullets(\"a\", \"b\"))" [2]
    #> [3] "Output"                       | "Output"                       [3]
    #> [4] "  <ul id=\"b\">"              | "  <ul id=\"b\">"              [4]
    #> [5] "    <li>a</li>"               - "  <li>a</li>"                 [5]
    #> [6] "  </ul>"                      | "  </ul>"                      [6]
    #> 
    #> * Run `snapshot_accept('snapshotting.Rmd')` to accept the change
    #> * Run `snapshot_review('snapshotting.Rmd')` to interactively review the change
    #> Error: Test failed

    If this is a deliberate change, you can follow the advice in the message and update the snapshots for that file by running snapshot_accept("pizza"); otherwise you can fix the bug and your tests will pass once more. (You can also accept the snapshots for all files by calling snapshot_accept() with no arguments.)

    Snapshot format

    Snapshots are recorded using a subset of markdown. Why markdown? It’s important that snapshots be readable by humans, because humans have to read them during code review. Reviewers often don’t run your code but still want to understand the changes.

    Here’s the snapshot file generated by the test above:

    # bullets
    
        <ul>
          <li>a</li>
        </ul>
      
    ---
    
        <ul id="b">
          <li>a</li>
        </ul>

    Each test starts with # {test name}, a level 1 heading. Within a test, each snapshot expectation is indented by four spaces, i.e. as code, and snapshots are separated by ---, a horizontal rule.

    Interactive usage

    Because the snapshot output uses the name of the current test file and the current test, snapshot expectations don’t really work when run interactively at the console. Since they can’t automatically find the reference output, they instead just print the current value for manual inspection.

    Other types of output

    So far we’ve focussed on snapshot tests for output printed to the console. But expect_snapshot() also captures messages, errors, and warnings. The following function generates some output, a message, and a warning:

    f <- function() {
      print("Hello")
      message("Hi!")
      warning("How are you?")
    }

    And expect_snapshot() captures them all:

    test_that("f() makes lots of noice", {
      expect_snapshot(f())
    })
    #> ── Warning (<text>:2:3): f() makes lots of noise ───────────────────────────────
    #> Adding new snapshot:
    #> Code
    #>   f()
    #> Output
    #>   [1] "Hello"
    #> Message <simpleMessage>
    #>   Hi!
    #> Warning <simpleWarning>
    #>   How are you?

    Capturing errors is slightly more difficult because expect_snapshot() will fail when there’s an error:

    test_that("you can't add a number and a letter", {
      expect_snapshot(1 + "a")
    })
    #> ── Error (<text>:2:3): you can't add a number and a letter ─────────────────────
    #> `1 + "a"` threw an unexpected error.
    #> Message: non-numeric argument to binary operator
    #> Class:   simpleError/error/condition
    #> Error: Test failed

    This is a safety valve that ensures that you don’t accidentally write broken code. To deliberately snapshot an error, you’ll have to specifically request it with error = TRUE:

    test_that("you can't add a number and a letter", {
      expect_snapshot(1 + "a", error = TRUE)
    })
    #> ── Warning (<text>:2:3): you can't add a number and a letter ───────────────────
    #> Adding new snapshot:
    #> Code
    #>   1 + "a"
    #> Error <simpleError>
    #>   non-numeric argument to binary operator

    When the code gets longer, I like to put error = TRUE up front so it’s a little more obvious:

    test_that("you can't add weird thngs", {
      expect_snapshot(error = TRUE, {
        1 + "a"
        mtcars + iris
        mean + sum
      })
    })
    #> ── Warning (<text>:2:3): you can't add weird things ────────────────────────────
    #> Adding new snapshot:
    #> Code
    #>   1 + "a"
    #> Error <simpleError>
    #>   non-numeric argument to binary operator
    #> Code
    #>   mtcars + iris
    #> Error <simpleError>
    #>   '+' only defined for equally-sized data frames
    #> Code
    #>   mean + sum
    #> Error <simpleError>
    #>   non-numeric argument to binary operator

    Other types of snapshot

    expect_snapshot() is the most used snapshot function because it records everything: the code you run, printed output, messages, warnings, and errors. But sometimes you just want to capture printed output or an error message, in which case you can use expect_snapshot_output() or expect_snapshot_error().

    Or rather than caring about side-effects, you may want to check that the value of an R object stays the same. In this case, you can use expect_snapshot_value() which offers a number of serialisation approaches that provide a tradeoff between accuracy and human readability.
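    For example, here’s a sketch that snapshots a model’s coefficients; the json2 style and tolerance shown are just one reasonable choice:

    test_that("model coefficients are stable", {
      fit <- lm(mpg ~ wt, data = mtcars)
      expect_snapshot_value(coef(fit), style = "json2", tolerance = 1e-6)
    })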

    Whole file snapshotting

    expect_snapshot(), expect_snapshot_output(), expect_snapshot_error(), and expect_snapshot_value() all store their snapshots in a single file per test. But that doesn’t work for all file types — for example, what happens if you want to snapshot an image? expect_snapshot_file() provides an alternative workflow that generates one snapshot per expectation, rather than one file per test. Assuming you’re in test-burger.R then the snapshot created by expect_snapshot_file(code_that_returns_path_to_file(), "toppings.png") would be saved in tests/testthat/_snaps/burger/toppings.png. If a future change in the code creates a different file it will be saved in tests/testthat/_snaps/burger/toppings.new.png.

    Unlike expect_snapshot() and friends, expect_snapshot_file() can’t provide an automatic diff when the test fails. Instead you’ll need to call snapshot_review(). This launches a Shiny app that allows you to visually review each change and approve it if it’s deliberate:

    The display varies based on the file type (currently text files, common image files, and csv files are supported).

    Sometimes the failure occurs in a non-interactive environment where you can’t run snapshot_review(), e.g. in R CMD check. In this case, the easiest fix is to retrieve the .new file, copy it into the appropriate directory, then run snapshot_review() locally. If your code was run on a CI platform, you’ll need to start by downloading the run “artifact”, which contains the check folder.

    In most cases, we don’t expect you to use expect_snapshot_file() directly. Instead, you’ll use it via a wrapper that does its best to gracefully skip tests when differences in platform or package versions make it unlikely to generate perfectly reproducible output.
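    Such a wrapper might look something like the sketch below, where save_png() is a hypothetical helper and which platforms you skip will depend on your package:

    save_png <- function(code, width = 400, height = 400) {
      path <- tempfile(fileext = ".png")
      png(path, width = width, height = height)
      on.exit(dev.off())
      code
      path
    }
    
    expect_snapshot_plot <- function(name, code) {
      # Plot output is rarely byte-identical across platforms,
      # so only snapshot on a single OS
      skip_on_os(c("windows", "linux"))
      expect_snapshot_file(save_png(code), paste0(name, ".png"))
    }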

    Previous work

    This is not the first time that testthat has attempted to provide snapshot testing (although it’s the first time I knew what other languages called them). This section describes some of the previous attempts and why we believe the new approach is better.

    • verify_output() has three main drawbacks:

    • You have to supply a path where the output will be saved. This seems like a small issue, but thinking of a good name, and managing the difference between interactive and test-time paths, introduces a surprising amount of friction.

    • It always overwrites the previous result, automatically assuming that the changes are correct. That means you have to use it with git, and it’s easy to accidentally accept unwanted changes.

      • It’s relatively coarse grained, which means tests that use it tend to keep growing and growing.

    • expect_known_output() is a finer-grained version of verify_output() that captures output from a single function. The requirement to provide a path for each individual expectation makes it even more painful to use.

    • expect_known_value() and expect_known_hash() have all the disadvantages of expect_known_output(), but also produce binary output, meaning that you can’t easily review test differences in pull requests.

    testthat/inst/doc/skipping.Rmd0000644000176200001440000001116114164710003016120 0ustar liggesusers--- title: "Skipping tests" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Skipping tests} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` Some times you have tests that you don't want to run in certain circumstances. This vignette describes how to **skip** tests to avoid execution in undesired environments. Skipping is a relatively advanced topic because in most cases you want all your tests to run everywhere. The most common exceptions are: - You're testing a web service that occasionally fails, and you don't want to run the tests on CRAN. Or maybe the API requires authentication, and you can only run the tests when you've [securely distributed](https://gargle.r-lib.org/articles/articles/managing-tokens-securely.html) some secrets. - You're relying on features that not all operating systems possess, and want to make sure your code doesn't run on a platform where it doesn't work. This platform tends to be Windows, since amongst other things, it lacks full utf8 support. - You're writing your tests for multiple versions of R or multiple versions of a dependency and you want to skip when a feature isn't available. You generally don't need to skip tests if a suggested package is not installed. This is only needed in exceptional circumstances, e.g. when a package is not available on some operating system. ```{r setup} library(testthat) ``` ## Basics testthat comes with a variety of helpers for the most common situations: - `skip_on_cran()` skips tests on CRAN. This is useful for slow tests and tests that occasionally fail for reasons outside of your control. - `skip_on_os()` allows you to skip tests on a specific operating system. Generally, you should strive to avoid this as much as possible (so your code works the same on all platforms), but sometimes it's just not possible. - `skip_on_ci()` skips tests on most continuous integration platforms (e.g. GitHub Actions, Travis, Appveyor). You can also easily implement your own using either `skip_if()` or `skip_if_not()`, which both take an expression that should yield a single `TRUE` or `FALSE`. All reporters show which tests as skipped. As of testthat 3.0.0, ProgressReporter (used interactively) and CheckReporter (used inside of `R CMD check`) also display a summary of skips across all tests. It looks something like this: ── Skipped tests ─────────────────────────────────────────────────────── ● No token (3) ● On CRAN (1) You should keep an on eye this when developing interactively to make sure that you're not accidentally skipping the wrong things. ## Helpers If you find yourself using the same `skip_if()`/`skip_if_not()` expression across multiple tests, it's a good idea to create a helper function. This function should start with `skip_` and live somewhere in your `R/` directory. ```{r} skip_if_Tuesday <- function() { if (as.POSIXlt(Sys.Date())$wday != 2) { return(invisible(TRUE)) } skip("Not run on Tuesday") } ``` It's important to test your skip helpers because it's easy to miss if you're skipping more often than desired, and the test code is never run. This is unlikely to happen locally (since you'll see the skipped tests in the summary), but is quite possible in continuous integration. For that reason, it's a good idea to add a test that you skip is activated when you expect. 
skips are a special type of condition, so you can test for their presence/absence with `expect_condition()`. For example, imagine that you've defined a custom skipper that skips tests whenever an environment variable `DANGER` is set: ```{r} skip_if_dangerous <- function() { if (identical(Sys.getenv("DANGER"), "")) { return(invisible(TRUE)) } skip("Not run in dangerous enviromnents") } ``` Then you can use `expect_condition()` to test that it skips tests when it should, and doesn't skip when it shouldn't: ```{r} test_that("skip_if_dangerous work", { # Test that a skip happens withr::local_envvar(DANGER = "yes") expect_condition(skip_if_dangerous(), class = "skip") # Test that a skip doesn't happen withr::local_envvar(DANGER = "") expect_condition(skip_if_dangerous(), NA, class = "skip") }) ``` Testing `skip_if_Tuesday()` is harder because there's no way to control the skipping from the outside. That means you'd need to "mock" its behaviour in a test, using the [mockery](https://github.com/r-lib/mockery) or [mockr](https://krlmlr.github.io/mockr/) packages. testthat/inst/doc/parallel.R0000644000176200001440000000023314172347702015560 0ustar liggesusers## ----setup, include = FALSE--------------------------------------------------- library(testthat) knitr::opts_chunk$set(collapse = TRUE, comment = "#>") testthat/inst/doc/parallel.html0000644000176200001440000005107514172347703016336 0ustar liggesusers Running tests in parallel

    Running tests in parallel

    To take advantage of parallel tests, add the following line to the DESCRIPTION:

    Config/testthat/parallel: true

    You’ll also need to be using the 3rd edition:

    Config/testthat/edition: 3

    Basic operation

    Starting a new R process is relatively expensive, so testthat begins by creating a pool of workers. The size of the pool will be determined by getOption("Ncpus"), then the TESTTHAT_CPUS envvar. If neither is set, then two processes are started. In any case, testthat never starts more subprocesses than there are test files.
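    For example, to use four workers you could set either of the following (illustrative values):

    options(Ncpus = 4)              # e.g. in your .Rprofile
    Sys.setenv(TESTTHAT_CPUS = 4)   # or as an environment variable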

    Each worker begins by loading testthat and the package being tested. It then runs any setup files (so if you have existing setup files you’ll need to make sure they work when executed in parallel).

    testthat runs test files in parallel. Once the worker pool is initialized, testthat then starts sending test files to workers, by default in alphabetical order: as soon as a subprocess has finished, it receives another file, until all files are done. This means that state is persisted across test files: options are not reset, loaded packages are not unloaded, the global environment is not cleared, etc. You are responsible for making sure each file leaves the world as it finds it.

    Because files are run in alphabetical order, you may want to rename your slowest test files so that they start first, e.g. test-1-slowest.R, test-2-next-slowest.R, etc.

    Common problems

    • If tests fail stochastically (i.e. they sometimes work and sometimes fail) you may have accidentally introduced a dependency between your test files. This sort of dependency is hard to track down due to the random nature, and you’ll need to check all tests to make sure that they’re not accidentally changing global state.

    • If you use package-scoped test fixtures, you’ll need to review them to make sure that they work in parallel. For example, if you were previously creating a temporary database in the test directory, you’d need to instead create it in the session temporary directory so that each process gets its own independent version.

    Performance

    There is some overhead associated with running tests in parallel:

    • Startup cost is linear in the number of subprocesses, because we need to create them in a loop; this is about 50ms per subprocess on my laptop. Each subprocess also needs to load testthat and the tested package; this happens in parallel, and we cannot do too much about it.

    • Clean-up time is again linear in the number of subprocesses, and is about 80ms per subprocess on my laptop.

    • Sending a message (e.g. a passing or failing expectation) currently takes about 2ms. This is the total cost, which includes sending the message, receiving it, and relaying it to a non-parallel reporter.

    This overhead generally means that if you have many test files that take a short amount of time, you’re unlikely to see a huge benefit by using parallel tests. For example, testthat itself takes about 10s to run tests in serial, and 8s to run the tests in parallel.

    Changing the order of the test files

    By default testthat starts the test files in alphabetical order. If you have a few test files that take much longer than the rest, then this might not be the best order. Ideally the slow files would start first, as the whole test suite will take at least as much time as its slowest test file. You can change the order with the Config/testthat/start-first option in DESCRIPTION. For example testthat currently has:

    Config/testthat/start-first: watcher, parallel*

    The format is a comma separated list of glob patterns, see ?utils::glob2rx. The matching test files will start first. (The test- prefix is ignored.)

    Reporters

    Default reporters

    See default_reporter() for how testthat selects the default reporter for devtools::test() and testthat::test_local(). In short, testthat selects ProgressReporter for non-parallel and ParallelProgressReporter for parallel tests by default. (Other testthat test functions, like test_check(), test_file() , etc. select different reporters by default.)

    Parallel support

    Most reporters support parallel tests. If a reporter is passed to devtools::test(), testthat::test_dir(), etc. directly, and it does not support parallel tests, then testthat runs the test files sequentially.

    Currently the following reporters don’t support parallel tests:

    • DebugReporter, because it is not currently possible to debug subprocesses.

    • JunitReporter, because this reporter records timing information for each test block, and this is currently only available for reporters that support multiple active test files. (See “Writing parallel reporters” below.)

    • LocationReporter because testthat currently does not include location information for successful tests when running in parallel, to minimize messaging between the processes.

    • StopReporter, as this is a reporter that testthat uses for interactive expect_that() calls.

    The other built-in reporters all support parallel tests, with some subtle differences:

    • Reporters that stop after a certain number of failures can only stop at the end of a test file.

    • Reporters report all information about a file at once, unless they support parallel updates. E.g. ProgressReporter does not update its display until a test file is complete.

    • Standard output and standard error (i.e. output from print(), cat(), message(), etc. in the test files) are currently lost. If you want to use cat() or message() for print-debugging test cases, it is best to temporarily run tests sequentially, by changing the Config entry in DESCRIPTION or by selecting a non-parallel reporter, e.g. the CheckReporter:

      devtools::test(filter = "badtest", reporter = "check")

    Writing parallel reporters

    To support parallel tests, a reporter must be able to function when the test files run in a subprocess. For example DebugReporter does not support parallel tests, because it requires direct interaction with the frames in the subprocess. When running in parallel, testthat does not provide location information (source references) for test successes.

    To support parallel tests, a reporter must set self$capabilities$parallel_support to TRUE in its initialize() method:

    ...
    initialize = function(...) {
      super$initialize(...)
      self$capabilities$parallel_support <- TRUE
      ...
    }
    ...

    When running in parallel, testthat runs the reporter in the main process, and relays information between the reporter and the test code transparently. (Currently the reporter does not even know that the tests are running in parallel.)

    If a reporter does not support parallel updates (see below), then testthat internally caches all calls to the reporter methods from subprocesses, until a test file is complete. This is because these reporters are not prepared for running multiple test files concurrently. Once a test file is complete, testthat calls the reporter’s $start_file() method, relays all $start_test() , $end_test(), $add_result(), etc. calls in the order they came in from the subprocess, and calls $end_file() .

    Parallel updates

    The ParallelProgressReporter supports parallel updates. This means that once a message from a subprocess comes in, the reporter is updated immediately. For this to work, a reporter must be able to handle multiple test files concurrently.

    A reporter declares parallel update support by setting self$capabilities$parallel_updates to TRUE:

    ...
    initialize = function(...) {
      super$initialize(...)
      self$capabilities$parallel_support <- TRUE
      self$capabilities$parallel_updates <- TRUE
      ...
    }
    ...

    For these reporters, testthat does not cache the messages from the subprocesses. Instead, when a message comes in:

    • It calls the $start_file() method, letting the reporter know which file the following calls apply to. This means that the reporter can receive multiple $start_file() calls for the same file.

    • Then relays the message from the subprocess, calling the appropriate $start_test() , $add_result(), etc. method.

    testthat also calls the new $update() method of the reporter regularly, even if it does not receive any messages from the subprocesses. (testthat currently aims to do this every 100ms, but there are no guarantees.) The $update() method may implement a spinner to let the user know that the tests are running.

    testthat/inst/doc/skipping.html0000644000176200001440000004156414172347703016370 0ustar liggesusers Skipping tests

    Skipping tests

    Sometimes you have tests that you don’t want to run in certain circumstances. This vignette describes how to skip tests to avoid execution in undesired environments. Skipping is a relatively advanced topic because in most cases you want all your tests to run everywhere. The most common exceptions are:

    • You’re testing a web service that occasionally fails, and you don’t want to run the tests on CRAN. Or maybe the API requires authentication, and you can only run the tests when you’ve securely distributed some secrets.

    • You’re relying on features that not all operating systems possess, and want to make sure your code doesn’t run on a platform where it doesn’t work. This platform tends to be Windows since, amongst other things, it lacks full UTF-8 support.

    • You’re writing your tests for multiple versions of R or multiple versions of a dependency and you want to skip when a feature isn’t available. You generally don’t need to skip tests if a suggested package is not installed. This is only needed in exceptional circumstances, e.g. when a package is not available on some operating system.

    library(testthat)

    Basics

    testthat comes with a variety of helpers for the most common situations:

    • skip_on_cran() skips tests on CRAN. This is useful for slow tests and tests that occasionally fail for reasons outside of your control.

    • skip_on_os() allows you to skip tests on a specific operating system. Generally, you should strive to avoid this as much as possible (so your code works the same on all platforms), but sometimes it’s just not possible.

    • skip_on_ci() skips tests on most continuous integration platforms (e.g. GitHub Actions, Travis, Appveyor).

    You can also easily implement your own using either skip_if() or skip_if_not(), which both take an expression that should yield a single TRUE or FALSE.
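    For example, two sketches of custom conditions (MY_API_TOKEN is a made-up name):

    # Skip when a secret the test needs isn't available
    skip_if(identical(Sys.getenv("MY_API_TOKEN"), ""), "No API token")
    
    # Skip unless the platform has long-double support
    skip_if_not(capabilities("long.double"))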

    All reporters show which tests were skipped. As of testthat 3.0.0, ProgressReporter (used interactively) and CheckReporter (used inside of R CMD check) also display a summary of skips across all tests. It looks something like this:

    ── Skipped tests  ───────────────────────────────────────────────────────
    ● No token (3)
    ● On CRAN (1)

    You should keep an eye on this when developing interactively to make sure that you’re not accidentally skipping the wrong things.

    Helpers

    If you find yourself using the same skip_if()/skip_if_not() expression across multiple tests, it’s a good idea to create a helper function. This function should start with skip_ and live somewhere in your R/ directory.

    skip_if_Tuesday <- function() {
      if (as.POSIXlt(Sys.Date())$wday != 2) {
        return(invisible(TRUE))
      }
      
      skip("Not run on Tuesday")
    }

    It’s important to test your skip helpers because otherwise it’s easy to miss that you’re skipping more often than desired, which means the test code is never run. This is unlikely to happen locally (since you’ll see the skipped tests in the summary), but is quite possible in continuous integration.

    For that reason, it’s a good idea to add a test that checks your skip is activated when you expect it to be. Skips are a special type of condition, so you can test for their presence/absence with expect_condition(). For example, imagine that you’ve defined a custom skipper that skips tests whenever an environment variable DANGER is set:

    skip_if_dangerous <- function() {
      if (identical(Sys.getenv("DANGER"), "")) {
        return(invisible(TRUE))
      }
      
      skip("Not run in dangerous enviromnents")
    }

    Then you can use expect_condition() to test that it skips tests when it should, and doesn’t skip when it shouldn’t:

    test_that("skip_if_dangerous work", {
      # Test that a skip happens
      withr::local_envvar(DANGER = "yes")
      expect_condition(skip_if_dangerous(), class = "skip") 
    
      # Test that a skip doesn't happen
      withr::local_envvar(DANGER = "")
      expect_condition(skip_if_dangerous(), NA, class = "skip")
    })
    #> Test passed 🎊

    Testing skip_if_Tuesday() is harder because there’s no way to control the skipping from the outside. That means you’d need to “mock” its behaviour in a test, using the mockery or mockr packages.

    testthat/inst/doc/custom-expectation.R0000644000176200001440000000264314172347702017626 0ustar liggesusers## ----setup, include = FALSE--------------------------------------------------- library(testthat) knitr::opts_chunk$set(collapse = TRUE, comment = "#>") ## ----------------------------------------------------------------------------- expect_length <- function(object, n) { # 1. Capture object and label act <- quasi_label(rlang::enquo(object), arg = "object") # 2. Call expect() act$n <- length(act$val) expect( act$n == n, sprintf("%s has length %i, not length %i.", act$lab, act$n, n) ) # 3. Invisibly return the value invisible(act$val) } ## ----------------------------------------------------------------------------- mtcars %>% expect_type("list") %>% expect_s3_class("data.frame") %>% expect_length(11) ## ----------------------------------------------------------------------------- expect_length <- function(object, n) { act <- quasi_label(rlang::enquo(object), arg = "object") act$n <- length(act$val) if (act$n == n) { succeed() return(invisible(act$val)) } message <- sprintf("%s has length %i, not length %i.", act$lab, act$n, n) fail(message) } ## ----------------------------------------------------------------------------- test_that("length computed correctly", { expect_success(expect_length(1, 1)) expect_failure(expect_length(1, 2), "has length 1, not length 2.") expect_success(expect_length(1:10, 10)) expect_success(expect_length(letters[1:5], 5)) }) testthat/inst/doc/test-fixtures.Rmd0000644000176200001440000003575414164710003017140 0ustar liggesusers--- title: "Test fixtures" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Test fixtures} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>", # Since after is not available prior to 3.5 eval = getRversion() >= "3.5" ) ``` ## Test hygiene > Take nothing but memories, leave nothing but footprints. > > ― Chief Si'ahl Ideally, a test should leave the world exactly as it found it. But you often need to make some changes in order to exercise every part of your code: - Create a file or directory - Create a resource on an external system - Set an R option - Set an environment variable - Change working directory - Change an aspect of the tested package's state How can you clean up these changes to get back to a clean slate? Scrupulous attention to cleanup is more than just courtesy or being fastidious. It is also self-serving. The state of the world after test `i` is the starting state for test `i + 1`. Tests that change state willy-nilly eventually end up interfering with each other in ways that can be very difficult to debug. Most tests are written with an implicit assumption about the starting state, usually whatever *tabula rasa* means for the target domain of your package. If you accumulate enough sloppy tests, you will eventually find yourself asking the programming equivalent of questions like "Who forgot to turn off the oven?" and "Who didn't clean up after the dog?". It's also important that your setup and cleanup is easy to use when working interactively. When a test fails, you want to be able to quickly recreate the exact environment in which the test is run so you can interactively experiment to figure out what went wrong. This article introduces a powerful technique that allows you to solve both problems: **test fixtures**. 
We'll begin with an introduction to the tools that make fixtures possible, then talk about exactly what a test fixture is, and show a few examples. Much of this vignette is derived from ; if this is your first encounter with `on.exit()` or `withr::defer()`, I'd recommend starting with that blog as it gives a gentler introduction. This vignette moves a little faster since it's designed as more of a reference doc. ```{r} library(testthat) ``` ## Foundations Before we can talk about test fixtures, we need to lay some foundations to help you understand how they work. We'll motivate the discussion with a `sloppy()` function that prints a number with a specific number of significant digits by adjusting an R option: ```{r include = FALSE} op <- options() ``` ```{r} sloppy <- function(x, sig_digits) { options(digits = sig_digits) print(x) } pi sloppy(pi, 2) pi ``` ```{r include = FALSE} options(op) ``` Notice how `pi` prints differently before and after the call to `sloppy()`. Calling `sloppy()` has a side effect: it changes the `digits` option globally, not just within its own scope of operations. This is what we want to avoid[^1]. [^1]: Don't worry, I'm restoring global state (specifically, the `digits` option) behind the scenes here. ### `on.exit()` The first function you need to know about is base R's `on.exit()`. `on.exit()` calls the code to supplied to its first argument when the current function exits, regardless of whether it returns a value or errors. You can use `on.exit()` to clean up after yourself by ensuring that every mess-making function call is paired with an `on.exit()` call that cleans up. We can use this idea to turn `sloppy()` into `neat()`: ```{r} neat <- function(x, sig_digits) { op <- options(digits = sig_digits) on.exit(options(op), add = TRUE, after = FALSE) print(x) } pi neat(pi, 2) pi ``` Here we make use of a useful pattern `options()` implements: when you call `options(digits = sig_digits)` it both sets the `digits` option *and* (invisibly) returns the previous value of digits. We can then use that value to restore the previous options. `on.exit()` also works in tests: ```{r} test_that("can print one digit of pi", { op <- options(digits = 1) on.exit(options(op), add = TRUE, after = FALSE) expect_output(print(pi), "3") }) pi ``` There are three main drawbacks to `on.exit()`: - You should always call it with `add = TRUE` and `after = FALSE`. These ensure that the call is **added** to the list of deferred tasks (instead of replaces) and is added to the **front** of the stack (not the back, so that cleanup occurs in reverse order to setup). These arguments only matter if you're using multiple `on.exit()` calls, but it's a good habit to always use them to avoid potential problems down the road. - It doesn't work outside a function or test. If you run the following code in the global environment, you won't get an error, but the cleanup code will never be run: ```{r, eval = FALSE} op <- options(digits = 1) on.exit(options(op), add = TRUE, after = FALSE) ``` This is annoying when you are running tests interactively. - You can't program with it; `on.exit()` always works inside the *current* function so you can't wrap up repeated `on.exit()` code in a helper function. To resolve these drawbacks, we use `withr::defer()`. ### `withr::defer()` `withr::defer()` resolves the main drawbacks of `on.exit()`. 
First, it has the behaviour we want by default; no extra arguments needed: ```{r} neat <- function(x, sig_digits) { op <- options(digits = sig_digits) withr::defer(options(op)) print(x) } ``` Second, it works when called in the global environment. Since the global environment isn't perishable, like a test environment is, you have to call `deferred_run()` explicitly to execute the deferred events. You can also clear them, without running, with `deferred_clear()`. ```{r} withr::defer(print("hi")) #> Setting deferred event(s) on global environment. #> * Execute (and clear) with `deferred_run()`. #> * Clear (without executing) with `deferred_clear()`. withr::deferred_run() #> [1] "hi" ``` Finally, `withr::defer()` lets you pick which function to bind the clean up behaviour too. This makes it possible to create helper functions. ### "Local" helpers Imagine we have many functions where we want to temporarily set the digits option. Wouldn't it be nice if we could write a helper function to automate? Unfortunately we can't write a helper with `on.exit()`: ```{r} local_digits <- function(sig_digits) { op <- options(digits = sig_digits) on.exit(options(op), add = TRUE, after = FALSE) } neater <- function(x, sig_digits) { local_digits(1) print(x) } neater(pi) ``` This code doesn't work because the cleanup happens too soon, when `local_digits()` exists, not when `neat()` finishes. Fortunately, `withr::defer()` allows us to solve this problem by providing an `envir` argument that allows you to control when cleanup occurs. The exact details of how this works are rather complicated, but fortunately there's a common pattern you can use without understanding all the details. Your helper function should always have an `env` argument that defaults to `parent.frame()`, which you pass to the second argument of `defer()`: ```{r} local_digits <- function(sig_digits, env = parent.frame()) { op <- options(digits = sig_digits) withr::defer(options(op), env) } neater(pi) ``` Just like `on.exit()` and `defer()`, our helper also works within tests: ```{r} test_that("withr lets us write custom helpers for local state manipulation", { local_digits(1) expect_output(print(exp(1)), "3") local_digits(3) expect_output(print(exp(1)), "2.72") }) print(exp(1)) ``` We always call these helper functions `local_`; "local" here refers to the fact that the state change persists only locally, for the lifetime of the associated function or test. ### Pre-existing helpers But before you write your own helper function, make sure to check out the wide range of local functions already provided by withr: | Do / undo this | withr function | |-----------------------------|-------------------| | Create a file | `local_file()` | | Set an R option | `local_options()` | | Set an environment variable | `local_envvar()` | | Change working directory | `local_dir()` | We can use `withr::local_options()` to write yet another version of `neater()`: ```{r} neatest <- function(x, sig_digits) { withr::local_options(list(digits = sig_digits)) print(x) } neatest(pi, 3) ``` Each `local_*()` function has a companion `with_()` function, which is a nod to `with()`, and the inspiration for withr's name. We won't use the `with_*()` functions here, but you can learn more about them at [withr.r-lib.org](https://withr.r-lib.org). ## Test fixtures Testing is often demonstrated with cute little tests and functions where all the inputs and expected results can be inlined. 
But in real packages, things aren't always so simple and functions often depend on other global state. For example, take this variant on `message()` that only shows a message if the `verbose` option is `TRUE`. How would you test that setting the option does indeed silence the message? ```{r} message2 <- function(...) { if (!isTRUE(getOption("verbose"))) { return() } message(...) } ``` In some cases, it's possible to make the global state an explicit argument to the function. For example, we could refactor `message2()` to make the verbosity an explicit argument: ```{r} message3 <- function(..., verbose = getOption("verbose")) { if (!isTRUE(verbose)) { return() } message(...) } ``` Making external state explicit is often worthwhile, because it makes it more clear exactly what inputs determine the outputs of your function. But it's simply not possible in many cases. That's where test fixtures come in: they allow you to temporarily change global state in order to test your function. Test fixture is a pre-existing term in the software engineering world (and beyond): > A test fixture is something used to consistently test some item, device, or piece of software. > > --- [Wikipedia](https://en.wikipedia.org/wiki/Test_fixture) A **test fixture** is just a `local_` function that you use to change state in such a way that you can reach inside and test parts of your code that would otherwise be challenging. For example, here's how you could use `withr::local_options()` as a test fixture to test `message2()`: ```{r} test_that("message2() output depends on verbose option", { withr::local_options(verbose = TRUE) expect_message(message2("Hi!")) withr::local_options(verbose = FALSE) expect_message(message2("Hi!"), NA) }) ``` ### Case study: usethis One place that we use test fixtures extensively is in the usethis package ([usethis.r-lib.org](https://usethis.r-lib.org)), which provides functions for looking after the files and folders in R projects, especially packages. Many of these functions only make sense in the context of a package, which means to test them, we also have to be working inside an R package. We need a way to quickly spin up a minimal package in a temporary directory, then test some functions against it, then destroy it. To solve this problem we create a test fixture, which we place in `R/test-helpers.R` so that's it's available for both testing and interactive experimentation: ```{r, eval = FALSE} local_create_package <- function(dir = file_temp(), env = parent.frame()) { old_project <- proj_get_() # create new folder and package create_package(dir, open = FALSE) # A withr::defer(fs::dir_delete(dir), envir = env) # -A # change working directory setwd(dir) # B withr::defer(setwd(old_project), envir = env) # -B # switch to new usethis project proj_set(dir) # C withr::defer(proj_set(old_project, force = TRUE), envir = env) # -C dir } ``` Note that the cleanup automatically unfolds in the opposite order from the setup. Setup is `A`, then `B`, then `C`; cleanup is `-C`, then `-B`, then `-A`. This is important because we must create directory `dir` before we can make it the working directory; and we must restore the original working directory before we can delete `dir`; we can't delete `dir` while it's still the working directory! `local_create_package()` is used in over 170 tests. Here's one example that checks that `usethis::use_roxygen_md()` does the setup necessary to use roxygen2 in a package, with markdown support turned on. 
All 3 expectations consult the DESCRIPTION file, directly or indirectly. So it's very convenient that `local_create_package()` creates a minimal package, with a valid `DESCRIPTION` file, for us to test against. And when the test is done --- poof! --- the package is gone. ```{r eval = FALSE} test_that("use_roxygen_md() adds DESCRIPTION fields", { pkg <- local_create_package() use_roxygen_md() expect_true(uses_roxygen_md()) expect_equal(desc::desc_get("Roxygen", pkg)[[1]], "list(markdown = TRUE)")) expect_true(desc::desc_has_fields("RoxygenNote", pkg)) }) ``` ## Scope So far we have applied our test fixture to individual tests, but it's also possible to apply them to a file or package. ### File If you move the `local_()` call outside of a `test_that()` block, it will affect all tests that come after it. This means that by calling the test fixture at the top of the file you can change the behaviour for all tests. This has both advantages and disadvantages: - If you would otherwise have called the fixture in every test, you've saved yourself a bunch of work and duplicate code. - But on the downside, if you a test fails and you want to recreate the failure in an interactive environment so you can debug, you need to remember to run all the setup code at the top of the file first. Generally, I think it's better to copy and paste test fixtures across many tests --- sure, it adds some duplication to your code, but it makes debugging test failures so much easier. ### Package To run code before any test is run, you can create a file called `test/testthat/setup.R`. If the code in this file needs clean up, you can use the special `teardown_env()`: ```{r, eval = FALSE} # Run before any test write.csv("mtcars.csv", mtcars) # Run after all tests withr::defer(unlink("mtcars.csv"), teardown_env()) ``` Setup code is typically best used to create external resources that are needed by many tests. It's best kept to a minimum because you will have to manually run it before interactively debugging tests. ## Other challenges A collection of miscellaneous problems that I don't know where else to describe: - There are a few base functions that are hard to test because they depend on state that you can't control. One such example is `interactive()`: there's no way to write a test fixture that allows you to pretend that interactive is either `TRUE` or `FALSE`. So we now usually use `rlang::is_interactive()` which can be controlled by the `rlang_interactive` option. - If you're using a test fixture in a function, be careful about what you return. For example, if you write a function that does `dir <- create_local_package()` you shouldn't return `dir`, because after the function returns the directory will no longer exist. testthat/inst/doc/custom-expectation.Rmd0000644000176200001440000000674614164710003020144 0ustar liggesusers--- title: "Custom expectations" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Custom expectations} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include = FALSE} library(testthat) knitr::opts_chunk$set(collapse = TRUE, comment = "#>") ``` This vignette shows you how to create custom expectations that work identically to the built-in `expect_` functions. Since these functions will need to be loaded when your package is loaded for testing, it is recommended that `expect_` functions be defined in `test-helpers.R` in your packages `R/` directory. 
## Creating an expectation There are three main parts to writing an expectation, as illustrated by `expect_length()`: ```{r} expect_length <- function(object, n) { # 1. Capture object and label act <- quasi_label(rlang::enquo(object), arg = "object") # 2. Call expect() act$n <- length(act$val) expect( act$n == n, sprintf("%s has length %i, not length %i.", act$lab, act$n, n) ) # 3. Invisibly return the value invisible(act$val) } ``` ## Quasi-labelling The first step in any expectation is to capture the actual object, and generate a label for it to use if a failure occurs. All testthat expectations support quasiquotation so that you can unquote variables. This makes it easier to generate good labels when the expectation is called from a function or within a for loop. By convention, the first argument to every `expect_` function is called `object`, and you capture its value (`val`) and label (`lab`) with `act <- quasi_label(enquo(object))`, where `act` is short for actual. ### Verify the expectation Next, you should verify the expectation. This often involves a little computation (here just figuring out the `length`), and you should typically store the results back into the `act` object. Next you call `expect()`. This has two arguments: 1. `ok`: was the expectation successful? This is usually easy to write. 2. `failure_message`: what informative error message should be reported to the user so that they can diagnose the problem? This is often hard to write! For historical reasons, most built-in expectations generate these with `sprintf()`, but today I'd recommend using the [glue](https://glue.tidyverse.org) package. ### Invisibly return the input Expectation functions are called primarily for their side-effects (triggering a failure), so they should invisibly return their input, `act$val`. This allows expectations to be chained: ```{r} mtcars %>% expect_type("list") %>% expect_s3_class("data.frame") %>% expect_length(11) ``` ## `succeed()` and `fail()` For expectations with more complex logic governing when success or failure occurs, you can use `succeed()` and `fail()`. These are simple wrappers around `expect()` that allow you to write code that looks like this: ```{r} expect_length <- function(object, n) { act <- quasi_label(rlang::enquo(object), arg = "object") act$n <- length(act$val) if (act$n == n) { succeed() return(invisible(act$val)) } message <- sprintf("%s has length %i, not length %i.", act$lab, act$n, n) fail(message) } ``` ## Testing your expectations Use the expectations `expect_success()` and `expect_failure()` to test your expectation. ```{r} test_that("length computed correctly", { expect_success(expect_length(1, 1)) expect_failure(expect_length(1, 2), "has length 1, not length 2.") expect_success(expect_length(1:10, 10)) expect_success(expect_length(letters[1:5], 5)) }) ``` testthat/inst/doc/custom-expectation.html0000644000176200001440000004335014172347702020367 0ustar liggesusers Custom expectations

    Custom expectations

This vignette shows you how to create custom expectations that work identically to the built-in expect_ functions. Since these functions will need to be loaded when your package is loaded for testing, it is recommended that expect_ functions be defined in test-helpers.R in your package's R/ directory.

    Creating an expectation

    There are three main parts to writing an expectation, as illustrated by expect_length():

    expect_length <- function(object, n) {
      # 1. Capture object and label
      act <- quasi_label(rlang::enquo(object), arg = "object")
    
      # 2. Call expect()
      act$n <- length(act$val)
      expect(
        act$n == n,
        sprintf("%s has length %i, not length %i.", act$lab, act$n, n)
      )
    
      # 3. Invisibly return the value
      invisible(act$val)
    }

    Quasi-labelling

The first step in any expectation is to capture the actual object, and generate a label for it to use if a failure occurs. All testthat expectations support quasiquotation so that you can unquote variables. This makes it easier to generate good labels when the expectation is called from a function or within a for loop.

By convention, the first argument to every expect_ function is called object, and you capture its value (val) and label (lab) with act <- quasi_label(enquo(object)), where act is short for actual.
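For example, because quasi_label() supports unquoting, you can substitute a value into the label with !!. A minimal sketch (reusing the expect_length() defined above, and assuming it is in scope):

test_that("quasiquotation gives informative labels", {
  for (x in list(1:3, letters)) {
    # With !!, a failure is labelled with the value (e.g. 1:3), not the opaque name x
    expect_length(!!x, length(x))
  }
})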

    Verify the expectation

    Next, you should verify the expectation. This often involves a little computation (here just figuring out the length), and you should typically store the results back into the act object.

    Next you call expect(). This has two arguments:

1. ok: was the expectation successful? This is usually easy to write.

2. failure_message: what informative error message should be reported to the user so that they can diagnose the problem? This is often hard to write!

For historical reasons, most built-in expectations generate these with sprintf(), but today I’d recommend using the glue package.
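For instance, a glue-based variant of the failure message above might look like the following sketch; expect_length2 is a hypothetical name used here for illustration, not part of testthat:

expect_length2 <- function(object, n) {
  act <- quasi_label(rlang::enquo(object), arg = "object")

  act$n <- length(act$val)
  expect(
    act$n == n,
    # glue interpolates the label and lengths directly into the message
    glue::glue("{act$lab} has length {act$n}, not length {n}.")
  )

  invisible(act$val)
}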

    Invisibly return the input

Expectation functions are called primarily for their side-effects (triggering a failure), so they should invisibly return their input, act$val. This allows expectations to be chained:

    mtcars %>%
      expect_type("list") %>%
      expect_s3_class("data.frame") %>% 
      expect_length(11)

    succeed() and fail()

    For expectations with more complex logic governing when success or failure occurs, you can use succeed() and fail(). These are simple wrappers around expect() that allow you to write code that looks like this:

    expect_length <- function(object, n) {
      act <- quasi_label(rlang::enquo(object), arg = "object")
    
      act$n <- length(act$val)
      if (act$n == n) {
        succeed()
        return(invisible(act$val))
      }
    
      message <- sprintf("%s has length %i, not length %i.", act$lab, act$n, n)
      fail(message)
    }
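succeed() and fail() can also be called directly inside test_that(), which is handy for one-off checks that don't map onto an existing expectation. A small sketch:

test_that("x is strictly increasing", {
  x <- c(1, 5, 10)
  if (all(diff(x) > 0)) {
    succeed()
  } else {
    fail("x is not strictly increasing")
  }
})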

    Testing your expectations

    Use the expectations expect_success() and expect_failure() to test your expectation.

    test_that("length computed correctly", {
      expect_success(expect_length(1, 1))
      expect_failure(expect_length(1, 2), "has length 1, not length 2.")
      expect_success(expect_length(1:10, 10))
      expect_success(expect_length(letters[1:5], 5))
    })
    #> Test passed 😀
testthat/inst/doc/third-edition.Rmd0000644000176200001440000002245714164710003017051 0ustar liggesusers--- title: "testthat 3e" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{testthat 3e} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` testthat 3.0.0 introduces the idea of an "edition" of testthat. An edition is a bundle of behaviours that you have to explicitly choose to use, allowing us to make otherwise backward incompatible changes. This is particularly important for testthat since it has a very large number of packages that use it (almost 5,000 at last count). Choosing to use the 3rd edition allows you to use our latest recommendations for ongoing and new work, while historical packages continue to use the old behaviour. (We don't anticipate creating new editions very often, and they'll always be matched with a major version, i.e. if there's another edition, it'll be the fourth edition and will come with testthat 4.0.0.) This vignette shows you how to activate the 3rd edition, introduces the main features, and discusses common challenges when upgrading a package. If you have a problem that this vignette doesn't cover, please let me know, as it's likely that the problem also affects others. ```{r, message = FALSE} library(testthat) local_edition(3) ``` ## Activating The usual way to activate the 3rd edition is to add a line to your `DESCRIPTION`: Config/testthat/edition: 3 This will activate the 3rd edition for every test in your package. You can also control the edition used for individual tests with `testthat::local_edition()`: ```{r} test_that("I can use the 3rd edition", { local_edition(3) expect_true(TRUE) }) ``` This is also useful if you've switched to the 3rd edition and have a couple of tests that fail. You can use `local_edition(2)` to revert back to the old behaviour, giving you some breathing room to figure out the underlying issue. ```{r} test_that("I want to use the 2nd edition", { local_edition(2) expect_true(TRUE) }) ``` ## Changes There are three major changes in the 3rd edition: - A number of outdated functions are now **deprecated**, so you'll be warned about them every time you run your tests (but they won't cause `R CMD check` to fail). - testthat no longer silently swallows **messages**; you now need to deliberately handle them. - `expect_equal()` and `expect_identical()` now use the [**waldo**](https://waldo.r-lib.org/) package instead of `identical()` and `all.equal()`. This makes them more consistent and provides an enhanced display of differences when a test fails. ### Deprecations A number of outdated functions have been deprecated. Most of these functions have not been recommended for a number of years, but before the introduction of the edition idea, I didn't have a good way of preventing people from using them without breaking a lot of code on CRAN. - `context()` is formally deprecated. testthat has been moving away from `context()` in favour of file names for quite some time, and now you'll be strongly encouraged to remove these calls from your tests. - `expect_is()` is deprecated in favour of the more specific `expect_type()`, `expect_s3_class()`, and `expect_s4_class()`. This ensures that you check the expected class along with the expected OO system. - The very old `expect_that()` syntax is now deprecated. This was an overly clever API that I regretted even before the release of testthat 1.0.0.
- `expect_equivalent()` has been deprecated since it is now equivalent (HA HA) to `expect_equal(ignore_attr = TRUE)`. The main difference is that it won't ignore names; so you'll need an explicit `unname()` if you deliberately want to ignore names. - `setup()` and `teardown()` are deprecated in favour of test fixtures. See `vignette("test-fixtures")` for details. - `expect_known_output()`, `expect_known_value()`, `expect_known_hash()`, and `expect_equal_to_reference()` are all deprecated in favour of `expect_snapshot_output()` and `expect_snapshot_value()`. - `with_mock()` and `local_mock()` are deprecated; please use the [mockr](https://krlmlr.github.io/mockr/) or [mockery](https://github.com/r-lib/mockery#mockery) packages instead. Fixing these deprecation warnings should be straightforward. ### Warnings In the second edition, `expect_warning()` swallows all warnings regardless of whether or not they match the `regexp` or `class`: ```{r} f <- function() { warning("First warning") warning("Second warning") warning("Third warning") } local_edition(2) expect_warning(f(), "First") ``` In the third edition, `expect_warning()` captures at most one warning so the others will bubble up: ```{r} local_edition(3) expect_warning(f(), "First") ``` You can either add additional expectations to catch these warnings, or silence them all with `suppressWarnings()`: ```{r} f() %>% expect_warning("First") %>% expect_warning("Second") %>% expect_warning("Third") f() %>% expect_warning("First") %>% suppressWarnings() ``` Alternatively, you might want to capture them all in a snapshot test: ```{r} test_that("f() produces expected outputs/messages/warnings", { expect_snapshot(f()) }) ``` The same principle also applies to `expect_message()`, but message handling has changed in a more radical way, as described next. ### Messages For reasons that I can no longer remember, testthat silently ignores all messages. This is inconsistent with other types of output, so as of the 3rd edition, they now bubble up to your test results. You'll have to explicitly ignore them with `suppressMessages()`, or if they're important, test for their presence with `expect_message()`. ### waldo Probably the biggest day-to-day difference (and the biggest reason to upgrade!) is the use of [`waldo::compare()`](https://waldo.r-lib.org/reference/compare.html) inside of `expect_equal()` and `expect_identical()`. The goal of waldo is to find and concisely describe the difference between a pair of R objects, and it's designed specifically to help you figure out what's gone wrong in your unit tests. ```{r, error = TRUE} f1 <- factor(letters[1:3]) f2 <- ordered(letters[1:3], levels = letters[1:4]) local_edition(2) expect_equal(f1, f2) local_edition(3) expect_equal(f1, f2) ``` waldo looks even better in your console because it carefully uses colours to help highlight the differences. The use of waldo also makes precise the difference between `expect_equal()` and `expect_identical()`: `expect_equal()` sets `tolerance` so that waldo will ignore small numerical differences arising from floating point computation. Otherwise the functions are identical (HA HA). This change is likely to result in the most work during an upgrade, because waldo can give slightly different results to both `identical()` and `all.equal()` in moderately common situations. I believe on the whole the differences are meaningful and useful, so you'll need to handle them by tweaking your tests.
The following changes are most likely to affect you: - `expect_equal()` previously ignored the environments of formulas and functions. This is most likely to arise if you are testing models. It's worth thinking about what the correct values should be, but if that is too annoying you can opt out of the comparison with `ignore_function_env` or `ignore_formula_env`. - `expect_equal()` used a combination of `all.equal()` and a home-grown `testthat::compare()` which unfortunately used a slightly different definition of tolerance. Now `expect_equal()` always uses the same definition of tolerance everywhere, which may require tweaks to your existing tolerance values. - `expect_equal()` previously ignored timezone differences when one object had the current timezone set implicitly (with `""`) and the other had it set explicitly: ```{r, error = TRUE} dt1 <- dt2 <- ISOdatetime(2020, 1, 2, 3, 4, 0) attr(dt1, "tzone") <- "" attr(dt2, "tzone") <- Sys.timezone() local_edition(2) expect_equal(dt1, dt2) local_edition(3) expect_equal(dt1, dt2) ``` ### Reproducible output In the third edition, `test_that()` automatically calls `local_reproducible_output()`, which sets a number of options and environment variables to make output as reproducible as possible across systems. This includes setting: - `options(crayon.enabled = FALSE)` and `options(cli.unicode = FALSE)` so that the crayon and cli packages produce raw ASCII output. - `Sys.setlocale("LC_COLLATE", "C")` so that sorting a character vector returns the same order regardless of the system language. - `options(width = 80)` so print methods always generate the same output regardless of your actual console width. See the documentation for more details. ## Alternatives You might wonder why we came up with the idea of an "edition", rather than creating a new package like testthat3. We decided against making a new package because the 2nd and 3rd edition share a very large amount of code, so making a new package would have substantially increased the maintenance burden: the majority of bugs would've needed to be fixed in two places. If you're a programmer in other languages, you might wonder why we can't rely on [semantic versioning](https://semver.org). The main reason is that CRAN checks all packages that use testthat with the latest version of testthat, so simply incrementing the major version number doesn't actually help with reducing R CMD check failures on CRAN.
testthat/inst/doc/third-edition.R0000644000176200001440000000352714172347704016542 0ustar liggesusers## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ---- message = FALSE--------------------------------------------------------- library(testthat) local_edition(3) ## ----------------------------------------------------------------------------- test_that("I can use the 3rd edition", { local_edition(3) expect_true(TRUE) }) ## ----------------------------------------------------------------------------- test_that("I want to use the 2nd edition", { local_edition(2) expect_true(TRUE) }) ## ----------------------------------------------------------------------------- f <- function() { warning("First warning") warning("Second warning") warning("Third warning") } local_edition(2) expect_warning(f(), "First") ## ----------------------------------------------------------------------------- local_edition(3) expect_warning(f(), "First") ## ----------------------------------------------------------------------------- f() %>% expect_warning("First") %>% expect_warning("Second") %>% expect_warning("Third") f() %>% expect_warning("First") %>% suppressWarnings() ## ----------------------------------------------------------------------------- test_that("f() produces expected outputs/messages/warnings", { expect_snapshot(f()) }) ## ---- error = TRUE------------------------------------------------------------ f1 <- factor(letters[1:3]) f2 <- ordered(letters[1:3], levels = letters[1:4]) local_edition(2) expect_equal(f1, f2) local_edition(3) expect_equal(f1, f2) ## ---- error = TRUE------------------------------------------------------------ dt1 <- dt2 <- ISOdatetime(2020, 1, 2, 3, 4, 0) attr(dt1, "tzone") <- "" attr(dt2, "tzone") <- Sys.timezone() local_edition(2) expect_equal(dt1, dt2) local_edition(3) expect_equal(dt1, dt2) testthat/inst/doc/test-fixtures.R0000644000176200001440000001117414172347704016622 0ustar liggesusers## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>", # Since after is not available prior to 3.5 eval = getRversion() >= "3.5" ) ## ----------------------------------------------------------------------------- library(testthat) ## ----include = FALSE---------------------------------------------------------- op <- options() ## ----------------------------------------------------------------------------- sloppy <- function(x, sig_digits) { options(digits = sig_digits) print(x) } pi sloppy(pi, 2) pi ## ----include = FALSE---------------------------------------------------------- options(op) ## ----------------------------------------------------------------------------- neat <- function(x, sig_digits) { op <- options(digits = sig_digits) on.exit(options(op), add = TRUE, after = FALSE) print(x) } pi neat(pi, 2) pi ## ----------------------------------------------------------------------------- test_that("can print one digit of pi", { op <- options(digits = 1) on.exit(options(op), add = TRUE, after = FALSE) expect_output(print(pi), "3") }) pi ## ---- eval = FALSE------------------------------------------------------------ # op <- options(digits = 1) # on.exit(options(op), add = TRUE, after = FALSE) ## ----------------------------------------------------------------------------- neat <- function(x, sig_digits) { op <- options(digits = sig_digits) withr::defer(options(op)) print(x) } ## 
----------------------------------------------------------------------------- withr::defer(print("hi")) #> Setting deferred event(s) on global environment. #> * Execute (and clear) with `deferred_run()`. #> * Clear (without executing) with `deferred_clear()`. withr::deferred_run() #> [1] "hi" ## ----------------------------------------------------------------------------- local_digits <- function(sig_digits) { op <- options(digits = sig_digits) on.exit(options(op), add = TRUE, after = FALSE) } neater <- function(x, sig_digits) { local_digits(1) print(x) } neater(pi) ## ----------------------------------------------------------------------------- local_digits <- function(sig_digits, env = parent.frame()) { op <- options(digits = sig_digits) withr::defer(options(op), env) } neater(pi) ## ----------------------------------------------------------------------------- test_that("withr lets us write custom helpers for local state manipulation", { local_digits(1) expect_output(print(exp(1)), "3") local_digits(3) expect_output(print(exp(1)), "2.72") }) print(exp(1)) ## ----------------------------------------------------------------------------- neatest <- function(x, sig_digits) { withr::local_options(list(digits = sig_digits)) print(x) } neatest(pi, 3) ## ----------------------------------------------------------------------------- message2 <- function(...) { if (!isTRUE(getOption("verbose"))) { return() } message(...) } ## ----------------------------------------------------------------------------- message3 <- function(..., verbose = getOption("verbose")) { if (!isTRUE(verbose)) { return() } message(...) } ## ----------------------------------------------------------------------------- test_that("message2() output depends on verbose option", { withr::local_options(verbose = TRUE) expect_message(message2("Hi!")) withr::local_options(verbose = FALSE) expect_message(message2("Hi!"), NA) }) ## ---- eval = FALSE------------------------------------------------------------ # local_create_package <- function(dir = file_temp(), env = parent.frame()) { # old_project <- proj_get_() # # # create new folder and package # create_package(dir, open = FALSE) # A # withr::defer(fs::dir_delete(dir), envir = env) # -A # # # change working directory # setwd(dir) # B # withr::defer(setwd(old_project), envir = env) # -B # # # switch to new usethis project # proj_set(dir) # C # withr::defer(proj_set(old_project, force = TRUE), envir = env) # -C # # dir # } ## ----eval = FALSE------------------------------------------------------------- # test_that("use_roxygen_md() adds DESCRIPTION fields", { # pkg <- local_create_package() # use_roxygen_md() # # expect_true(uses_roxygen_md()) # expect_equal(desc::desc_get("Roxygen", pkg)[[1]], "list(markdown = TRUE)")) # expect_true(desc::desc_has_fields("RoxygenNote", pkg)) # }) ## ---- eval = FALSE------------------------------------------------------------ # # Run before any test # write.csv("mtcars.csv", mtcars) # # # Run after all tests # withr::defer(unlink("mtcars.csv"), teardown_env()) testthat/inst/doc/third-edition.html0000644000176200001440000007471114172347705017311 0ustar liggesusers testthat 3e

    testthat 3e

    testthat 3.0.0 introduces the idea of an “edition” of testthat. An edition is a bundle of behaviours that you have to explicitly choose to use, allowing us to make otherwise backward incompatible changes. This is particularly important for testthat since it has a very large number of packages that use it (almost 5,000 at last count). Choosing to use the 3rd edition allows you to use our latest recommendations for ongoing and new work, while historical packages continue to use the old behaviour.

(We don’t anticipate creating new editions very often, and they’ll always be matched with a major version, i.e. if there’s another edition, it’ll be the fourth edition and will come with testthat 4.0.0.)

    This vignette shows you how to activate the 3rd edition, introduces the main features, and discusses common challenges when upgrading a package. If you have a problem that this vignette doesn’t cover, please let me know, as it’s likely that the problem also affects others.

    library(testthat)
    local_edition(3)

    Activating

    The usual way to activate the 3rd edition is to add a line to your DESCRIPTION:

    Config/testthat/edition: 3

    This will activate the 3rd edition for every test in your package.
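If you use usethis, recent versions can add this field for you; a one-line sketch:

usethis::use_testthat(3)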

    You can also control the edition used for individual tests with testthat::local_edition():

    test_that("I can use the 3rd edition", {
      local_edition(3)
      expect_true(TRUE)
    })
    #> Test passed 🎉

    This is also useful if you’ve switched to the 3rd edition and have a couple of tests that fail. You can use local_edition(2) to revert back to the old behaviour, giving you some breathing room to figure out the underlying issue.

    test_that("I want to use the 2nd edition", {
      local_edition(2)
      expect_true(TRUE)
    })
    #> Test passed 🎊

    Changes

    There are three major changes in the 3rd edition:

    • A number of outdated functions are now deprecated, so you’ll be warned about them every time you run your tests (but they won’t cause R CMD check to fail).

    • testthat no longer silently swallows messages; you now need to deliberately handle them.

    • expect_equal() and expect_identical() now use the waldo package instead of identical() and all.equal(). This makes them more consistent and provides an enhanced display of differences when a test fails.

    Deprecations

    A number of outdated functions have been deprecated. Most of these functions have not been recommended for a number of years, but before the introduction of the edition idea, I didn’t have a good way of preventing people from using them without breaking a lot of code on CRAN.

• context() is formally deprecated. testthat has been moving away from context() in favour of file names for quite some time, and now you’ll be strongly encouraged to remove these calls from your tests.

    • expect_is() is deprecated in favour of the more specific expect_type(), expect_s3_class(), and expect_s4_class(). This ensures that you check the expected class along with the expected OO system.

    • The very old expect_that() syntax is now deprecated. This was an overly clever API that I regretted even before the release of testthat 1.0.0.

    • expect_equivalent() has been deprecated since it is now equivalent (HA HA) to expect_equal(ignore_attr = TRUE). The main difference is that it won’t ignore names; so you’ll need an explicit unname() if you deliberately want to ignore names.

    • setup() and teardown() are deprecated in favour of test fixtures. See vignette("test-fixtures") for details.

    • expect_known_output(), expect_known_value(), expect_known_hash(), and expect_equal_to_reference() are all deprecated in favour of expect_snapshot_output() and expect_snapshot_value().

• with_mock() and local_mock() are deprecated; please use the mockr or mockery packages instead.

    Fixing these deprecation warnings should be straightforward.
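For example, updating a deprecated class check is usually a mechanical one-line change:

# 2nd edition
expect_is(mtcars, "data.frame")

# 3rd edition: be explicit about which OO system you mean
expect_s3_class(mtcars, "data.frame")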

    Warnings

    In the second edition, expect_warning() swallows all warnings regardless of whether or not they match the regexp or class:

    f <- function() {
      warning("First warning")
      warning("Second warning")
      warning("Third warning")
    }
    
    local_edition(2)
    expect_warning(f(), "First")

    In the third edition, expect_warning() captures at most one warning so the others will bubble up:

    local_edition(3)
    expect_warning(f(), "First")
    #> Warning in f(): Second warning
    #> Warning in f(): Third warning

You can either add additional expectations to catch these warnings, or silence them all with suppressWarnings():

    f() %>% 
      expect_warning("First") %>% 
      expect_warning("Second") %>% 
      expect_warning("Third")
    
    f() %>% 
      expect_warning("First") %>% 
      suppressWarnings()

    Alternatively, you might want to capture them all in a snapshot test:

    test_that("f() produces expected outputs/messages/warnings", {
      expect_snapshot(f())  
    })
    #> Can't compare snapshot to reference when testing interactively
    #> i Run `devtools::test()` or `testthat::test_file()` to see changes
    #> i Current value:
    #> Code
    #>   f()
    #> Warning <simpleWarning>
    #>   First warning
    #>   Second warning
    #>   Third warning
    #> ── Skip (???): f() produces expected outputs/messages/warnings ─────────────────
    #> Reason: empty test

    The same principle also applies to expect_message(), but message handling has changed in a more radical way, as described next.

    Messages

For reasons that I can no longer remember, testthat silently ignores all messages. This is inconsistent with other types of output, so as of the 3rd edition, they now bubble up to your test results. You’ll have to explicitly ignore them with suppressMessages(), or if they’re important, test for their presence with expect_message().
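For example, given a function that signals a message, you can either assert that the message occurs or deliberately silence it; both of the following illustrative tests pass under the 3rd edition:

g <- function() {
  message("loading data")
  42
}

test_that("g()'s message is handled explicitly", {
  # assert the message is signalled...
  expect_message(g(), "loading data")
  # ...or swallow it on purpose
  expect_equal(suppressMessages(g()), 42)
})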

    waldo

    Probably the biggest day-to-day difference (and the biggest reason to upgrade!) is the use of waldo::compare() inside of expect_equal() and expect_identical(). The goal of waldo is to find and concisely describe the difference between a pair of R objects, and it’s designed specifically to help you figure out what’s gone wrong in your unit tests.

    f1 <- factor(letters[1:3])
    f2 <- ordered(letters[1:3], levels = letters[1:4])
    
    local_edition(2)
    expect_equal(f1, f2)
    #> Error: `f1` not equal to `f2`.
    #> Attributes: < Component "class": Lengths (1, 2) differ (string compare on first 1) >
    #> Attributes: < Component "class": 1 string mismatch >
    #> Attributes: < Component "levels": Lengths (3, 4) differ (string compare on first 3) >
    
    local_edition(3)
    expect_equal(f1, f2)
    #> Error: `f1` (`actual`) not equal to `f2` (`expected`).
    #> 
    #> `class(actual)`:   "factor"          
    #> `class(expected)`: "ordered" "factor"
    #> 
    #> `levels(actual)`:   "a" "b" "c"    
    #> `levels(expected)`: "a" "b" "c" "d"

    waldo looks even better in your console because it carefully uses colours to help highlight the differences.

    The use of waldo also makes precise the difference between expect_equal() and expect_identical(): expect_equal() sets tolerance so that waldo will ignore small numerical differences arising from floating point computation. Otherwise the functions are identical (HA HA).
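A quick illustration of the practical difference; the failing call is wrapped in try() here so the example can be run top to bottom:

local_edition(3)
expect_equal(sqrt(2)^2, 2)           # passes: difference is within the default tolerance
try(expect_identical(sqrt(2)^2, 2))  # fails: the values differ by ~4.4e-16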

    This change is likely to result in the most work during an upgrade, because waldo can give slightly different results to both identical() and all.equal() in moderately common situations. I believe on the whole the differences are meaningful and useful, so you’ll need to handle them by tweaking your tests. The following changes are most likely to affect you:

• expect_equal() previously ignored the environments of formulas and functions. This is most likely to arise if you are testing models. It’s worth thinking about what the correct values should be, but if that is too annoying you can opt out of the comparison with ignore_function_env or ignore_formula_env.

• expect_equal() used a combination of all.equal() and a home-grown testthat::compare() which unfortunately used a slightly different definition of tolerance. Now expect_equal() always uses the same definition of tolerance everywhere, which may require tweaks to your existing tolerance values.

• expect_equal() previously ignored timezone differences when one object had the current timezone set implicitly (with "") and the other had it set explicitly:

      dt1 <- dt2 <- ISOdatetime(2020, 1, 2, 3, 4, 0)
      attr(dt1, "tzone") <- ""
      attr(dt2, "tzone") <- Sys.timezone()
      
      local_edition(2)
      expect_equal(dt1, dt2)
      
      local_edition(3)
      expect_equal(dt1, dt2)
      #> Error: `dt1` (`actual`) not equal to `dt2` (`expected`).
      #> 
      #> `attr(actual, 'tzone')`:   ""               
      #> `attr(expected, 'tzone')`: "America/Chicago"

    Reproducible output

In the third edition, test_that() automatically calls local_reproducible_output(), which sets a number of options and environment variables to make output as reproducible as possible across systems. This includes setting:

    • options(crayon.enabled = FALSE) and options(cli.unicode = FALSE) so that the crayon and cli packages produce raw ASCII output.

• Sys.setlocale("LC_COLLATE", "C") so that sorting a character vector returns the same order regardless of the system language.

    • options(width = 80) so print methods always generate the same output regardless of your actual console width.

    See the documentation for more details.
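You can also call local_reproducible_output() yourself, e.g. to override one of the defaults while snapshotting wide output. A sketch, assuming a current testthat where width is one of its documented arguments:

test_that("print output is stable", {
  # narrow the console so the snapshot wraps the same way everywhere
  local_reproducible_output(width = 60)
  expect_snapshot(print(letters))
})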

    Alternatives

    You might wonder why we came up with the idea of an “edition”, rather than creating a new package like testthat3. We decided against making a new package because the 2nd and 3rd edition share a very large amount of code, so making a new package would have substantially increased the maintenance burden: the majority of bugs would’ve needed to be fixed in two places.

    If you’re a programmer in other languages, you might wonder why we can’t rely on semantic versioning. The main reason is that CRAN checks all packages that use testthat with the latest version of testthat, so simply incrementing the major version number doesn’t actually help with reducing R CMD check failures on CRAN.

testthat/inst/doc/skipping.R0000644000176200001440000000207614172347703015610 0ustar liggesusers## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(testthat) ## ----------------------------------------------------------------------------- skip_if_Tuesday <- function() { if (as.POSIXlt(Sys.Date())$wday != 2) { return(invisible(TRUE)) } skip("Not run on Tuesday") } ## ----------------------------------------------------------------------------- skip_if_dangerous <- function() { if (identical(Sys.getenv("DANGER"), "")) { return(invisible(TRUE)) } skip("Not run in dangerous environments") } ## ----------------------------------------------------------------------------- test_that("skip_if_dangerous works", { # Test that a skip happens withr::local_envvar(DANGER = "yes") expect_condition(skip_if_dangerous(), class = "skip") # Test that a skip doesn't happen withr::local_envvar(DANGER = "") expect_condition(skip_if_dangerous(), NA, class = "skip") }) testthat/inst/doc/snapshotting.R0000644000176200001440000000553514172347703016510 0ustar liggesusers## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) set.seed(1014) ## ----setup-------------------------------------------------------------------- library(testthat) ## ----include = FALSE---------------------------------------------------------- snapper <- local_snapshotter() snapper$start_file("snapshotting.Rmd", "test") ## ----------------------------------------------------------------------------- bullets <- function(text, id = NULL) { paste0( "<ul", if (!is.null(id)) paste0(" id=\"", id, "\""), ">\n", paste0("  <li>", text, "</li>\n", collapse = ""), "</ul>\n" ) } cat(bullets("a", id = "x")) ## ----------------------------------------------------------------------------- test_that("bullets", { expect_equal(bullets("a"), "<ul>\n  <li>a</li>\n</ul>\n") expect_equal(bullets("a", id = "x"), "<ul id=\"x\">\n  <li>a</li>\n</ul>\n") }) ## ----------------------------------------------------------------------------- test_that("bullets", { expect_snapshot(cat(bullets("a"))) expect_snapshot(cat(bullets("a", "b"))) }) ## ---- include = FALSE--------------------------------------------------------- # Reset snapshot test snapper$end_file() snapper$start_file("snapshotting.Rmd", "test") ## ----------------------------------------------------------------------------- test_that("bullets", { expect_snapshot(cat(bullets("a"))) expect_snapshot(cat(bullets("a", "b"))) }) ## ---- include = FALSE--------------------------------------------------------- # Reset snapshot test snapper$end_file() snapper$start_file("snapshotting.Rmd", "test") ## ---- error = TRUE------------------------------------------------------------ bullets <- function(text, id = NULL) { paste0( "<ul", if (!is.null(id)) paste0(" id=\"", id, "\""), ">\n", paste0("  <li>", text, "</li>\n", collapse = ""), "</ul>\n" ) } test_that("bullets", { expect_snapshot(cat(bullets("a"))) expect_snapshot(cat(bullets("a", "b"))) }) ## ----------------------------------------------------------------------------- f <- function() { print("Hello") message("Hi!") warning("How are you?") } ## ----------------------------------------------------------------------------- test_that("f() makes lots of noise", { expect_snapshot(f()) }) ## ---- error = TRUE------------------------------------------------------------ test_that("you can't add a number and a letter", { expect_snapshot(1 + "a") }) ## ----------------------------------------------------------------------------- test_that("you can't add a number and a letter", { expect_snapshot(1 + "a", error = TRUE) }) ## ----------------------------------------------------------------------------- test_that("you can't add weird things", { expect_snapshot(error = TRUE, { 1 + "a" mtcars + iris mean + sum }) }) testthat/inst/resources/0000755000176200001440000000000014164710002015074 5ustar liggesuserstestthat/inst/resources/catch-routine-registration.R0000644000176200001440000000043514164710002022476 0ustar liggesusers# This dummy function definition is included with the package to ensure that # 'tools::package_native_routine_registration_skeleton()' generates the required # registration info for the 'run_testthat_tests' symbol. (function() { .Call("run_testthat_tests", FALSE, PACKAGE = "%s") }) testthat/inst/resources/test-cpp.R0000644000176200001440000000002414164710002016752 0ustar liggesusersrun_cpp_tests("%s") testthat/inst/resources/test-example.cpp0000644000176200001440000000216012661230133020211 0ustar liggesusers/* * This file uses the Catch unit testing library, alongside * testthat's simple bindings, to test a C++ function. * * For your own packages, ensure that your test files are * placed within the `src/` folder, and that you include * `LinkingTo: testthat` within your DESCRIPTION file. */ // All test files should include the <testthat.h> // header file. #include <testthat.h> // Normally this would be a function from your package's // compiled library -- you might instead just include a header // file providing the definition, and let R CMD INSTALL // handle building and linking. int twoPlusTwo() { return 2 + 2; } // Initialize a unit test context. This is similar to how you // might begin an R test file with 'context()', except that the // associated context should be wrapped in braces. context("Sample unit tests") { // The format for specifying tests is similar to that of // testthat's R functions. Use 'test_that()' to define a // unit test, and use 'expect_true()' and 'expect_false()' // to test the desired conditions. test_that("two plus two equals four") { expect_true(twoPlusTwo() == 4); } } testthat/inst/resources/test-runner.cpp0000644000176200001440000000036712661230133020076 0ustar liggesusers/* * Please do not edit this file -- it ensures that your package will export a * 'run_testthat_tests()' C routine that can be used to run the Catch unit tests * available in your package.
*/ #define TESTTHAT_TEST_RUNNER #include <testthat.h> testthat/inst/include/0000755000176200001440000000000013167703257014525 5ustar liggesuserstestthat/inst/include/testthat.h0000644000176200001440000000003713167703257016536 0ustar liggesusers#include <testthat/testthat.h> testthat/inst/include/testthat/0000755000176200001440000000000014172362302016352 5ustar liggesuserstestthat/inst/include/testthat/testthat.h0000644000176200001440000001126514164710002020363 0ustar liggesusers#ifndef TESTTHAT_HPP #define TESTTHAT_HPP #define TESTTHAT_TOKEN_PASTE_IMPL(__X__, __Y__) __X__ ## __Y__ #define TESTTHAT_TOKEN_PASTE(__X__, __Y__) TESTTHAT_TOKEN_PASTE_IMPL(__X__, __Y__) #define TESTTHAT_DISABLED_FUNCTION \ static void TESTTHAT_TOKEN_PASTE(testthat_disabled_test_, __LINE__) () /** * Conditionally enable or disable 'testthat' + 'Catch'. * Force 'testthat' to be enabled by defining TESTTHAT_ENABLED. * Force 'testthat' to be disabled by defining TESTTHAT_DISABLED. * TESTTHAT_DISABLED takes precedence. * 'testthat' is disabled on Solaris by default. * * Hide symbols containing static members on gcc, to work around issues * with DLL unload due to static members in inline functions. * https://github.com/r-lib/devtools/issues/1832 */ #if defined(__GNUC__) || defined(__clang__) # define TESTTHAT_ENABLED # define TESTTHAT_ATTRIBUTE_HIDDEN __attribute__ ((visibility("hidden"))) #else # define TESTTHAT_ATTRIBUTE_HIDDEN #endif #if defined(__SUNPRO_C) || defined(__SUNPRO_CC) || defined(__sun) || defined(__SVR4) # define TESTTHAT_DISABLED #endif #ifndef TESTTHAT_ENABLED # define TESTTHAT_DISABLED #endif #ifndef TESTTHAT_DISABLED # define CATCH_CONFIG_PREFIX_ALL # define CATCH_CONFIG_NOSTDOUT # ifdef TESTTHAT_TEST_RUNNER # define CATCH_CONFIG_RUNNER # endif # include <climits> // CHAR_MAX # include <cstdio> // EOF # ifdef __GNUC__ # pragma GCC diagnostic ignored "-Wparentheses" # endif namespace Catch { // Avoid 'R CMD check' warnings related to the use of 'std::rand()' and // 'std::srand()'. Since we don't call any Catch APIs that use these // functions, it suffices to just override them in the Catch namespace. inline void srand(unsigned) {} inline int rand() { return 42; } // Catch has calls to 'exit' on failure, which upsets R CMD check. // We won't bump into them during normal test execution so just override // it in the Catch namespace before we include 'catch'. inline void exit(int) throw() {} } # include "vendor/catch.h" // Implement an output stream that avoids writing to stdout / stderr. extern "C" void Rprintf(const char*, ...); extern "C" void R_FlushConsole(); namespace testthat { class r_streambuf : public std::streambuf { public: r_streambuf() {} protected: virtual std::streamsize xsputn(const char* s, std::streamsize n) { if (n == 1) Rprintf("%c", *s); else Rprintf("%.*s", n, s); return n; } virtual int overflow(int c = EOF) { if (c == EOF) return c; if (c > CHAR_MAX) return c; Rprintf("%c", (char) c); return c; } virtual int sync() { R_FlushConsole(); return 0; } }; class r_ostream : public std::ostream { public: r_ostream() : std::ostream(new r_streambuf) {} ~r_ostream() { delete rdbuf(); } }; // Allow client packages to access the Catch::Session // exported by testthat.
# ifdef CATCH_CONFIG_RUNNER TESTTHAT_ATTRIBUTE_HIDDEN inline Catch::Session& catchSession() { static Catch::Session instance; return instance; } inline bool run_tests(bool use_xml) { if (use_xml) { const char* argv[] = {"catch", "-r", "xml"}; return catchSession().run(3, argv) == 0; } else { return catchSession().run() == 0; } } # endif // CATCH_CONFIG_RUNNER } // namespace testthat namespace Catch { TESTTHAT_ATTRIBUTE_HIDDEN inline std::ostream& cout() { static testthat::r_ostream instance; return instance; } TESTTHAT_ATTRIBUTE_HIDDEN inline std::ostream& cerr() { static testthat::r_ostream instance; return instance; } } // namespace Catch # ifdef TESTTHAT_TEST_RUNNER // ERROR will be redefined by R; avoid compiler warnings # ifdef ERROR # undef ERROR # endif # include <R.h> # include <Rinternals.h> extern "C" SEXP run_testthat_tests(SEXP use_xml_sxp) { bool use_xml = LOGICAL(use_xml_sxp)[0]; bool success = testthat::run_tests(use_xml); return ScalarLogical(success); } # endif // TESTTHAT_TEST_RUNNER # define context(__X__) CATCH_TEST_CASE(__X__ " | " __FILE__) # define test_that CATCH_SECTION # define expect_true CATCH_CHECK # define expect_false CATCH_CHECK_FALSE # define expect_error CATCH_CHECK_THROWS # define expect_error_as CATCH_CHECK_THROWS_AS #else // TESTTHAT_DISABLED # define context(__X__) TESTTHAT_DISABLED_FUNCTION # define test_that(__X__) if (false) # define expect_true(__X__) (void) (__X__) # define expect_false(__X__) (void) (__X__) # define expect_error(__X__) (void) (__X__) # define expect_error_as(__X__, __Y__) (void) (__X__) # ifdef TESTTHAT_TEST_RUNNER # include <R.h> # include <Rinternals.h> extern "C" SEXP run_testthat_tests() { return ScalarLogical(true); } # endif // TESTTHAT_TEST_RUNNER #endif // TESTTHAT_DISABLED #endif /* TESTTHAT_HPP */ testthat/inst/include/testthat/vendor/0000755000176200001440000000000014164710002017642 5ustar liggesuserstestthat/inst/include/testthat/vendor/catch.h0000644000176200001440000146576614164710002021116 0ustar liggesusers/* * Catch v1.9.6 * Generated: 2017-06-27 12:19:54.557875 * ---------------------------------------------------------- * This file has been merged from multiple headers. Please don't edit it directly * Copyright (c) 2012 Two Blue Cubes Ltd. All rights reserved. * * Distributed under the Boost Software License, Version 1.0.
(See accompanying * file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ #ifndef TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED #define TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED #define TWOBLUECUBES_CATCH_HPP_INCLUDED #ifdef __clang__ # pragma clang system_header #elif defined __GNUC__ # pragma GCC system_header #endif // #included from: internal/catch_suppress_warnings.h #ifdef __clang__ # ifdef __ICC // icpc defines the __clang__ macro # pragma warning(push) # pragma warning(disable: 161 1682) # else // __ICC # pragma clang diagnostic ignored "-Wglobal-constructors" # pragma clang diagnostic ignored "-Wvariadic-macros" # pragma clang diagnostic ignored "-Wc99-extensions" # pragma clang diagnostic ignored "-Wunused-variable" # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wpadded" # pragma clang diagnostic ignored "-Wc++98-compat" # pragma clang diagnostic ignored "-Wc++98-compat-pedantic" # pragma clang diagnostic ignored "-Wswitch-enum" # pragma clang diagnostic ignored "-Wcovered-switch-default" # endif #elif defined __GNUC__ # pragma GCC diagnostic ignored "-Wvariadic-macros" # pragma GCC diagnostic ignored "-Wunused-variable" # pragma GCC diagnostic ignored "-Wparentheses" # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wpadded" #endif #if defined(CATCH_CONFIG_MAIN) || defined(CATCH_CONFIG_RUNNER) # define CATCH_IMPL #endif #ifdef CATCH_IMPL # ifndef CLARA_CONFIG_MAIN # define CLARA_CONFIG_MAIN_NOT_DEFINED # define CLARA_CONFIG_MAIN # endif #endif // #included from: internal/catch_notimplemented_exception.h #define TWOBLUECUBES_CATCH_NOTIMPLEMENTED_EXCEPTION_H_INCLUDED // #included from: catch_common.h #define TWOBLUECUBES_CATCH_COMMON_H_INCLUDED // #included from: catch_compiler_capabilities.h #define TWOBLUECUBES_CATCH_COMPILER_CAPABILITIES_HPP_INCLUDED // Detect a number of compiler features - mostly C++11/14 conformance - by compiler // The following features are defined: // // CATCH_CONFIG_CPP11_NULLPTR : is nullptr supported? // CATCH_CONFIG_CPP11_NOEXCEPT : is noexcept supported? // CATCH_CONFIG_CPP11_GENERATED_METHODS : The delete and default keywords for compiler generated methods // CATCH_CONFIG_CPP11_IS_ENUM : std::is_enum is supported? // CATCH_CONFIG_CPP11_TUPLE : std::tuple is supported // CATCH_CONFIG_CPP11_LONG_LONG : is long long supported? // CATCH_CONFIG_CPP11_OVERRIDE : is override supported? // CATCH_CONFIG_CPP11_UNIQUE_PTR : is unique_ptr supported (otherwise use auto_ptr) // CATCH_CONFIG_CPP11_SHUFFLE : is std::shuffle supported? // CATCH_CONFIG_CPP11_TYPE_TRAITS : are type_traits and enable_if supported? // CATCH_CONFIG_CPP11_OR_GREATER : Is C++11 supported? // CATCH_CONFIG_VARIADIC_MACROS : are variadic macros supported? // CATCH_CONFIG_COUNTER : is the __COUNTER__ macro supported? // CATCH_CONFIG_WINDOWS_SEH : is Windows SEH supported? // CATCH_CONFIG_POSIX_SIGNALS : are POSIX signals supported? // **************** // Note to maintainers: if new toggles are added please document them // in configuration.md, too // **************** // In general each macro has a _NO_ form // (e.g. CATCH_CONFIG_CPP11_NO_NULLPTR) which disables the feature. // Many features, at point of detection, define an _INTERNAL_ macro, so they // can be combined, en-mass, with the _NO_ forms later. 
// All the C++11 features can be disabled with CATCH_CONFIG_NO_CPP11 #ifdef __cplusplus # if __cplusplus >= 201103L # define CATCH_CPP11_OR_GREATER # endif # if __cplusplus >= 201402L # define CATCH_CPP14_OR_GREATER # endif #endif #ifdef __clang__ # if __has_feature(cxx_nullptr) # define CATCH_INTERNAL_CONFIG_CPP11_NULLPTR # endif # if __has_feature(cxx_noexcept) # define CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT # endif # if defined(CATCH_CPP11_OR_GREATER) # define CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ _Pragma( "clang diagnostic push" ) \ _Pragma( "clang diagnostic ignored \"-Wexit-time-destructors\"" ) # define CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS \ _Pragma( "clang diagnostic pop" ) # define CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS \ _Pragma( "clang diagnostic push" ) \ _Pragma( "clang diagnostic ignored \"-Wparentheses\"" ) # define CATCH_INTERNAL_UNSUPPRESS_PARENTHESES_WARNINGS \ _Pragma( "clang diagnostic pop" ) # endif #endif // __clang__ //////////////////////////////////////////////////////////////////////////////// // We know some environments not to support full POSIX signals #if defined(__CYGWIN__) || defined(__QNX__) # if !defined(CATCH_CONFIG_POSIX_SIGNALS) # define CATCH_INTERNAL_CONFIG_NO_POSIX_SIGNALS # endif #endif //////////////////////////////////////////////////////////////////////////////// // Cygwin #ifdef __CYGWIN__ // Required for some versions of Cygwin to declare gettimeofday // see: http://stackoverflow.com/questions/36901803/gettimeofday-not-declared-in-this-scope-cygwin # define _BSD_SOURCE #endif // __CYGWIN__ //////////////////////////////////////////////////////////////////////////////// // Borland #ifdef __BORLANDC__ #endif // __BORLANDC__ //////////////////////////////////////////////////////////////////////////////// // EDG #ifdef __EDG_VERSION__ #endif // __EDG_VERSION__ //////////////////////////////////////////////////////////////////////////////// // Digital Mars #ifdef __DMC__ #endif // __DMC__ //////////////////////////////////////////////////////////////////////////////// // GCC #ifdef __GNUC__ # if __GNUC__ == 4 && __GNUC_MINOR__ >= 6 && defined(__GXX_EXPERIMENTAL_CXX0X__) # define CATCH_INTERNAL_CONFIG_CPP11_NULLPTR # endif // - otherwise more recent versions define __cplusplus >= 201103L // and will get picked up below #endif // __GNUC__ //////////////////////////////////////////////////////////////////////////////// // Visual C++ #ifdef _MSC_VER #define CATCH_INTERNAL_CONFIG_WINDOWS_SEH #if (_MSC_VER >= 1600) # define CATCH_INTERNAL_CONFIG_CPP11_NULLPTR # define CATCH_INTERNAL_CONFIG_CPP11_UNIQUE_PTR #endif #if (_MSC_VER >= 1900 ) // (VC++ 13 (VS2015)) #define CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT #define CATCH_INTERNAL_CONFIG_CPP11_GENERATED_METHODS #define CATCH_INTERNAL_CONFIG_CPP11_SHUFFLE #define CATCH_INTERNAL_CONFIG_CPP11_TYPE_TRAITS #endif #endif // _MSC_VER //////////////////////////////////////////////////////////////////////////////// // Use variadic macros if the compiler supports them #if ( defined _MSC_VER && _MSC_VER > 1400 && !defined __EDGE__) || \ ( defined __WAVE__ && __WAVE_HAS_VARIADICS ) || \ ( defined __GNUC__ && __GNUC__ >= 3 ) || \ ( !defined __cplusplus && __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L ) #define CATCH_INTERNAL_CONFIG_VARIADIC_MACROS #endif // Use __COUNTER__ if the compiler supports it #if ( defined _MSC_VER && _MSC_VER >= 1300 ) || \ ( defined __GNUC__ && ( __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3 )) ) || \ ( defined __clang__ && __clang_major__ >= 3 ) #define 
CATCH_INTERNAL_CONFIG_COUNTER #endif //////////////////////////////////////////////////////////////////////////////// // C++ language feature support // catch all support for C++11 #if defined(CATCH_CPP11_OR_GREATER) # if !defined(CATCH_INTERNAL_CONFIG_CPP11_NULLPTR) # define CATCH_INTERNAL_CONFIG_CPP11_NULLPTR # endif # ifndef CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT # define CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT # endif # ifndef CATCH_INTERNAL_CONFIG_CPP11_GENERATED_METHODS # define CATCH_INTERNAL_CONFIG_CPP11_GENERATED_METHODS # endif # ifndef CATCH_INTERNAL_CONFIG_CPP11_IS_ENUM # define CATCH_INTERNAL_CONFIG_CPP11_IS_ENUM # endif # ifndef CATCH_INTERNAL_CONFIG_CPP11_TUPLE # define CATCH_INTERNAL_CONFIG_CPP11_TUPLE # endif # ifndef CATCH_INTERNAL_CONFIG_VARIADIC_MACROS # define CATCH_INTERNAL_CONFIG_VARIADIC_MACROS # endif # if !defined(CATCH_INTERNAL_CONFIG_CPP11_LONG_LONG) # define CATCH_INTERNAL_CONFIG_CPP11_LONG_LONG # endif # if !defined(CATCH_INTERNAL_CONFIG_CPP11_OVERRIDE) # define CATCH_INTERNAL_CONFIG_CPP11_OVERRIDE # endif # if !defined(CATCH_INTERNAL_CONFIG_CPP11_UNIQUE_PTR) # define CATCH_INTERNAL_CONFIG_CPP11_UNIQUE_PTR # endif # if !defined(CATCH_INTERNAL_CONFIG_CPP11_SHUFFLE) # define CATCH_INTERNAL_CONFIG_CPP11_SHUFFLE # endif # if !defined(CATCH_INTERNAL_CONFIG_CPP11_TYPE_TRAITS) # define CATCH_INTERNAL_CONFIG_CPP11_TYPE_TRAITS # endif #endif // __cplusplus >= 201103L // Now set the actual defines based on the above + anything the user has configured #if defined(CATCH_INTERNAL_CONFIG_CPP11_NULLPTR) && !defined(CATCH_CONFIG_CPP11_NO_NULLPTR) && !defined(CATCH_CONFIG_CPP11_NULLPTR) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_NULLPTR #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT) && !defined(CATCH_CONFIG_CPP11_NO_NOEXCEPT) && !defined(CATCH_CONFIG_CPP11_NOEXCEPT) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_NOEXCEPT #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_GENERATED_METHODS) && !defined(CATCH_CONFIG_CPP11_NO_GENERATED_METHODS) && !defined(CATCH_CONFIG_CPP11_GENERATED_METHODS) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_GENERATED_METHODS #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_IS_ENUM) && !defined(CATCH_CONFIG_CPP11_NO_IS_ENUM) && !defined(CATCH_CONFIG_CPP11_IS_ENUM) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_IS_ENUM #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_TUPLE) && !defined(CATCH_CONFIG_CPP11_NO_TUPLE) && !defined(CATCH_CONFIG_CPP11_TUPLE) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_TUPLE #endif #if defined(CATCH_INTERNAL_CONFIG_VARIADIC_MACROS) && !defined(CATCH_CONFIG_NO_VARIADIC_MACROS) && !defined(CATCH_CONFIG_VARIADIC_MACROS) # define CATCH_CONFIG_VARIADIC_MACROS #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_LONG_LONG) && !defined(CATCH_CONFIG_CPP11_NO_LONG_LONG) && !defined(CATCH_CONFIG_CPP11_LONG_LONG) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_LONG_LONG #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_OVERRIDE) && !defined(CATCH_CONFIG_CPP11_NO_OVERRIDE) && !defined(CATCH_CONFIG_CPP11_OVERRIDE) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_OVERRIDE #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_UNIQUE_PTR) && !defined(CATCH_CONFIG_CPP11_NO_UNIQUE_PTR) && !defined(CATCH_CONFIG_CPP11_UNIQUE_PTR) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_UNIQUE_PTR #endif // Use of __COUNTER__ is suppressed if __JETBRAINS_IDE__ is #defined (meaning we're being parsed by a JetBrains IDE for // analytics) 
because, at time of writing, __COUNTER__ is not properly handled by it. // This does not affect compilation #if defined(CATCH_INTERNAL_CONFIG_COUNTER) && !defined(CATCH_CONFIG_NO_COUNTER) && !defined(CATCH_CONFIG_COUNTER) && !defined(__JETBRAINS_IDE__) # define CATCH_CONFIG_COUNTER #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_SHUFFLE) && !defined(CATCH_CONFIG_CPP11_NO_SHUFFLE) && !defined(CATCH_CONFIG_CPP11_SHUFFLE) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_SHUFFLE #endif # if defined(CATCH_INTERNAL_CONFIG_CPP11_TYPE_TRAITS) && !defined(CATCH_CONFIG_CPP11_NO_TYPE_TRAITS) && !defined(CATCH_CONFIG_CPP11_TYPE_TRAITS) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_TYPE_TRAITS # endif #if defined(CATCH_INTERNAL_CONFIG_WINDOWS_SEH) && !defined(CATCH_CONFIG_NO_WINDOWS_SEH) && !defined(CATCH_CONFIG_WINDOWS_SEH) # define CATCH_CONFIG_WINDOWS_SEH #endif // This is set by default, because we assume that unix compilers are posix-signal-compatible by default. #if !defined(CATCH_INTERNAL_CONFIG_NO_POSIX_SIGNALS) && !defined(CATCH_CONFIG_NO_POSIX_SIGNALS) && !defined(CATCH_CONFIG_POSIX_SIGNALS) # define CATCH_CONFIG_POSIX_SIGNALS #endif #if !defined(CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS) # define CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS # define CATCH_INTERNAL_UNSUPPRESS_PARENTHESES_WARNINGS #endif #if !defined(CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS) # define CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS # define CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS #endif // noexcept support: #if defined(CATCH_CONFIG_CPP11_NOEXCEPT) && !defined(CATCH_NOEXCEPT) # define CATCH_NOEXCEPT noexcept # define CATCH_NOEXCEPT_IS(x) noexcept(x) #else # define CATCH_NOEXCEPT throw() # define CATCH_NOEXCEPT_IS(x) #endif // nullptr support #ifdef CATCH_CONFIG_CPP11_NULLPTR # define CATCH_NULL nullptr #else # define CATCH_NULL NULL #endif // override support #ifdef CATCH_CONFIG_CPP11_OVERRIDE # define CATCH_OVERRIDE override #else # define CATCH_OVERRIDE #endif // unique_ptr support #ifdef CATCH_CONFIG_CPP11_UNIQUE_PTR # define CATCH_AUTO_PTR( T ) std::unique_ptr<T> #else # define CATCH_AUTO_PTR( T ) std::auto_ptr<T> #endif #define INTERNAL_CATCH_UNIQUE_NAME_LINE2( name, line ) name##line #define INTERNAL_CATCH_UNIQUE_NAME_LINE( name, line ) INTERNAL_CATCH_UNIQUE_NAME_LINE2( name, line ) #ifdef CATCH_CONFIG_COUNTER # define INTERNAL_CATCH_UNIQUE_NAME( name ) INTERNAL_CATCH_UNIQUE_NAME_LINE( name, __COUNTER__ ) #else # define INTERNAL_CATCH_UNIQUE_NAME( name ) INTERNAL_CATCH_UNIQUE_NAME_LINE( name, __LINE__ ) #endif #define INTERNAL_CATCH_STRINGIFY2( expr ) #expr #define INTERNAL_CATCH_STRINGIFY( expr ) INTERNAL_CATCH_STRINGIFY2( expr ) #include <sstream> #include <string> namespace Catch { struct IConfig; struct CaseSensitive { enum Choice { Yes, No }; }; class NonCopyable { #ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS NonCopyable( NonCopyable const& ) = delete; NonCopyable( NonCopyable && ) = delete; NonCopyable& operator = ( NonCopyable const& ) = delete; NonCopyable& operator = ( NonCopyable && ) = delete; #else NonCopyable( NonCopyable const& info ); NonCopyable& operator = ( NonCopyable const& ); #endif protected: NonCopyable() {} virtual ~NonCopyable(); }; class SafeBool { public: typedef void (SafeBool::*type)() const; static type makeSafe( bool value ) { return value ?
&SafeBool::trueValue : 0; } private: void trueValue() const {} }; template inline void deleteAll( ContainerT& container ) { typename ContainerT::const_iterator it = container.begin(); typename ContainerT::const_iterator itEnd = container.end(); for(; it != itEnd; ++it ) delete *it; } template inline void deleteAllValues( AssociativeContainerT& container ) { typename AssociativeContainerT::const_iterator it = container.begin(); typename AssociativeContainerT::const_iterator itEnd = container.end(); for(; it != itEnd; ++it ) delete it->second; } bool startsWith( std::string const& s, std::string const& prefix ); bool startsWith( std::string const& s, char prefix ); bool endsWith( std::string const& s, std::string const& suffix ); bool endsWith( std::string const& s, char suffix ); bool contains( std::string const& s, std::string const& infix ); void toLowerInPlace( std::string& s ); std::string toLower( std::string const& s ); std::string trim( std::string const& str ); bool replaceInPlace( std::string& str, std::string const& replaceThis, std::string const& withThis ); struct pluralise { pluralise( std::size_t count, std::string const& label ); friend std::ostream& operator << ( std::ostream& os, pluralise const& pluraliser ); std::size_t m_count; std::string m_label; }; struct SourceLineInfo { SourceLineInfo(); SourceLineInfo( char const* _file, std::size_t _line ); # ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS SourceLineInfo(SourceLineInfo const& other) = default; SourceLineInfo( SourceLineInfo && ) = default; SourceLineInfo& operator = ( SourceLineInfo const& ) = default; SourceLineInfo& operator = ( SourceLineInfo && ) = default; # endif bool empty() const; bool operator == ( SourceLineInfo const& other ) const; bool operator < ( SourceLineInfo const& other ) const; char const* file; std::size_t line; }; std::ostream& operator << ( std::ostream& os, SourceLineInfo const& info ); // This is just here to avoid compiler warnings with macro constants and boolean literals inline bool isTrue( bool value ){ return value; } inline bool alwaysTrue() { return true; } inline bool alwaysFalse() { return false; } void throwLogicError( std::string const& message, SourceLineInfo const& locationInfo ); void seedRng( IConfig const& config ); unsigned int rngSeed(); // Use this in variadic streaming macros to allow // >> +StreamEndStop // as well as // >> stuff +StreamEndStop struct StreamEndStop { std::string operator+() { return std::string(); } }; template T const& operator + ( T const& value, StreamEndStop ) { return value; } } #define CATCH_INTERNAL_LINEINFO ::Catch::SourceLineInfo( __FILE__, static_cast( __LINE__ ) ) #define CATCH_INTERNAL_ERROR( msg ) ::Catch::throwLogicError( msg, CATCH_INTERNAL_LINEINFO ); namespace Catch { class NotImplementedException : public std::exception { public: NotImplementedException( SourceLineInfo const& lineInfo ); NotImplementedException( NotImplementedException const& ) {} virtual ~NotImplementedException() CATCH_NOEXCEPT {} virtual const char* what() const CATCH_NOEXCEPT; private: std::string m_what; SourceLineInfo m_lineInfo; }; } // end namespace Catch /////////////////////////////////////////////////////////////////////////////// #define CATCH_NOT_IMPLEMENTED throw Catch::NotImplementedException( CATCH_INTERNAL_LINEINFO ) // #included from: internal/catch_context.h #define TWOBLUECUBES_CATCH_CONTEXT_H_INCLUDED // #included from: catch_interfaces_generators.h #define TWOBLUECUBES_CATCH_INTERFACES_GENERATORS_H_INCLUDED #include namespace Catch { struct 
IGeneratorInfo {
        virtual ~IGeneratorInfo();
        virtual bool moveNext() = 0;
        virtual std::size_t getCurrentIndex() const = 0;
    };

    struct IGeneratorsForTest {
        virtual ~IGeneratorsForTest();

        virtual IGeneratorInfo& getGeneratorInfo( std::string const& fileInfo, std::size_t size ) = 0;
        virtual bool moveNext() = 0;
    };

    IGeneratorsForTest* createGeneratorsForTest();

} // end namespace Catch

// #included from: catch_ptr.hpp
#define TWOBLUECUBES_CATCH_PTR_HPP_INCLUDED

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif

namespace Catch {

    // An intrusive reference counting smart pointer.
    // T must implement addRef() and release() methods
    // typically implementing the IShared interface
    template<typename T>
    class Ptr {
    public:
        Ptr() : m_p( CATCH_NULL ){}
        Ptr( T* p ) : m_p( p ){
            if( m_p )
                m_p->addRef();
        }
        Ptr( Ptr const& other ) : m_p( other.m_p ){
            if( m_p )
                m_p->addRef();
        }
        ~Ptr(){
            if( m_p )
                m_p->release();
        }
        void reset() {
            if( m_p )
                m_p->release();
            m_p = CATCH_NULL;
        }
        Ptr& operator = ( T* p ){
            Ptr temp( p );
            swap( temp );
            return *this;
        }
        Ptr& operator = ( Ptr const& other ){
            Ptr temp( other );
            swap( temp );
            return *this;
        }
        void swap( Ptr& other ) { std::swap( m_p, other.m_p ); }
        T* get() const{ return m_p; }
        T& operator*() const { return *m_p; }
        T* operator->() const { return m_p; }
        bool operator !() const { return m_p == CATCH_NULL; }
        operator SafeBool::type() const { return SafeBool::makeSafe( m_p != CATCH_NULL ); }

    private:
        T* m_p;
    };

    struct IShared : NonCopyable {
        virtual ~IShared();
        virtual void addRef() const = 0;
        virtual void release() const = 0;
    };

    template<typename T = IShared>
    struct SharedImpl : T {

        SharedImpl() : m_rc( 0 ){}

        virtual void addRef() const {
            ++m_rc;
        }
        virtual void release() const {
            if( --m_rc == 0 )
                delete this;
        }

        mutable unsigned int m_rc;
    };

} // end namespace Catch

#ifdef __clang__
#pragma clang diagnostic pop
#endif

namespace Catch {

    class TestCase;
    class Stream;
    struct IResultCapture;
    struct IRunner;
    struct IGeneratorsForTest;
    struct IConfig;

    struct IContext {
        virtual ~IContext();

        virtual IResultCapture* getResultCapture() = 0;
        virtual IRunner* getRunner() = 0;
        virtual size_t getGeneratorIndex( std::string const& fileInfo, size_t totalSize ) = 0;
        virtual bool advanceGeneratorsForCurrentTest() = 0;
        virtual Ptr<IConfig const> getConfig() const = 0;
    };

    struct IMutableContext : IContext {
        virtual ~IMutableContext();
        virtual void setResultCapture( IResultCapture* resultCapture ) = 0;
        virtual void setRunner( IRunner* runner ) = 0;
        virtual void setConfig( Ptr<IConfig const> const& config ) = 0;
    };

    IContext& getCurrentContext();
    IMutableContext& getCurrentMutableContext();
    void cleanUpContext();
    Stream createStream( std::string const& streamName );

}

// #included from: internal/catch_test_registry.hpp
#define TWOBLUECUBES_CATCH_TEST_REGISTRY_HPP_INCLUDED

// #included from: catch_interfaces_testcase.h
#define TWOBLUECUBES_CATCH_INTERFACES_TESTCASE_H_INCLUDED

#include <vector>

namespace Catch {

    class TestSpec;

    struct ITestCase : IShared {
        virtual void invoke() const = 0;
    protected:
        virtual ~ITestCase();
    };

    class TestCase;
    struct IConfig;

    struct ITestCaseRegistry {
        virtual ~ITestCaseRegistry();
        virtual std::vector<TestCase> const& getAllTests() const = 0;
        virtual std::vector<TestCase> const& getAllTestsSorted( IConfig const& config ) const = 0;
    };

    bool matchTest( TestCase const& testCase, TestSpec const& testSpec, IConfig const& config );
    std::vector<TestCase> filterTests( std::vector<TestCase> const& testCases, TestSpec const& testSpec, IConfig const& config );
    std::vector<TestCase> const& getAllTestCasesSorted( IConfig const& config );

}

namespace Catch {
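    // For orientation: the registration machinery below is what the public
    // TEST_CASE macro bottoms out in. A minimal sketch of what
    // TEST_CASE( "addition works", "[math]" ) expands to -- the function name
    // here is illustrative; the real one is generated from __COUNTER__ or
    // __LINE__ via INTERNAL_CATCH_UNIQUE_NAME:
    //
    //     static void catchTestFn_42();
    //     namespace {
    //         Catch::AutoReg autoRegistrar_42(
    //             &catchTestFn_42,
    //             CATCH_INTERNAL_LINEINFO,
    //             Catch::NameAndDesc( "addition works", "[math]" ) );
    //     }
    //     static void catchTestFn_42() {
    //         REQUIRE( 1 + 1 == 2 );
    //     }
    //
    // Constructing the AutoReg at namespace scope registers the test with the
    // global registry before main() (or, under testthat, before run_cpp_tests) runs.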
template class MethodTestCase : public SharedImpl { public: MethodTestCase( void (C::*method)() ) : m_method( method ) {} virtual void invoke() const { C obj; (obj.*m_method)(); } private: virtual ~MethodTestCase() {} void (C::*m_method)(); }; typedef void(*TestFunction)(); struct NameAndDesc { NameAndDesc( const char* _name = "", const char* _description= "" ) : name( _name ), description( _description ) {} const char* name; const char* description; }; void registerTestCase ( ITestCase* testCase, char const* className, NameAndDesc const& nameAndDesc, SourceLineInfo const& lineInfo ); struct AutoReg { AutoReg ( TestFunction function, SourceLineInfo const& lineInfo, NameAndDesc const& nameAndDesc ); template AutoReg ( void (C::*method)(), char const* className, NameAndDesc const& nameAndDesc, SourceLineInfo const& lineInfo ) { registerTestCase ( new MethodTestCase( method ), className, nameAndDesc, lineInfo ); } ~AutoReg(); private: AutoReg( AutoReg const& ); void operator= ( AutoReg const& ); }; void registerTestCaseFunction ( TestFunction function, SourceLineInfo const& lineInfo, NameAndDesc const& nameAndDesc ); } // end namespace Catch #ifdef CATCH_CONFIG_VARIADIC_MACROS /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_TESTCASE2( TestName, ... ) \ static void TestName(); \ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &TestName, CATCH_INTERNAL_LINEINFO, Catch::NameAndDesc( __VA_ARGS__ ) ); } \ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS \ static void TestName() #define INTERNAL_CATCH_TESTCASE( ... ) \ INTERNAL_CATCH_TESTCASE2( INTERNAL_CATCH_UNIQUE_NAME( ____C_A_T_C_H____T_E_S_T____ ), __VA_ARGS__ ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_METHOD_AS_TEST_CASE( QualifiedMethod, ... ) \ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &QualifiedMethod, "&" #QualifiedMethod, Catch::NameAndDesc( __VA_ARGS__ ), CATCH_INTERNAL_LINEINFO ); } \ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_TEST_CASE_METHOD2( TestName, ClassName, ... )\ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ namespace{ \ struct TestName : ClassName{ \ void test(); \ }; \ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar ) ( &TestName::test, #ClassName, Catch::NameAndDesc( __VA_ARGS__ ), CATCH_INTERNAL_LINEINFO ); \ } \ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS \ void TestName::test() #define INTERNAL_CATCH_TEST_CASE_METHOD( ClassName, ... ) \ INTERNAL_CATCH_TEST_CASE_METHOD2( INTERNAL_CATCH_UNIQUE_NAME( ____C_A_T_C_H____T_E_S_T____ ), ClassName, __VA_ARGS__ ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_REGISTER_TESTCASE( Function, ... 
) \ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ Catch::AutoReg( Function, CATCH_INTERNAL_LINEINFO, Catch::NameAndDesc( __VA_ARGS__ ) ); \ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS #else /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_TESTCASE2( TestName, Name, Desc ) \ static void TestName(); \ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &TestName, CATCH_INTERNAL_LINEINFO, Catch::NameAndDesc( Name, Desc ) ); }\ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS \ static void TestName() #define INTERNAL_CATCH_TESTCASE( Name, Desc ) \ INTERNAL_CATCH_TESTCASE2( INTERNAL_CATCH_UNIQUE_NAME( ____C_A_T_C_H____T_E_S_T____ ), Name, Desc ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_METHOD_AS_TEST_CASE( QualifiedMethod, Name, Desc ) \ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &QualifiedMethod, "&" #QualifiedMethod, Catch::NameAndDesc( Name, Desc ), CATCH_INTERNAL_LINEINFO ); } \ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_TEST_CASE_METHOD2( TestCaseName, ClassName, TestName, Desc )\ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ namespace{ \ struct TestCaseName : ClassName{ \ void test(); \ }; \ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar ) ( &TestCaseName::test, #ClassName, Catch::NameAndDesc( TestName, Desc ), CATCH_INTERNAL_LINEINFO ); \ } \ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS \ void TestCaseName::test() #define INTERNAL_CATCH_TEST_CASE_METHOD( ClassName, TestName, Desc )\ INTERNAL_CATCH_TEST_CASE_METHOD2( INTERNAL_CATCH_UNIQUE_NAME( ____C_A_T_C_H____T_E_S_T____ ), ClassName, TestName, Desc ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_REGISTER_TESTCASE( Function, Name, Desc ) \ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ Catch::AutoReg( Function, CATCH_INTERNAL_LINEINFO, Catch::NameAndDesc( Name, Desc ) ); \ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS #endif // #included from: internal/catch_capture.hpp #define TWOBLUECUBES_CATCH_CAPTURE_HPP_INCLUDED // #included from: catch_result_builder.h #define TWOBLUECUBES_CATCH_RESULT_BUILDER_H_INCLUDED // #included from: catch_result_type.h #define TWOBLUECUBES_CATCH_RESULT_TYPE_H_INCLUDED namespace Catch { // ResultWas::OfType enum struct ResultWas { enum OfType { Unknown = -1, Ok = 0, Info = 1, Warning = 2, FailureBit = 0x10, ExpressionFailed = FailureBit | 1, ExplicitFailure = FailureBit | 2, Exception = 0x100 | FailureBit, ThrewException = Exception | 1, DidntThrowException = Exception | 2, FatalErrorCondition = 0x200 | FailureBit }; }; inline bool isOk( ResultWas::OfType resultType ) { return ( resultType & ResultWas::FailureBit ) == 0; } inline bool isJustInfo( int flags ) { return flags == ResultWas::Info; } // ResultDisposition::Flags enum struct ResultDisposition { enum Flags { Normal = 0x01, ContinueOnFailure = 0x02, // Failures fail test, but execution continues FalseTest = 0x04, // Prefix expression with ! 
SuppressFail = 0x08 // Failures are reported but do not fail the test }; }; inline ResultDisposition::Flags operator | ( ResultDisposition::Flags lhs, ResultDisposition::Flags rhs ) { return static_cast( static_cast( lhs ) | static_cast( rhs ) ); } inline bool shouldContinueOnFailure( int flags ) { return ( flags & ResultDisposition::ContinueOnFailure ) != 0; } inline bool isFalseTest( int flags ) { return ( flags & ResultDisposition::FalseTest ) != 0; } inline bool shouldSuppressFailure( int flags ) { return ( flags & ResultDisposition::SuppressFail ) != 0; } } // end namespace Catch // #included from: catch_assertionresult.h #define TWOBLUECUBES_CATCH_ASSERTIONRESULT_H_INCLUDED #include namespace Catch { struct STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison; struct DecomposedExpression { virtual ~DecomposedExpression() {} virtual bool isBinaryExpression() const { return false; } virtual void reconstructExpression( std::string& dest ) const = 0; // Only simple binary comparisons can be decomposed. // If more complex check is required then wrap sub-expressions in parentheses. template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator + ( T const& ); template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator - ( T const& ); template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator * ( T const& ); template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator / ( T const& ); template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator % ( T const& ); template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator && ( T const& ); template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator || ( T const& ); private: DecomposedExpression& operator = (DecomposedExpression const&); }; struct AssertionInfo { AssertionInfo() {} AssertionInfo( char const * _macroName, SourceLineInfo const& _lineInfo, char const * _capturedExpression, ResultDisposition::Flags _resultDisposition, char const * _secondArg = ""); char const * macroName; SourceLineInfo lineInfo; char const * capturedExpression; ResultDisposition::Flags resultDisposition; char const * secondArg; }; struct AssertionResultData { AssertionResultData() : decomposedExpression( CATCH_NULL ) , resultType( ResultWas::Unknown ) , negated( false ) , parenthesized( false ) {} void negate( bool parenthesize ) { negated = !negated; parenthesized = parenthesize; if( resultType == ResultWas::Ok ) resultType = ResultWas::ExpressionFailed; else if( resultType == ResultWas::ExpressionFailed ) resultType = ResultWas::Ok; } std::string const& reconstructExpression() const { if( decomposedExpression != CATCH_NULL ) { decomposedExpression->reconstructExpression( reconstructedExpression ); if( parenthesized ) { reconstructedExpression.insert( 0, 1, '(' ); reconstructedExpression.append( 1, ')' ); } if( negated ) { reconstructedExpression.insert( 0, 1, '!' 
); } decomposedExpression = CATCH_NULL; } return reconstructedExpression; } mutable DecomposedExpression const* decomposedExpression; mutable std::string reconstructedExpression; std::string message; ResultWas::OfType resultType; bool negated; bool parenthesized; }; class AssertionResult { public: AssertionResult(); AssertionResult( AssertionInfo const& info, AssertionResultData const& data ); ~AssertionResult(); # ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS AssertionResult( AssertionResult const& ) = default; AssertionResult( AssertionResult && ) = default; AssertionResult& operator = ( AssertionResult const& ) = default; AssertionResult& operator = ( AssertionResult && ) = default; # endif bool isOk() const; bool succeeded() const; ResultWas::OfType getResultType() const; bool hasExpression() const; bool hasMessage() const; std::string getExpression() const; std::string getExpressionInMacro() const; bool hasExpandedExpression() const; std::string getExpandedExpression() const; std::string getMessage() const; SourceLineInfo getSourceInfo() const; std::string getTestMacroName() const; void discardDecomposedExpression() const; void expandDecomposedExpression() const; protected: AssertionInfo m_info; AssertionResultData m_resultData; }; } // end namespace Catch // #included from: catch_matchers.hpp #define TWOBLUECUBES_CATCH_MATCHERS_HPP_INCLUDED namespace Catch { namespace Matchers { namespace Impl { template struct MatchAllOf; template struct MatchAnyOf; template struct MatchNotOf; class MatcherUntypedBase { public: std::string toString() const { if( m_cachedToString.empty() ) m_cachedToString = describe(); return m_cachedToString; } protected: virtual ~MatcherUntypedBase(); virtual std::string describe() const = 0; mutable std::string m_cachedToString; private: MatcherUntypedBase& operator = ( MatcherUntypedBase const& ); }; template struct MatcherMethod { virtual bool match( ObjectT const& arg ) const = 0; }; template struct MatcherMethod { virtual bool match( PtrT* arg ) const = 0; }; template struct MatcherBase : MatcherUntypedBase, MatcherMethod { MatchAllOf operator && ( MatcherBase const& other ) const; MatchAnyOf operator || ( MatcherBase const& other ) const; MatchNotOf operator ! 
() const;
    };

    template<typename ArgT>
    struct MatchAllOf : MatcherBase<ArgT> {
        virtual bool match( ArgT const& arg ) const CATCH_OVERRIDE {
            for( std::size_t i = 0; i < m_matchers.size(); ++i ) {
                if (!m_matchers[i]->match(arg))
                    return false;
            }
            return true;
        }
        virtual std::string describe() const CATCH_OVERRIDE {
            std::string description;
            description.reserve( 4 + m_matchers.size()*32 );
            description += "( ";
            for( std::size_t i = 0; i < m_matchers.size(); ++i ) {
                if( i != 0 )
                    description += " and ";
                description += m_matchers[i]->toString();
            }
            description += " )";
            return description;
        }

        MatchAllOf& operator && ( MatcherBase<ArgT> const& other ) {
            m_matchers.push_back( &other );
            return *this;
        }

        std::vector<MatcherBase<ArgT> const*> m_matchers;
    };

    template<typename ArgT>
    struct MatchAnyOf : MatcherBase<ArgT> {
        virtual bool match( ArgT const& arg ) const CATCH_OVERRIDE {
            for( std::size_t i = 0; i < m_matchers.size(); ++i ) {
                if (m_matchers[i]->match(arg))
                    return true;
            }
            return false;
        }
        virtual std::string describe() const CATCH_OVERRIDE {
            std::string description;
            description.reserve( 4 + m_matchers.size()*32 );
            description += "( ";
            for( std::size_t i = 0; i < m_matchers.size(); ++i ) {
                if( i != 0 )
                    description += " or ";
                description += m_matchers[i]->toString();
            }
            description += " )";
            return description;
        }

        MatchAnyOf& operator || ( MatcherBase<ArgT> const& other ) {
            m_matchers.push_back( &other );
            return *this;
        }

        std::vector<MatcherBase<ArgT> const*> m_matchers;
    };

    template<typename ArgT>
    struct MatchNotOf : MatcherBase<ArgT> {

        MatchNotOf( MatcherBase<ArgT> const& underlyingMatcher ) : m_underlyingMatcher( underlyingMatcher ) {}

        virtual bool match( ArgT const& arg ) const CATCH_OVERRIDE {
            return !m_underlyingMatcher.match( arg );
        }

        virtual std::string describe() const CATCH_OVERRIDE {
            return "not " + m_underlyingMatcher.toString();
        }

        MatcherBase<ArgT> const& m_underlyingMatcher;
    };

    template<typename T>
    MatchAllOf<T> MatcherBase<T>::operator && ( MatcherBase const& other ) const {
        return MatchAllOf<T>() && *this && other;
    }
    template<typename T>
    MatchAnyOf<T> MatcherBase<T>::operator || ( MatcherBase const& other ) const {
        return MatchAnyOf<T>() || *this || other;
    }
    template<typename T>
    MatchNotOf<T> MatcherBase<T>::operator ! () const {
        return MatchNotOf<T>( *this );
    }

} // namespace Impl

// The following functions create the actual matcher objects.
// This allows the types to be inferred
// - deprecated: prefer ||, && and !
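// A sketch of how these compose in practice (Contains and EndsWith are the
// string matchers declared later in this header; getGreeting is a
// hypothetical function under test):
//
//     CHECK_THAT( getGreeting(), Contains( "hello" ) || EndsWith( "!" ) );
//
//     // the deprecated free-function spelling of the same assertion:
//     CHECK_THAT( getGreeting(), AnyOf( Contains( "hello" ), EndsWith( "!" ) ) );
//
// On failure the combined matcher reports itself via describe(), e.g.
// "( contains: \"hello\" or ends with: \"!\" )".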
template inline Impl::MatchNotOf Not( Impl::MatcherBase const& underlyingMatcher ) { return Impl::MatchNotOf( underlyingMatcher ); } template inline Impl::MatchAllOf AllOf( Impl::MatcherBase const& m1, Impl::MatcherBase const& m2 ) { return Impl::MatchAllOf() && m1 && m2; } template inline Impl::MatchAllOf AllOf( Impl::MatcherBase const& m1, Impl::MatcherBase const& m2, Impl::MatcherBase const& m3 ) { return Impl::MatchAllOf() && m1 && m2 && m3; } template inline Impl::MatchAnyOf AnyOf( Impl::MatcherBase const& m1, Impl::MatcherBase const& m2 ) { return Impl::MatchAnyOf() || m1 || m2; } template inline Impl::MatchAnyOf AnyOf( Impl::MatcherBase const& m1, Impl::MatcherBase const& m2, Impl::MatcherBase const& m3 ) { return Impl::MatchAnyOf() || m1 || m2 || m3; } } // namespace Matchers using namespace Matchers; using Matchers::Impl::MatcherBase; } // namespace Catch namespace Catch { struct TestFailureException{}; template class ExpressionLhs; struct CopyableStream { CopyableStream() {} CopyableStream( CopyableStream const& other ) { oss << other.oss.str(); } CopyableStream& operator=( CopyableStream const& other ) { oss.str(std::string()); oss << other.oss.str(); return *this; } std::ostringstream oss; }; class ResultBuilder : public DecomposedExpression { public: ResultBuilder( char const* macroName, SourceLineInfo const& lineInfo, char const* capturedExpression, ResultDisposition::Flags resultDisposition, char const* secondArg = "" ); ~ResultBuilder(); template ExpressionLhs operator <= ( T const& operand ); ExpressionLhs operator <= ( bool value ); template ResultBuilder& operator << ( T const& value ) { m_stream().oss << value; return *this; } ResultBuilder& setResultType( ResultWas::OfType result ); ResultBuilder& setResultType( bool result ); void endExpression( DecomposedExpression const& expr ); virtual void reconstructExpression( std::string& dest ) const CATCH_OVERRIDE; AssertionResult build() const; AssertionResult build( DecomposedExpression const& expr ) const; void useActiveException( ResultDisposition::Flags resultDisposition = ResultDisposition::Normal ); void captureResult( ResultWas::OfType resultType ); void captureExpression(); void captureExpectedException( std::string const& expectedMessage ); void captureExpectedException( Matchers::Impl::MatcherBase const& matcher ); void handleResult( AssertionResult const& result ); void react(); bool shouldDebugBreak() const; bool allowThrows() const; template void captureMatch( ArgT const& arg, MatcherT const& matcher, char const* matcherString ); void setExceptionGuard(); void unsetExceptionGuard(); private: AssertionInfo m_assertionInfo; AssertionResultData m_data; static CopyableStream &m_stream() { static CopyableStream s; return s; } bool m_shouldDebugBreak; bool m_shouldThrow; bool m_guardException; }; } // namespace Catch // Include after due to circular dependency: // #included from: catch_expression_lhs.hpp #define TWOBLUECUBES_CATCH_EXPRESSION_LHS_HPP_INCLUDED // #included from: catch_evaluate.hpp #define TWOBLUECUBES_CATCH_EVALUATE_HPP_INCLUDED #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable:4389) // '==' : signed/unsigned mismatch # pragma warning(disable:4312) // Converting int to T* using reinterpret_cast (issue on x64 platform) #endif #include namespace Catch { namespace Internal { enum Operator { IsEqualTo, IsNotEqualTo, IsLessThan, IsGreaterThan, IsLessThanOrEqualTo, IsGreaterThanOrEqualTo }; template struct OperatorTraits { static const char* getName(){ return "*error*"; } }; template<> struct 
OperatorTraits { static const char* getName(){ return "=="; } }; template<> struct OperatorTraits { static const char* getName(){ return "!="; } }; template<> struct OperatorTraits { static const char* getName(){ return "<"; } }; template<> struct OperatorTraits { static const char* getName(){ return ">"; } }; template<> struct OperatorTraits { static const char* getName(){ return "<="; } }; template<> struct OperatorTraits{ static const char* getName(){ return ">="; } }; template inline T& opCast(T const& t) { return const_cast(t); } // nullptr_t support based on pull request #154 from Konstantin Baumann #ifdef CATCH_CONFIG_CPP11_NULLPTR inline std::nullptr_t opCast(std::nullptr_t) { return nullptr; } #endif // CATCH_CONFIG_CPP11_NULLPTR // So the compare overloads can be operator agnostic we convey the operator as a template // enum, which is used to specialise an Evaluator for doing the comparison. template class Evaluator{}; template struct Evaluator { static bool evaluate( T1 const& lhs, T2 const& rhs) { return bool( opCast( lhs ) == opCast( rhs ) ); } }; template struct Evaluator { static bool evaluate( T1 const& lhs, T2 const& rhs ) { return bool( opCast( lhs ) != opCast( rhs ) ); } }; template struct Evaluator { static bool evaluate( T1 const& lhs, T2 const& rhs ) { return bool( opCast( lhs ) < opCast( rhs ) ); } }; template struct Evaluator { static bool evaluate( T1 const& lhs, T2 const& rhs ) { return bool( opCast( lhs ) > opCast( rhs ) ); } }; template struct Evaluator { static bool evaluate( T1 const& lhs, T2 const& rhs ) { return bool( opCast( lhs ) >= opCast( rhs ) ); } }; template struct Evaluator { static bool evaluate( T1 const& lhs, T2 const& rhs ) { return bool( opCast( lhs ) <= opCast( rhs ) ); } }; template bool applyEvaluator( T1 const& lhs, T2 const& rhs ) { return Evaluator::evaluate( lhs, rhs ); } // This level of indirection allows us to specialise for integer types // to avoid signed/ unsigned warnings // "base" overload template bool compare( T1 const& lhs, T2 const& rhs ) { return Evaluator::evaluate( lhs, rhs ); } // unsigned X to int template bool compare( unsigned int lhs, int rhs ) { return applyEvaluator( lhs, static_cast( rhs ) ); } template bool compare( unsigned long lhs, int rhs ) { return applyEvaluator( lhs, static_cast( rhs ) ); } template bool compare( unsigned char lhs, int rhs ) { return applyEvaluator( lhs, static_cast( rhs ) ); } // unsigned X to long template bool compare( unsigned int lhs, long rhs ) { return applyEvaluator( lhs, static_cast( rhs ) ); } template bool compare( unsigned long lhs, long rhs ) { return applyEvaluator( lhs, static_cast( rhs ) ); } template bool compare( unsigned char lhs, long rhs ) { return applyEvaluator( lhs, static_cast( rhs ) ); } // int to unsigned X template bool compare( int lhs, unsigned int rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( int lhs, unsigned long rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( int lhs, unsigned char rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } // long to unsigned X template bool compare( long lhs, unsigned int rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( long lhs, unsigned long rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( long lhs, unsigned char rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } // pointer to long (when comparing against NULL) template bool compare( long lhs, T* rhs ) { return 
Evaluator::evaluate( reinterpret_cast( lhs ), rhs ); } template bool compare( T* lhs, long rhs ) { return Evaluator::evaluate( lhs, reinterpret_cast( rhs ) ); } // pointer to int (when comparing against NULL) template bool compare( int lhs, T* rhs ) { return Evaluator::evaluate( reinterpret_cast( lhs ), rhs ); } template bool compare( T* lhs, int rhs ) { return Evaluator::evaluate( lhs, reinterpret_cast( rhs ) ); } #ifdef CATCH_CONFIG_CPP11_LONG_LONG // long long to unsigned X template bool compare( long long lhs, unsigned int rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( long long lhs, unsigned long rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( long long lhs, unsigned long long rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( long long lhs, unsigned char rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } // unsigned long long to X template bool compare( unsigned long long lhs, int rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( unsigned long long lhs, long rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( unsigned long long lhs, long long rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( unsigned long long lhs, char rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } // pointer to long long (when comparing against NULL) template bool compare( long long lhs, T* rhs ) { return Evaluator::evaluate( reinterpret_cast( lhs ), rhs ); } template bool compare( T* lhs, long long rhs ) { return Evaluator::evaluate( lhs, reinterpret_cast( rhs ) ); } #endif // CATCH_CONFIG_CPP11_LONG_LONG #ifdef CATCH_CONFIG_CPP11_NULLPTR // pointer to nullptr_t (when comparing against nullptr) template bool compare( std::nullptr_t, T* rhs ) { return Evaluator::evaluate( nullptr, rhs ); } template bool compare( T* lhs, std::nullptr_t ) { return Evaluator::evaluate( lhs, nullptr ); } #endif // CATCH_CONFIG_CPP11_NULLPTR } // end of namespace Internal } // end of namespace Catch #ifdef _MSC_VER # pragma warning(pop) #endif // #included from: catch_tostring.h #define TWOBLUECUBES_CATCH_TOSTRING_H_INCLUDED #include #include #include #include #include #ifdef __OBJC__ // #included from: catch_objc_arc.hpp #define TWOBLUECUBES_CATCH_OBJC_ARC_HPP_INCLUDED #import #ifdef __has_feature #define CATCH_ARC_ENABLED __has_feature(objc_arc) #else #define CATCH_ARC_ENABLED 0 #endif void arcSafeRelease( NSObject* obj ); id performOptionalSelector( id obj, SEL sel ); #if !CATCH_ARC_ENABLED inline void arcSafeRelease( NSObject* obj ) { [obj release]; } inline id performOptionalSelector( id obj, SEL sel ) { if( [obj respondsToSelector: sel] ) return [obj performSelector: sel]; return nil; } #define CATCH_UNSAFE_UNRETAINED #define CATCH_ARC_STRONG #else inline void arcSafeRelease( NSObject* ){} inline id performOptionalSelector( id obj, SEL sel ) { #ifdef __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Warc-performSelector-leaks" #endif if( [obj respondsToSelector: sel] ) return [obj performSelector: sel]; #ifdef __clang__ # pragma clang diagnostic pop #endif return nil; } #define CATCH_UNSAFE_UNRETAINED __unsafe_unretained #define CATCH_ARC_STRONG __strong #endif #endif #ifdef CATCH_CONFIG_CPP11_TUPLE #include #endif #ifdef CATCH_CONFIG_CPP11_IS_ENUM #include #endif namespace Catch { // Why we're here. 
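    // toString() is the single entry point Catch uses to render values in
    // assertion messages. Illustrative results (a sketch; the exact output is
    // defined by the overloads and the StringMaker machinery below):
    //
    //     Catch::toString( 42 )                       // "42"
    //     Catch::toString( std::string( "hi" ) )      // "\"hi\""  (strings are quoted)
    //     Catch::toString( std::vector<int>( 2, 7 ) ) // "{ 7, 7 }"
    //     Catch::toString( SomeOpaqueType() )         // "{?}"  (no operator<< available;
    //                                                 //  SomeOpaqueType is hypothetical)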
template std::string toString( T const& value ); // Built in overloads std::string toString( std::string const& value ); std::string toString( std::wstring const& value ); std::string toString( const char* const value ); std::string toString( char* const value ); std::string toString( const wchar_t* const value ); std::string toString( wchar_t* const value ); std::string toString( int value ); std::string toString( unsigned long value ); std::string toString( unsigned int value ); std::string toString( const double value ); std::string toString( const float value ); std::string toString( bool value ); std::string toString( char value ); std::string toString( signed char value ); std::string toString( unsigned char value ); #ifdef CATCH_CONFIG_CPP11_LONG_LONG std::string toString( long long value ); std::string toString( unsigned long long value ); #endif #ifdef CATCH_CONFIG_CPP11_NULLPTR std::string toString( std::nullptr_t ); #endif #ifdef __OBJC__ std::string toString( NSString const * const& nsstring ); std::string toString( NSString * CATCH_ARC_STRONG & nsstring ); std::string toString( NSObject* const& nsObject ); #endif namespace Detail { extern const std::string unprintableString; #if !defined(CATCH_CONFIG_CPP11_STREAM_INSERTABLE_CHECK) struct BorgType { template BorgType( T const& ); }; struct TrueType { char sizer[1]; }; struct FalseType { char sizer[2]; }; TrueType& testStreamable( std::ostream& ); FalseType testStreamable( FalseType ); FalseType operator<<( std::ostream const&, BorgType const& ); template struct IsStreamInsertable { static std::ostream &s; static T const&t; enum { value = sizeof( testStreamable(s << t) ) == sizeof( TrueType ) }; }; #else template class IsStreamInsertable { template static auto test(int) -> decltype( std::declval() << std::declval(), std::true_type() ); template static auto test(...) 
-> std::false_type; public: static const bool value = decltype(test(0))::value; }; #endif #if defined(CATCH_CONFIG_CPP11_IS_ENUM) template::value > struct EnumStringMaker { static std::string convert( T const& ) { return unprintableString; } }; template struct EnumStringMaker { static std::string convert( T const& v ) { return ::Catch::toString( static_cast::type>(v) ); } }; #endif template struct StringMakerBase { #if defined(CATCH_CONFIG_CPP11_IS_ENUM) template static std::string convert( T const& v ) { return EnumStringMaker::convert( v ); } #else template static std::string convert( T const& ) { return unprintableString; } #endif }; template<> struct StringMakerBase { template static std::string convert( T const& _value ) { std::ostringstream oss; oss << _value; return oss.str(); } }; std::string rawMemoryToString( const void *object, std::size_t size ); template inline std::string rawMemoryToString( const T& object ) { return rawMemoryToString( &object, sizeof(object) ); } } // end namespace Detail template struct StringMaker : Detail::StringMakerBase::value> {}; template struct StringMaker { template static std::string convert( U* p ) { if( !p ) return "NULL"; else return Detail::rawMemoryToString( p ); } }; template struct StringMaker { static std::string convert( R C::* p ) { if( !p ) return "NULL"; else return Detail::rawMemoryToString( p ); } }; namespace Detail { template std::string rangeToString( InputIterator first, InputIterator last ); } //template //struct StringMaker > { // static std::string convert( std::vector const& v ) { // return Detail::rangeToString( v.begin(), v.end() ); // } //}; template std::string toString( std::vector const& v ) { return Detail::rangeToString( v.begin(), v.end() ); } #ifdef CATCH_CONFIG_CPP11_TUPLE // toString for tuples namespace TupleDetail { template< typename Tuple, std::size_t N = 0, bool = (N < std::tuple_size::value) > struct ElementPrinter { static void print( const Tuple& tuple, std::ostream& os ) { os << ( N ? ", " : " " ) << Catch::toString(std::get(tuple)); ElementPrinter::print(tuple,os); } }; template< typename Tuple, std::size_t N > struct ElementPrinter { static void print( const Tuple&, std::ostream& ) {} }; } template struct StringMaker> { static std::string convert( const std::tuple& tuple ) { std::ostringstream os; os << '{'; TupleDetail::ElementPrinter>::print( tuple, os ); os << " }"; return os.str(); } }; #endif // CATCH_CONFIG_CPP11_TUPLE namespace Detail { template std::string makeString( T const& value ) { return StringMaker::convert( value ); } } // end namespace Detail /// \brief converts any type to a string /// /// The default template forwards on to ostringstream - except when an /// ostringstream overload does not exist - in which case it attempts to detect /// that and writes {?}. /// Overload (not specialise) this template for custom typs that you don't want /// to provide an ostream overload for. 
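///
/// A sketch of such an overload, for a hypothetical user type Widget with an
/// integer member id:
///
///     namespace Catch {
///         inline std::string toString( Widget const& w ) {
///             return "Widget(" + toString( w.id ) + ")";
///         }
///     }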
template std::string toString( T const& value ) { return StringMaker::convert( value ); } namespace Detail { template std::string rangeToString( InputIterator first, InputIterator last ) { std::ostringstream oss; oss << "{ "; if( first != last ) { oss << Catch::toString( *first ); for( ++first ; first != last ; ++first ) oss << ", " << Catch::toString( *first ); } oss << " }"; return oss.str(); } } } // end namespace Catch namespace Catch { template class BinaryExpression; template class MatchExpression; // Wraps the LHS of an expression and overloads comparison operators // for also capturing those and RHS (if any) template class ExpressionLhs : public DecomposedExpression { public: ExpressionLhs( ResultBuilder& rb, T lhs ) : m_rb( rb ), m_lhs( lhs ), m_truthy(false) {} ExpressionLhs& operator = ( const ExpressionLhs& ); template BinaryExpression operator == ( RhsT const& rhs ) { return captureExpression( rhs ); } template BinaryExpression operator != ( RhsT const& rhs ) { return captureExpression( rhs ); } template BinaryExpression operator < ( RhsT const& rhs ) { return captureExpression( rhs ); } template BinaryExpression operator > ( RhsT const& rhs ) { return captureExpression( rhs ); } template BinaryExpression operator <= ( RhsT const& rhs ) { return captureExpression( rhs ); } template BinaryExpression operator >= ( RhsT const& rhs ) { return captureExpression( rhs ); } BinaryExpression operator == ( bool rhs ) { return captureExpression( rhs ); } BinaryExpression operator != ( bool rhs ) { return captureExpression( rhs ); } void endExpression() { m_truthy = m_lhs ? true : false; m_rb .setResultType( m_truthy ) .endExpression( *this ); } virtual void reconstructExpression( std::string& dest ) const CATCH_OVERRIDE { dest = Catch::toString( m_lhs ); } private: template BinaryExpression captureExpression( RhsT& rhs ) const { return BinaryExpression( m_rb, m_lhs, rhs ); } template BinaryExpression captureExpression( bool rhs ) const { return BinaryExpression( m_rb, m_lhs, rhs ); } private: ResultBuilder& m_rb; T m_lhs; bool m_truthy; }; template class BinaryExpression : public DecomposedExpression { public: BinaryExpression( ResultBuilder& rb, LhsT lhs, RhsT rhs ) : m_rb( rb ), m_lhs( lhs ), m_rhs( rhs ) {} BinaryExpression& operator = ( BinaryExpression& ); void endExpression() const { m_rb .setResultType( Internal::compare( m_lhs, m_rhs ) ) .endExpression( *this ); } virtual bool isBinaryExpression() const CATCH_OVERRIDE { return true; } virtual void reconstructExpression( std::string& dest ) const CATCH_OVERRIDE { std::string lhs = Catch::toString( m_lhs ); std::string rhs = Catch::toString( m_rhs ); char delim = lhs.size() + rhs.size() < 40 && lhs.find('\n') == std::string::npos && rhs.find('\n') == std::string::npos ? 
' ' : '\n'; dest.reserve( 7 + lhs.size() + rhs.size() ); // 2 for spaces around operator // 2 for operator // 2 for parentheses (conditionally added later) // 1 for negation (conditionally added later) dest = lhs; dest += delim; dest += Internal::OperatorTraits::getName(); dest += delim; dest += rhs; } private: ResultBuilder& m_rb; LhsT m_lhs; RhsT m_rhs; }; template class MatchExpression : public DecomposedExpression { public: MatchExpression( ArgT arg, MatcherT matcher, char const* matcherString ) : m_arg( arg ), m_matcher( matcher ), m_matcherString( matcherString ) {} virtual bool isBinaryExpression() const CATCH_OVERRIDE { return true; } virtual void reconstructExpression( std::string& dest ) const CATCH_OVERRIDE { std::string matcherAsString = m_matcher.toString(); dest = Catch::toString( m_arg ); dest += ' '; if( matcherAsString == Detail::unprintableString ) dest += m_matcherString; else dest += matcherAsString; } private: ArgT m_arg; MatcherT m_matcher; char const* m_matcherString; }; } // end namespace Catch namespace Catch { template inline ExpressionLhs ResultBuilder::operator <= ( T const& operand ) { return ExpressionLhs( *this, operand ); } inline ExpressionLhs ResultBuilder::operator <= ( bool value ) { return ExpressionLhs( *this, value ); } template inline void ResultBuilder::captureMatch( ArgT const& arg, MatcherT const& matcher, char const* matcherString ) { MatchExpression expr( arg, matcher, matcherString ); setResultType( matcher.match( arg ) ); endExpression( expr ); } } // namespace Catch // #included from: catch_message.h #define TWOBLUECUBES_CATCH_MESSAGE_H_INCLUDED #include namespace Catch { struct MessageInfo { MessageInfo( std::string const& _macroName, SourceLineInfo const& _lineInfo, ResultWas::OfType _type ); std::string macroName; SourceLineInfo lineInfo; ResultWas::OfType type; std::string message; unsigned int sequence; bool operator == ( MessageInfo const& other ) const { return sequence == other.sequence; } bool operator < ( MessageInfo const& other ) const { return sequence < other.sequence; } private: static unsigned int globalCount; }; struct MessageBuilder { MessageBuilder( std::string const& macroName, SourceLineInfo const& lineInfo, ResultWas::OfType type ) : m_info( macroName, lineInfo, type ) {} template MessageBuilder& operator << ( T const& value ) { m_stream << value; return *this; } MessageInfo m_info; std::ostringstream m_stream; }; class ScopedMessage { public: ScopedMessage( MessageBuilder const& builder ); ScopedMessage( ScopedMessage const& other ); ~ScopedMessage(); MessageInfo m_info; }; } // end namespace Catch // #included from: catch_interfaces_capture.h #define TWOBLUECUBES_CATCH_INTERFACES_CAPTURE_H_INCLUDED #include namespace Catch { class TestCase; class AssertionResult; struct AssertionInfo; struct SectionInfo; struct SectionEndInfo; struct MessageInfo; class ScopedMessageBuilder; struct Counts; struct IResultCapture { virtual ~IResultCapture(); virtual void assertionEnded( AssertionResult const& result ) = 0; virtual bool sectionStarted( SectionInfo const& sectionInfo, Counts& assertions ) = 0; virtual void sectionEnded( SectionEndInfo const& endInfo ) = 0; virtual void sectionEndedEarly( SectionEndInfo const& endInfo ) = 0; virtual void pushScopedMessage( MessageInfo const& message ) = 0; virtual void popScopedMessage( MessageInfo const& message ) = 0; virtual std::string getCurrentTestName() const = 0; virtual const AssertionResult* getLastResult() const = 0; virtual void exceptionEarlyReported() = 0; virtual void 
handleFatalErrorCondition( std::string const& message ) = 0;
    };

    IResultCapture& getResultCapture();
}

// #included from: catch_debugger.h
#define TWOBLUECUBES_CATCH_DEBUGGER_H_INCLUDED

// #included from: catch_platform.h
#define TWOBLUECUBES_CATCH_PLATFORM_H_INCLUDED

#if defined(__MAC_OS_X_VERSION_MIN_REQUIRED)
#  define CATCH_PLATFORM_MAC
#elif defined(__IPHONE_OS_VERSION_MIN_REQUIRED)
#  define CATCH_PLATFORM_IPHONE
#elif defined(linux) || defined(__linux) || defined(__linux__)
#  define CATCH_PLATFORM_LINUX
#elif defined(WIN32) || defined(__WIN32__) || defined(_WIN32) || defined(_MSC_VER)
#  define CATCH_PLATFORM_WINDOWS
#  if !defined(NOMINMAX) && !defined(CATCH_CONFIG_NO_NOMINMAX)
#    define CATCH_DEFINES_NOMINMAX
#  endif
#  if !defined(WIN32_LEAN_AND_MEAN) && !defined(CATCH_CONFIG_NO_WIN32_LEAN_AND_MEAN)
#    define CATCH_DEFINES_WIN32_LEAN_AND_MEAN
#  endif
#endif

#include <string>

namespace Catch{
    bool isDebuggerActive();
    void writeToDebugConsole( std::string const& text );
}

#ifdef CATCH_PLATFORM_MAC

    // The following code snippet based on:
    // http://cocoawithlove.com/2008/03/break-into-debugger.html
    #if defined(__ppc64__) || defined(__ppc__)
        #define CATCH_TRAP() \
                __asm__("li r0, 20\nsc\nnop\nli r0, 37\nli r4, 2\nsc\nnop\n" \
                : : : "memory","r0","r3","r4" )
    // backported from Catch2
    // revision b9853b4b356b83bb580c746c3a1f11101f9af54f
    // src/catch2/internal/catch_debugger.hpp
    #elif defined(__i386__) || defined(__x86_64__)
        #define CATCH_TRAP() __asm__("int $3\n" : : ) /* NOLINT */
    #elif defined(__aarch64__)
        #define CATCH_TRAP() __asm__(".inst 0xd4200000")
    #endif

#elif defined(CATCH_PLATFORM_LINUX)
    // If we can use inline assembler, do it because this allows us to break
    // directly at the location of the failing check instead of breaking inside
    // raise() called from it, i.e. one stack frame below.
    #if defined(__GNUC__) && (defined(__i386) || defined(__x86_64))
        #define CATCH_TRAP() asm volatile ("int $3")
    #else // Fall back to the generic way.
        #include <signal.h>

        #define CATCH_TRAP() raise(SIGTRAP)
    #endif
#elif defined(_MSC_VER)
    #define CATCH_TRAP() __debugbreak()
#elif defined(__MINGW32__)
    extern "C" __declspec(dllimport) void __stdcall DebugBreak();
    #define CATCH_TRAP() DebugBreak()
#endif

#ifdef CATCH_TRAP
    #define CATCH_BREAK_INTO_DEBUGGER() if( Catch::isDebuggerActive() ) { CATCH_TRAP(); }
#else
    #define CATCH_BREAK_INTO_DEBUGGER() Catch::alwaysTrue();
#endif

// #included from: catch_interfaces_runner.h
#define TWOBLUECUBES_CATCH_INTERFACES_RUNNER_H_INCLUDED

namespace Catch {
    class TestCase;

    struct IRunner {
        virtual ~IRunner();
        virtual bool aborting() const = 0;
    };
}

#if defined(CATCH_CONFIG_FAST_COMPILE)
///////////////////////////////////////////////////////////////////////////////
// We can speed up compilation significantly by breaking into the debugger lower
// in the callstack, because then we don't have to expand the
// CATCH_BREAK_INTO_DEBUGGER macro in each assertion.
#define INTERNAL_CATCH_REACT( resultBuilder ) \
    resultBuilder.react();

///////////////////////////////////////////////////////////////////////////////
// Another way to speed up compilation is to omit the local try-catch for
// REQUIRE* macros.
// This can potentially cause false negatives if the test code catches
// the exception before it propagates back up to the runner.
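// (A sketch of opting in: CATCH_CONFIG_FAST_COMPILE must be defined identically
// in every translation unit, before this header is pulled in -- for example at
// the top of a testthat C++ test file:)
//
//     #define CATCH_CONFIG_FAST_COMPILE
//     #include <testthat.h>   // testthat's wrapper header, which includes this one
//
// The trade-off named above applies: REQUIRE* assertions compile faster but may
// miss exceptions that the code under test swallows before they reach the runner.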
#define INTERNAL_CATCH_TEST_NO_TRY( macroName, resultDisposition, expr ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr, resultDisposition ); \ __catchResult.setExceptionGuard(); \ CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS \ ( __catchResult <= expr ).endExpression(); \ CATCH_INTERNAL_UNSUPPRESS_PARENTHESES_WARNINGS \ __catchResult.unsetExceptionGuard(); \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::isTrue( false && static_cast( !!(expr) ) ) ) // expr here is never evaluated at runtime but it forces the compiler to give it a look // The double negation silences MSVC's C4800 warning, the static_cast forces short-circuit evaluation if the type has overloaded &&. #define INTERNAL_CHECK_THAT_NO_TRY( macroName, matcher, resultDisposition, arg ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #arg ", " #matcher, resultDisposition ); \ __catchResult.setExceptionGuard(); \ __catchResult.captureMatch( arg, matcher, #matcher ); \ __catchResult.unsetExceptionGuard(); \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::alwaysFalse() ) #else /////////////////////////////////////////////////////////////////////////////// // In the event of a failure works out if the debugger needs to be invoked // and/or an exception thrown and takes appropriate action. // This needs to be done as a macro so the debugger will stop in the user // source code rather than in Catch library code #define INTERNAL_CATCH_REACT( resultBuilder ) \ if( resultBuilder.shouldDebugBreak() ) CATCH_BREAK_INTO_DEBUGGER(); \ resultBuilder.react(); #endif /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_TEST( macroName, resultDisposition, expr ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr, resultDisposition ); \ try { \ CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS \ ( __catchResult <= expr ).endExpression(); \ CATCH_INTERNAL_UNSUPPRESS_PARENTHESES_WARNINGS \ } \ catch( ... ) { \ __catchResult.useActiveException( resultDisposition ); \ } \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::isTrue( false && static_cast( !!(expr) ) ) ) // expr here is never evaluated at runtime but it forces the compiler to give it a look // The double negation silences MSVC's C4800 warning, the static_cast forces short-circuit evaluation if the type has overloaded &&. /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_IF( macroName, resultDisposition, expr ) \ INTERNAL_CATCH_TEST( macroName, resultDisposition, expr ); \ if( Catch::getResultCapture().getLastResult()->succeeded() ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_ELSE( macroName, resultDisposition, expr ) \ INTERNAL_CATCH_TEST( macroName, resultDisposition, expr ); \ if( !Catch::getResultCapture().getLastResult()->succeeded() ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_NO_THROW( macroName, resultDisposition, expr ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr, resultDisposition ); \ try { \ static_cast(expr); \ __catchResult.captureResult( Catch::ResultWas::Ok ); \ } \ catch( ... 
) { \ __catchResult.useActiveException( resultDisposition ); \ } \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::alwaysFalse() ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_THROWS( macroName, resultDisposition, matcher, expr ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr, resultDisposition, #matcher ); \ if( __catchResult.allowThrows() ) \ try { \ static_cast(expr); \ __catchResult.captureResult( Catch::ResultWas::DidntThrowException ); \ } \ catch( ... ) { \ __catchResult.captureExpectedException( matcher ); \ } \ else \ __catchResult.captureResult( Catch::ResultWas::Ok ); \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::alwaysFalse() ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_THROWS_AS( macroName, exceptionType, resultDisposition, expr ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr ", " #exceptionType, resultDisposition ); \ if( __catchResult.allowThrows() ) \ try { \ static_cast(expr); \ __catchResult.captureResult( Catch::ResultWas::DidntThrowException ); \ } \ catch( const exceptionType& ) { \ __catchResult.captureResult( Catch::ResultWas::Ok ); \ } \ catch( ... ) { \ __catchResult.useActiveException( resultDisposition ); \ } \ else \ __catchResult.captureResult( Catch::ResultWas::Ok ); \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::alwaysFalse() ) /////////////////////////////////////////////////////////////////////////////// #ifdef CATCH_CONFIG_VARIADIC_MACROS #define INTERNAL_CATCH_MSG( macroName, messageType, resultDisposition, ... ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, "", resultDisposition ); \ __catchResult << __VA_ARGS__ + ::Catch::StreamEndStop(); \ __catchResult.captureResult( messageType ); \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::alwaysFalse() ) #else #define INTERNAL_CATCH_MSG( macroName, messageType, resultDisposition, log ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, "", resultDisposition ); \ __catchResult << log + ::Catch::StreamEndStop(); \ __catchResult.captureResult( messageType ); \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::alwaysFalse() ) #endif /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_INFO( macroName, log ) \ Catch::ScopedMessage INTERNAL_CATCH_UNIQUE_NAME( scopedMessage ) = Catch::MessageBuilder( macroName, CATCH_INTERNAL_LINEINFO, Catch::ResultWas::Info ) << log; /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CHECK_THAT( macroName, matcher, resultDisposition, arg ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #arg ", " #matcher, resultDisposition ); \ try { \ __catchResult.captureMatch( arg, matcher, #matcher ); \ } catch( ... 
) { \
            __catchResult.useActiveException( resultDisposition | Catch::ResultDisposition::ContinueOnFailure ); \
        } \
        INTERNAL_CATCH_REACT( __catchResult ) \
    } while( Catch::alwaysFalse() )

// #included from: internal/catch_section.h
#define TWOBLUECUBES_CATCH_SECTION_H_INCLUDED

// #included from: catch_section_info.h
#define TWOBLUECUBES_CATCH_SECTION_INFO_H_INCLUDED

// #included from: catch_totals.hpp
#define TWOBLUECUBES_CATCH_TOTALS_HPP_INCLUDED

#include <cstddef>

namespace Catch {

    struct Counts {
        Counts() : passed( 0 ), failed( 0 ), failedButOk( 0 ) {}

        Counts operator - ( Counts const& other ) const {
            Counts diff;
            diff.passed = passed - other.passed;
            diff.failed = failed - other.failed;
            diff.failedButOk = failedButOk - other.failedButOk;
            return diff;
        }
        Counts& operator += ( Counts const& other ) {
            passed += other.passed;
            failed += other.failed;
            failedButOk += other.failedButOk;
            return *this;
        }

        std::size_t total() const {
            return passed + failed + failedButOk;
        }

        bool allPassed() const {
            return failed == 0 && failedButOk == 0;
        }

        bool allOk() const {
            return failed == 0;
        }

        std::size_t passed;
        std::size_t failed;
        std::size_t failedButOk;
    };

    struct Totals {

        Totals operator - ( Totals const& other ) const {
            Totals diff;
            diff.assertions = assertions - other.assertions;
            diff.testCases = testCases - other.testCases;
            return diff;
        }

        Totals delta( Totals const& prevTotals ) const {
            Totals diff = *this - prevTotals;
            if( diff.assertions.failed > 0 )
                ++diff.testCases.failed;
            else if( diff.assertions.failedButOk > 0 )
                ++diff.testCases.failedButOk;
            else
                ++diff.testCases.passed;
            return diff;
        }

        Totals& operator += ( Totals const& other ) {
            assertions += other.assertions;
            testCases += other.testCases;
            return *this;
        }

        Counts assertions;
        Counts testCases;
    };
}

#include <string>

namespace Catch {

    struct SectionInfo {
        SectionInfo(
            SourceLineInfo const& _lineInfo,
            std::string const& _name,
            std::string const& _description = std::string() );

        std::string name;
        std::string description;
        SourceLineInfo lineInfo;
    };

    struct SectionEndInfo {
        SectionEndInfo( SectionInfo const& _sectionInfo, Counts const& _prevAssertions, double _durationInSeconds )
        : sectionInfo( _sectionInfo ), prevAssertions( _prevAssertions ), durationInSeconds( _durationInSeconds )
        {}

        SectionInfo sectionInfo;
        Counts prevAssertions;
        double durationInSeconds;
    };

} // end namespace Catch

// #included from: catch_timer.h
#define TWOBLUECUBES_CATCH_TIMER_H_INCLUDED

#ifdef _MSC_VER
namespace Catch {
    typedef unsigned long long UInt64;
}
#else
#include <stdint.h>
namespace Catch {
    typedef uint64_t UInt64;
}
#endif

namespace Catch {
    class Timer {
    public:
        Timer() : m_ticks( 0 ) {}
        void start();
        unsigned int getElapsedMicroseconds() const;
        unsigned int getElapsedMilliseconds() const;
        double getElapsedSeconds() const;

    private:
        UInt64 m_ticks;
    };
} // namespace Catch

#include <string>

namespace Catch {

    class Section : NonCopyable {
    public:
        Section( SectionInfo const& info );
        ~Section();

        // This indicates whether the section should be executed or not
        operator bool() const;

    private:
        SectionInfo m_info;

        std::string m_name;
        Counts m_assertions;
        bool m_sectionIncluded;

        Timer m_timer;
    };

} // end namespace Catch

#ifdef CATCH_CONFIG_VARIADIC_MACROS
    #define INTERNAL_CATCH_SECTION( ...
) \ if( Catch::Section const& INTERNAL_CATCH_UNIQUE_NAME( catch_internal_Section ) = Catch::SectionInfo( CATCH_INTERNAL_LINEINFO, __VA_ARGS__ ) ) #else #define INTERNAL_CATCH_SECTION( name, desc ) \ if( Catch::Section const& INTERNAL_CATCH_UNIQUE_NAME( catch_internal_Section ) = Catch::SectionInfo( CATCH_INTERNAL_LINEINFO, name, desc ) ) #endif // #included from: internal/catch_generators.hpp #define TWOBLUECUBES_CATCH_GENERATORS_HPP_INCLUDED #include #include #include namespace Catch { template struct IGenerator { virtual ~IGenerator() {} virtual T getValue( std::size_t index ) const = 0; virtual std::size_t size () const = 0; }; template class BetweenGenerator : public IGenerator { public: BetweenGenerator( T from, T to ) : m_from( from ), m_to( to ){} virtual T getValue( std::size_t index ) const { return m_from+static_cast( index ); } virtual std::size_t size() const { return static_cast( 1+m_to-m_from ); } private: T m_from; T m_to; }; template class ValuesGenerator : public IGenerator { public: ValuesGenerator(){} void add( T value ) { m_values.push_back( value ); } virtual T getValue( std::size_t index ) const { return m_values[index]; } virtual std::size_t size() const { return m_values.size(); } private: std::vector m_values; }; template class CompositeGenerator { public: CompositeGenerator() : m_totalSize( 0 ) {} // *** Move semantics, similar to auto_ptr *** CompositeGenerator( CompositeGenerator& other ) : m_fileInfo( other.m_fileInfo ), m_totalSize( 0 ) { move( other ); } CompositeGenerator& setFileInfo( const char* fileInfo ) { m_fileInfo = fileInfo; return *this; } ~CompositeGenerator() { deleteAll( m_composed ); } operator T () const { size_t overallIndex = getCurrentContext().getGeneratorIndex( m_fileInfo, m_totalSize ); typename std::vector*>::const_iterator it = m_composed.begin(); typename std::vector*>::const_iterator itEnd = m_composed.end(); for( size_t index = 0; it != itEnd; ++it ) { const IGenerator* generator = *it; if( overallIndex >= index && overallIndex < index + generator->size() ) { return generator->getValue( overallIndex-index ); } index += generator->size(); } CATCH_INTERNAL_ERROR( "Indexed past end of generated range" ); return T(); // Suppress spurious "not all control paths return a value" warning in Visual Studio - if you know how to fix this please do so } void add( const IGenerator* generator ) { m_totalSize += generator->size(); m_composed.push_back( generator ); } CompositeGenerator& then( CompositeGenerator& other ) { move( other ); return *this; } CompositeGenerator& then( T value ) { ValuesGenerator* valuesGen = new ValuesGenerator(); valuesGen->add( value ); add( valuesGen ); return *this; } private: void move( CompositeGenerator& other ) { m_composed.insert( m_composed.end(), other.m_composed.begin(), other.m_composed.end() ); m_totalSize += other.m_totalSize; other.m_composed.clear(); } std::vector*> m_composed; std::string m_fileInfo; size_t m_totalSize; }; namespace Generators { template CompositeGenerator between( T from, T to ) { CompositeGenerator generators; generators.add( new BetweenGenerator( from, to ) ); return generators; } template CompositeGenerator values( T val1, T val2 ) { CompositeGenerator generators; ValuesGenerator* valuesGen = new ValuesGenerator(); valuesGen->add( val1 ); valuesGen->add( val2 ); generators.add( valuesGen ); return generators; } template CompositeGenerator values( T val1, T val2, T val3 ){ CompositeGenerator generators; ValuesGenerator* valuesGen = new ValuesGenerator(); valuesGen->add( val1 ); 
// #included from: internal/catch_generators.hpp
#define TWOBLUECUBES_CATCH_GENERATORS_HPP_INCLUDED

#include <vector>
#include <string>
#include <stdlib.h>

namespace Catch {

template<typename T>
struct IGenerator {
    virtual ~IGenerator() {}
    virtual T getValue( std::size_t index ) const = 0;
    virtual std::size_t size () const = 0;
};

template<typename T>
class BetweenGenerator : public IGenerator<T> {
public:
    BetweenGenerator( T from, T to ) : m_from( from ), m_to( to ){}

    virtual T getValue( std::size_t index ) const {
        return m_from+static_cast<T>( index );
    }

    virtual std::size_t size() const {
        return static_cast<std::size_t>( 1+m_to-m_from );
    }

private:
    T m_from;
    T m_to;
};

template<typename T>
class ValuesGenerator : public IGenerator<T> {
public:
    ValuesGenerator(){}

    void add( T value ) {
        m_values.push_back( value );
    }

    virtual T getValue( std::size_t index ) const {
        return m_values[index];
    }

    virtual std::size_t size() const {
        return m_values.size();
    }

private:
    std::vector<T> m_values;
};

template<typename T>
class CompositeGenerator {
public:
    CompositeGenerator() : m_totalSize( 0 ) {}

    // *** Move semantics, similar to auto_ptr ***
    CompositeGenerator( CompositeGenerator& other )
    :   m_fileInfo( other.m_fileInfo ),
        m_totalSize( 0 )
    {
        move( other );
    }

    CompositeGenerator& setFileInfo( const char* fileInfo ) {
        m_fileInfo = fileInfo;
        return *this;
    }

    ~CompositeGenerator() {
        deleteAll( m_composed );
    }

    operator T () const {
        size_t overallIndex = getCurrentContext().getGeneratorIndex( m_fileInfo, m_totalSize );

        typename std::vector<const IGenerator<T>*>::const_iterator it = m_composed.begin();
        typename std::vector<const IGenerator<T>*>::const_iterator itEnd = m_composed.end();
        for( size_t index = 0; it != itEnd; ++it )
        {
            const IGenerator<T>* generator = *it;
            if( overallIndex >= index && overallIndex < index + generator->size() )
            {
                return generator->getValue( overallIndex-index );
            }
            index += generator->size();
        }
        CATCH_INTERNAL_ERROR( "Indexed past end of generated range" );
        return T(); // Suppress spurious "not all control paths return a value" warning in Visual Studio - if you know how to fix this please do so
    }

    void add( const IGenerator<T>* generator ) {
        m_totalSize += generator->size();
        m_composed.push_back( generator );
    }

    CompositeGenerator& then( CompositeGenerator& other ) {
        move( other );
        return *this;
    }

    CompositeGenerator& then( T value ) {
        ValuesGenerator<T>* valuesGen = new ValuesGenerator<T>();
        valuesGen->add( value );
        add( valuesGen );
        return *this;
    }

private:

    void move( CompositeGenerator& other ) {
        m_composed.insert( m_composed.end(), other.m_composed.begin(), other.m_composed.end() );
        m_totalSize += other.m_totalSize;
        other.m_composed.clear();
    }

    std::vector<const IGenerator<T>*> m_composed;
    std::string m_fileInfo;
    size_t m_totalSize;
};

namespace Generators
{
    template<typename T>
    CompositeGenerator<T> between( T from, T to ) {
        CompositeGenerator<T> generators;
        generators.add( new BetweenGenerator<T>( from, to ) );
        return generators;
    }

    template<typename T>
    CompositeGenerator<T> values( T val1, T val2 ) {
        CompositeGenerator<T> generators;
        ValuesGenerator<T>* valuesGen = new ValuesGenerator<T>();
        valuesGen->add( val1 );
        valuesGen->add( val2 );
        generators.add( valuesGen );
        return generators;
    }

    template<typename T>
    CompositeGenerator<T> values( T val1, T val2, T val3 ){
        CompositeGenerator<T> generators;
        ValuesGenerator<T>* valuesGen = new ValuesGenerator<T>();
        valuesGen->add( val1 );
        valuesGen->add( val2 );
        valuesGen->add( val3 );
        generators.add( valuesGen );
        return generators;
    }

    template<typename T>
    CompositeGenerator<T> values( T val1, T val2, T val3, T val4 ) {
        CompositeGenerator<T> generators;
        ValuesGenerator<T>* valuesGen = new ValuesGenerator<T>();
        valuesGen->add( val1 );
        valuesGen->add( val2 );
        valuesGen->add( val3 );
        valuesGen->add( val4 );
        generators.add( valuesGen );
        return generators;
    }

} // end namespace Generators

using namespace Generators;

} // end namespace Catch

#define INTERNAL_CATCH_LINESTR2( line ) #line
#define INTERNAL_CATCH_LINESTR( line ) INTERNAL_CATCH_LINESTR2( line )

#define INTERNAL_CATCH_GENERATE( expr ) expr.setFileInfo( __FILE__ "(" INTERNAL_CATCH_LINESTR( __LINE__ ) ")" )
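// Editor's note: sketch (not part of the header) of the classic-Catch
// generator DSL defined above. between()/values() build a CompositeGenerator<T>;
// the public GENERATE macro (CATCH_GENERATE in prefixed builds) wraps it in
// INTERNAL_CATCH_GENERATE, which tags the expression with file/line so the
// running context can iterate the test body over every generated value.
#if 0
TEST_CASE( "generated inputs", "[generators]" ) {
    int i = GENERATE( between( 1, 5 ) );   // body re-runs once per i in 1..5
    REQUIRE( i >= 1 );
    REQUIRE( i <= 5 );
    // extra discrete values can be chained, e.g. between( 1, 5 ).then( 25 )
}
#endif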
// #included from: internal/catch_interfaces_exception.h
#define TWOBLUECUBES_CATCH_INTERFACES_EXCEPTION_H_INCLUDED

#include <string>
#include <vector>

// #included from: catch_interfaces_registry_hub.h
#define TWOBLUECUBES_CATCH_INTERFACES_REGISTRY_HUB_H_INCLUDED

#include <string>

namespace Catch {

    class TestCase;
    struct ITestCaseRegistry;
    struct IExceptionTranslatorRegistry;
    struct IExceptionTranslator;
    struct IReporterRegistry;
    struct IReporterFactory;
    struct ITagAliasRegistry;

    struct IRegistryHub {
        virtual ~IRegistryHub();

        virtual IReporterRegistry const& getReporterRegistry() const = 0;
        virtual ITestCaseRegistry const& getTestCaseRegistry() const = 0;
        virtual ITagAliasRegistry const& getTagAliasRegistry() const = 0;

        virtual IExceptionTranslatorRegistry& getExceptionTranslatorRegistry() = 0;
    };

    struct IMutableRegistryHub {
        virtual ~IMutableRegistryHub();
        virtual void registerReporter( std::string const& name, Ptr<IReporterFactory> const& factory ) = 0;
        virtual void registerListener( Ptr<IReporterFactory> const& factory ) = 0;
        virtual void registerTest( TestCase const& testInfo ) = 0;
        virtual void registerTranslator( const IExceptionTranslator* translator ) = 0;
        virtual void registerTagAlias( std::string const& alias, std::string const& tag, SourceLineInfo const& lineInfo ) = 0;
    };

    IRegistryHub& getRegistryHub();
    IMutableRegistryHub& getMutableRegistryHub();
    void cleanUp();
    std::string translateActiveException();

}

namespace Catch {

    typedef std::string(*exceptionTranslateFunction)();

    struct IExceptionTranslator;
    typedef std::vector<const IExceptionTranslator*> ExceptionTranslators;

    struct IExceptionTranslator {
        virtual ~IExceptionTranslator();
        virtual std::string translate( ExceptionTranslators::const_iterator it, ExceptionTranslators::const_iterator itEnd ) const = 0;
    };

    struct IExceptionTranslatorRegistry {
        virtual ~IExceptionTranslatorRegistry();

        virtual std::string translateActiveException() const = 0;
    };

    class ExceptionTranslatorRegistrar {
        template<typename T>
        class ExceptionTranslator : public IExceptionTranslator {
        public:

            ExceptionTranslator( std::string(*translateFunction)( T& ) )
            : m_translateFunction( translateFunction )
            {}

            virtual std::string translate( ExceptionTranslators::const_iterator it, ExceptionTranslators::const_iterator itEnd ) const CATCH_OVERRIDE {
                try {
                    if( it == itEnd )
                        throw;
                    else
                        return (*it)->translate( it+1, itEnd );
                }
                catch( T& ex ) {
                    return m_translateFunction( ex );
                }
            }

        protected:
            std::string(*m_translateFunction)( T& );
        };

    public:
        template<typename T>
        ExceptionTranslatorRegistrar( std::string(*translateFunction)( T& ) ) {
            getMutableRegistryHub().registerTranslator
                ( new ExceptionTranslator<T>( translateFunction ) );
        }
    };
}

///////////////////////////////////////////////////////////////////////////////
#define INTERNAL_CATCH_TRANSLATE_EXCEPTION2( translatorName, signature ) \
    static std::string translatorName( signature ); \
    namespace{ Catch::ExceptionTranslatorRegistrar INTERNAL_CATCH_UNIQUE_NAME( catch_internal_ExceptionRegistrar )( &translatorName ); }\
    static std::string translatorName( signature )

#define INTERNAL_CATCH_TRANSLATE_EXCEPTION( signature ) INTERNAL_CATCH_TRANSLATE_EXCEPTION2( INTERNAL_CATCH_UNIQUE_NAME( catch_internal_ExceptionTranslator ), signature )
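// Editor's note: sketch (not part of the header) of registering a custom
// exception translator via the macro above. CATCH_TRANSLATE_EXCEPTION is the
// public spelling that expands to INTERNAL_CATCH_TRANSLATE_EXCEPTION; MyError
// is a made-up type for illustration. The registered function turns an escaped
// exception into the string shown in test reports.
#if 0
struct MyError { std::string message; };

CATCH_TRANSLATE_EXCEPTION( MyError& ex ) {
    return ex.message;   // reported when a MyError escapes a test case
}
#endif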
// #included from: internal/catch_approx.hpp
#define TWOBLUECUBES_CATCH_APPROX_HPP_INCLUDED

#include <cmath>
#include <limits>

#if defined(CATCH_CONFIG_CPP11_TYPE_TRAITS)
#include <type_traits>
#endif

namespace Catch {
namespace Detail {

    class Approx {
    public:
        explicit Approx ( double value )
        :   m_epsilon( std::numeric_limits<float>::epsilon()*100 ),
            m_margin( 0.0 ),
            m_scale( 1.0 ),
            m_value( value )
        {}

        Approx( Approx const& other )
        :   m_epsilon( other.m_epsilon ),
            m_margin( other.m_margin ),
            m_scale( other.m_scale ),
            m_value( other.m_value )
        {}

        static Approx custom() {
            return Approx( 0 );
        }

#if defined(CATCH_CONFIG_CPP11_TYPE_TRAITS)

        template <typename T, typename = typename std::enable_if<std::is_constructible<double, T>::value>::type>
        Approx operator()( T value ) {
            Approx approx( static_cast<double>(value) );
            approx.epsilon( m_epsilon );
            approx.margin( m_margin );
            approx.scale( m_scale );
            return approx;
        }

        template <typename T, typename = typename std::enable_if<std::is_constructible<double, T>::value>::type>
        explicit Approx( T value ): Approx(static_cast<double>(value))
        {}

        template <typename T, typename = typename std::enable_if<std::is_constructible<double, T>::value>::type>
        friend bool operator == ( const T& lhs, Approx const& rhs ) {
            // Thanks to Richard Harris for his help refining this formula
            auto lhs_v = double(lhs);
            bool relativeOK = std::fabs(lhs_v - rhs.m_value) < rhs.m_epsilon * (rhs.m_scale + (std::max)(std::fabs(lhs_v), std::fabs(rhs.m_value)));
            if (relativeOK) {
                return true;
            }
            return std::fabs(lhs_v - rhs.m_value) < rhs.m_margin;
        }

        template <typename T, typename = typename std::enable_if<std::is_constructible<double, T>::value>::type>
        friend bool operator == ( Approx const& lhs, const T& rhs ) {
            return operator==( rhs, lhs );
        }

        template <typename T, typename = typename std::enable_if<std::is_constructible<double, T>::value>::type>
        friend bool operator != ( T lhs, Approx const& rhs ) {
            return !operator==( lhs, rhs );
        }

        template <typename T, typename = typename std::enable_if<std::is_constructible<double, T>::value>::type>
        friend bool operator != ( Approx const& lhs, T rhs ) {
            return !operator==( rhs, lhs );
        }

        template <typename T, typename = typename std::enable_if<std::is_constructible<double, T>::value>::type>
        friend bool operator <= ( T lhs, Approx const& rhs ) {
            return double(lhs) < rhs.m_value || lhs == rhs;
        }

        template <typename T, typename = typename std::enable_if<std::is_constructible<double, T>::value>::type>
        friend bool operator <= ( Approx const& lhs, T rhs ) {
            return lhs.m_value < double(rhs) || lhs == rhs;
        }

        template <typename T, typename = typename std::enable_if<std::is_constructible<double, T>::value>::type>
        friend bool operator >= ( T lhs, Approx const& rhs ) {
            return double(lhs) > rhs.m_value || lhs == rhs;
        }

        template <typename T, typename = typename std::enable_if<std::is_constructible<double, T>::value>::type>
        friend bool operator >= ( Approx const& lhs, T rhs ) {
            return lhs.m_value > double(rhs) || lhs == rhs;
        }

        template <typename T, typename = typename std::enable_if<std::is_constructible<double, T>::value>::type>
        Approx& epsilon( T newEpsilon ) {
            m_epsilon = double(newEpsilon);
            return *this;
        }

        template <typename T, typename = typename std::enable_if<std::is_constructible<double, T>::value>::type>
        Approx& margin( T newMargin ) {
            m_margin = double(newMargin);
            return *this;
        }

        template <typename T, typename = typename std::enable_if<std::is_constructible<double, T>::value>::type>
        Approx& scale( T newScale ) {
            m_scale = double(newScale);
            return *this;
        }

#else

        Approx operator()( double value ) {
            Approx approx( value );
            approx.epsilon( m_epsilon );
            approx.margin( m_margin );
            approx.scale( m_scale );
            return approx;
        }

        friend bool operator == ( double lhs, Approx const& rhs ) {
            // Thanks to Richard Harris for his help refining this formula
            bool relativeOK = std::fabs( lhs - rhs.m_value ) < rhs.m_epsilon * (rhs.m_scale + (std::max)( std::fabs(lhs), std::fabs(rhs.m_value) ) );
            if (relativeOK) {
                return true;
            }
            return std::fabs(lhs - rhs.m_value) < rhs.m_margin;
        }

        friend bool operator == ( Approx const& lhs, double rhs ) {
            return operator==( rhs, lhs );
        }

        friend bool operator != ( double lhs, Approx const& rhs ) {
            return !operator==( lhs, rhs );
        }

        friend bool operator != ( Approx const& lhs, double rhs ) {
            return !operator==( rhs, lhs );
        }

        friend bool operator <= ( double lhs, Approx const& rhs ) {
            return lhs < rhs.m_value || lhs == rhs;
        }

        friend bool operator <= ( Approx const& lhs, double rhs ) {
            return lhs.m_value < rhs || lhs == rhs;
        }

        friend bool operator >= ( double lhs, Approx const& rhs ) {
            return lhs > rhs.m_value || lhs == rhs;
        }

        friend bool operator >= ( Approx const& lhs, double rhs ) {
            return lhs.m_value > rhs || lhs == rhs;
        }

        Approx& epsilon( double newEpsilon ) {
            m_epsilon = newEpsilon;
            return *this;
        }

        Approx& margin( double newMargin ) {
            m_margin = newMargin;
            return *this;
        }

        Approx& scale( double newScale ) {
            m_scale = newScale;
            return *this;
        }
#endif

        std::string toString() const {
            std::ostringstream oss;
            oss << "Approx( " << Catch::toString( m_value ) << " )";
            return oss.str();
        }

    private:
        double m_epsilon;
        double m_margin;
        double m_scale;
        double m_value;
    };
}

template<>
inline std::string toString<Detail::Approx>( Detail::Approx const& value ) {
    return value.toString();
}

} // end namespace Catch
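// Editor's note: usage sketch for Detail::Approx above (not part of the
// header). In test code it is usually reachable as plain Approx, assuming a
// `using namespace Catch;` or equivalent alias is in effect; qualified names
// are used here to stay self-contained. epsilon() tunes the relative
// tolerance and margin() the absolute one, exactly per the operators above.
#if 0
TEST_CASE( "approximate comparison", "[approx]" ) {
    double result = 1.0 / 3.0;
    REQUIRE( result == Catch::Detail::Approx( 0.3333 ).epsilon( 0.001 ) );
    REQUIRE( 0.0001 == Catch::Detail::Approx( 0.0 ).margin( 0.001 ) );
}
#endif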
// #included from: internal/catch_matchers_string.h
#define TWOBLUECUBES_CATCH_MATCHERS_STRING_H_INCLUDED

namespace Catch {
namespace Matchers {

    namespace StdString {

        struct CasedString {
            CasedString( std::string const& str, CaseSensitive::Choice caseSensitivity );
            std::string adjustString( std::string const& str ) const;
            std::string caseSensitivitySuffix() const;

            CaseSensitive::Choice m_caseSensitivity;
            std::string m_str;
        };

        struct StringMatcherBase : MatcherBase<std::string> {
            StringMatcherBase( std::string const& operation, CasedString const& comparator );
            virtual std::string describe() const CATCH_OVERRIDE;

            CasedString m_comparator;
            std::string m_operation;
        };

        struct EqualsMatcher : StringMatcherBase {
            EqualsMatcher( CasedString const& comparator );
            virtual bool match( std::string const& source ) const CATCH_OVERRIDE;
        };
        struct ContainsMatcher : StringMatcherBase {
            ContainsMatcher( CasedString const& comparator );
            virtual bool match( std::string const& source ) const CATCH_OVERRIDE;
        };
        struct StartsWithMatcher : StringMatcherBase {
            StartsWithMatcher( CasedString const& comparator );
            virtual bool match( std::string const& source ) const CATCH_OVERRIDE;
        };
        struct EndsWithMatcher : StringMatcherBase {
            EndsWithMatcher( CasedString const& comparator );
            virtual bool match( std::string const& source ) const CATCH_OVERRIDE;
        };

    } // namespace StdString

    // The following functions create the actual matcher objects.
    // This allows the types to be inferred

    StdString::EqualsMatcher Equals( std::string const& str, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes );
    StdString::ContainsMatcher Contains( std::string const& str, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes );
    StdString::EndsWithMatcher EndsWith( std::string const& str, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes );
    StdString::StartsWithMatcher StartsWith( std::string const& str, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes );

} // namespace Matchers
} // namespace Catch
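// Editor's note: sketch (not part of the header) pairing the string matchers
// above with Catch's REQUIRE_THAT assertion macro.
#if 0
TEST_CASE( "string matchers", "[matchers]" ) {
    std::string s = "this string contains 'abc' as a substring";
    REQUIRE_THAT( s, Catch::Matchers::Contains( "ABC", Catch::CaseSensitive::No ) );
    REQUIRE_THAT( s, Catch::Matchers::StartsWith( "this" ) );
    REQUIRE_THAT( s, Catch::Matchers::EndsWith( "substring" ) );
}
#endif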
// #included from: internal/catch_matchers_vector.h
#define TWOBLUECUBES_CATCH_MATCHERS_VECTOR_H_INCLUDED

namespace Catch {
namespace Matchers {

    namespace Vector {

        template<typename T>
        struct ContainsElementMatcher : MatcherBase<std::vector<T>, T> {

            ContainsElementMatcher(T const &comparator) : m_comparator( comparator) {}

            bool match(std::vector<T> const &v) const CATCH_OVERRIDE {
                return std::find(v.begin(), v.end(), m_comparator) != v.end();
            }

            virtual std::string describe() const CATCH_OVERRIDE {
                return "Contains: " + Catch::toString( m_comparator );
            }

            T const& m_comparator;
        };

        template<typename T>
        struct ContainsMatcher : MatcherBase<std::vector<T>, std::vector<T> > {

            ContainsMatcher(std::vector<T> const &comparator) : m_comparator( comparator ) {}

            bool match(std::vector<T> const &v) const CATCH_OVERRIDE {
                // !TBD: see note in EqualsMatcher
                if (m_comparator.size() > v.size())
                    return false;
                for (size_t i = 0; i < m_comparator.size(); ++i)
                    if (std::find(v.begin(), v.end(), m_comparator[i]) == v.end())
                        return false;
                return true;
            }

            virtual std::string describe() const CATCH_OVERRIDE {
                return "Contains: " + Catch::toString( m_comparator );
            }

            std::vector<T> const& m_comparator;
        };

        template<typename T>
        struct EqualsMatcher : MatcherBase<std::vector<T>, std::vector<T> > {

            EqualsMatcher(std::vector<T> const &comparator) : m_comparator( comparator ) {}

            bool match(std::vector<T> const &v) const CATCH_OVERRIDE {
                // !TBD: This currently works if all elements can be compared using !=
                // - a more general approach would be via a compare template that defaults
                // to using !=. but could be specialised for, e.g. std::vector<T> etc
                // - then just call that directly
                if (m_comparator.size() != v.size())
                    return false;
                for (size_t i = 0; i < v.size(); ++i)
                    if (m_comparator[i] != v[i])
                        return false;
                return true;
            }

            virtual std::string describe() const CATCH_OVERRIDE {
                return "Equals: " + Catch::toString( m_comparator );
            }

            std::vector<T> const& m_comparator;
        };

    } // namespace Vector

    // The following functions create the actual matcher objects.
    // This allows the types to be inferred

    template<typename T>
    Vector::ContainsMatcher<T> Contains( std::vector<T> const& comparator ) {
        return Vector::ContainsMatcher<T>( comparator );
    }

    template<typename T>
    Vector::ContainsElementMatcher<T> VectorContains( T const& comparator ) {
        return Vector::ContainsElementMatcher<T>( comparator );
    }

    template<typename T>
    Vector::EqualsMatcher<T> Equals( std::vector<T> const& comparator ) {
        return Vector::EqualsMatcher<T>( comparator );
    }

} // namespace Matchers
} // namespace Catch

// #included from: internal/catch_interfaces_tag_alias_registry.h
#define TWOBLUECUBES_CATCH_INTERFACES_TAG_ALIAS_REGISTRY_H_INCLUDED

// #included from: catch_tag_alias.h
#define TWOBLUECUBES_CATCH_TAG_ALIAS_H_INCLUDED

#include <string>

namespace Catch {

    struct TagAlias {
        TagAlias( std::string const& _tag, SourceLineInfo _lineInfo ) : tag( _tag ), lineInfo( _lineInfo ) {}

        std::string tag;
        SourceLineInfo lineInfo;
    };

    struct RegistrarForTagAliases {
        RegistrarForTagAliases( char const* alias, char const* tag, SourceLineInfo const& lineInfo );
    };

} // end namespace Catch

#define CATCH_REGISTER_TAG_ALIAS( alias, spec ) namespace{ Catch::RegistrarForTagAliases INTERNAL_CATCH_UNIQUE_NAME( AutoRegisterTagAlias )( alias, spec, CATCH_INTERNAL_LINEINFO ); }
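// Editor's note: sketch (not part of the header) of the vector matchers and
// the tag-alias registration macro above. Tag aliases must begin with "[@"
// and let a long tag expression be registered once and reused on the command
// line; the alias example below follows the one in Catch's documentation.
#if 0
TEST_CASE( "vector matchers", "[matchers]" ) {
    std::vector<int> v;   v.push_back( 1 ); v.push_back( 2 );
    std::vector<int> sub; sub.push_back( 2 );
    REQUIRE_THAT( v, Catch::Matchers::Contains( sub ) );      // subset, any order
    REQUIRE_THAT( v, Catch::Matchers::VectorContains( 1 ) );  // single element
}

CATCH_REGISTER_TAG_ALIAS( "[@nhf]", "[failing]~[success]" )
#endif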
// #included from: catch_option.hpp
#define TWOBLUECUBES_CATCH_OPTION_HPP_INCLUDED

namespace Catch {

    // An optional type
    template<typename T>
    class Option {
    public:
        Option() : nullableValue( CATCH_NULL ) {}
        Option( T const& _value )
        : nullableValue( new( storage ) T( _value ) )
        {}
        Option( Option const& _other )
        : nullableValue( _other ? new( storage ) T( *_other ) : CATCH_NULL )
        {}

        ~Option() {
            reset();
        }

        Option& operator= ( Option const& _other ) {
            if( &_other != this ) {
                reset();
                if( _other )
                    nullableValue = new( storage ) T( *_other );
            }
            return *this;
        }
        Option& operator = ( T const& _value ) {
            reset();
            nullableValue = new( storage ) T( _value );
            return *this;
        }

        void reset() {
            if( nullableValue )
                nullableValue->~T();
            nullableValue = CATCH_NULL;
        }

        T& operator*() { return *nullableValue; }
        T const& operator*() const { return *nullableValue; }
        T* operator->() { return nullableValue; }
        const T* operator->() const { return nullableValue; }

        T valueOr( T const& defaultValue ) const {
            return nullableValue ? *nullableValue : defaultValue;
        }

        bool some() const { return nullableValue != CATCH_NULL; }
        bool none() const { return nullableValue == CATCH_NULL; }

        bool operator !() const { return nullableValue == CATCH_NULL; }
        operator SafeBool::type() const {
            return SafeBool::makeSafe( some() );
        }

    private:
        T *nullableValue;
        union {
            char storage[sizeof(T)];

            // These are here to force alignment for the storage
            long double dummy1;
            void (*dummy2)();
            long double dummy3;
#ifdef CATCH_CONFIG_CPP11_LONG_LONG
            long long dummy4;
#endif
        };
    };

} // end namespace Catch

namespace Catch {

    struct ITagAliasRegistry {
        virtual ~ITagAliasRegistry();
        virtual Option<TagAlias> find( std::string const& alias ) const = 0;
        virtual std::string expandAliases( std::string const& unexpandedTestSpec ) const = 0;

        static ITagAliasRegistry const& get();
    };

} // end namespace Catch

// These files are included here so the single_include script doesn't put them
// in the conditionally compiled sections
// #included from: internal/catch_test_case_info.h
#define TWOBLUECUBES_CATCH_TEST_CASE_INFO_H_INCLUDED

#include <string>
#include <set>

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif

namespace Catch {

    struct ITestCase;

    struct TestCaseInfo {
        enum SpecialProperties{
            None = 0,
            IsHidden = 1 << 1,
            ShouldFail = 1 << 2,
            MayFail = 1 << 3,
            Throws = 1 << 4,
            NonPortable = 1 << 5
        };

        TestCaseInfo(   std::string const& _name,
                        std::string const& _className,
                        std::string const& _description,
                        std::set<std::string> const& _tags,
                        SourceLineInfo const& _lineInfo );

        TestCaseInfo( TestCaseInfo const& other );

        friend void setTags( TestCaseInfo& testCaseInfo, std::set<std::string> const& tags );

        bool isHidden() const;
        bool throws() const;
        bool okToFail() const;
        bool expectedToFail() const;

        std::string name;
        std::string className;
        std::string description;
        std::set<std::string> tags;
        std::set<std::string> lcaseTags;
        std::string tagsAsString;
        SourceLineInfo lineInfo;
        SpecialProperties properties;
    };

    class TestCase : public TestCaseInfo {
    public:

        TestCase( ITestCase* testCase, TestCaseInfo const& info );

        TestCase( TestCase const& other );

        TestCase withName( std::string const& _newName ) const;

        void invoke() const;

        TestCaseInfo const& getTestCaseInfo() const;

        void swap( TestCase& other );
        bool operator == ( TestCase const& other ) const;
        bool operator < ( TestCase const& other ) const;
        TestCase& operator = ( TestCase const& other );

    private:
        Ptr<ITestCase> test;
    };

    TestCase makeTestCase(  ITestCase* testCase,
                            std::string const& className,
                            std::string const& name,
                            std::string const& description,
                            SourceLineInfo const& lineInfo );
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif
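// Editor's note: the internal Option<T> defined a little above is a nullable
// value with inline placement-new storage, used by e.g.
// ITagAliasRegistry::find(). A minimal sketch of its semantics (illustrative
// only, not part of the header):
#if 0
Catch::Option<int> maybe;        // starts empty: maybe.none() == true
maybe = 42;                      // placement-new into the inline storage
int x = maybe.valueOr( -1 );     // 42; would be -1 while empty
maybe.reset();                   // destroys the held value again
#endif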
@protocol OcFixture

@optional

-(void) setUp;
-(void) tearDown;

@end

namespace Catch {

    class OcMethod : public SharedImpl<ITestCase> {

    public:
        OcMethod( Class cls, SEL sel ) : m_cls( cls ), m_sel( sel ) {}

        virtual void invoke() const {
            id obj = [[m_cls alloc] init];

            performOptionalSelector( obj, @selector(setUp)  );
            performOptionalSelector( obj, m_sel );
            performOptionalSelector( obj, @selector(tearDown)  );

            arcSafeRelease( obj );
        }
    private:
        virtual ~OcMethod() {}

        Class m_cls;
        SEL m_sel;
    };

    namespace Detail{

        inline std::string getAnnotation(   Class cls,
                                            std::string const& annotationName,
                                            std::string const& testCaseName ) {
            NSString* selStr = [[NSString alloc] initWithFormat:@"Catch_%s_%s", annotationName.c_str(), testCaseName.c_str()];
            SEL sel = NSSelectorFromString( selStr );
            arcSafeRelease( selStr );
            id value = performOptionalSelector( cls, sel );
            if( value )
                return [(NSString*)value UTF8String];
            return "";
        }
    }

    inline size_t registerTestMethods() {
        size_t noTestMethods = 0;
        int noClasses = objc_getClassList( CATCH_NULL, 0 );

        Class* classes = (CATCH_UNSAFE_UNRETAINED Class *)malloc( sizeof(Class) * noClasses);
        objc_getClassList( classes, noClasses );

        for( int c = 0; c < noClasses; c++ ) {
            Class cls = classes[c];
            {
                u_int count;
                Method* methods = class_copyMethodList( cls, &count );
                for( u_int m = 0; m < count ; m++ ) {
                    SEL selector = method_getName(methods[m]);
                    std::string methodName = sel_getName(selector);
                    if( startsWith( methodName, "Catch_TestCase_" ) ) {
                        std::string testCaseName = methodName.substr( 15 );
                        std::string name = Detail::getAnnotation( cls, "Name", testCaseName );
                        std::string desc = Detail::getAnnotation( cls, "Description", testCaseName );
                        const char* className = class_getName( cls );

                        getMutableRegistryHub().registerTest( makeTestCase( new OcMethod( cls, selector ), className, name.c_str(), desc.c_str(), SourceLineInfo() ) );
                        noTestMethods++;
                    }
                }
                free(methods);
            }
        }
        return noTestMethods;
    }

    namespace Matchers {
        namespace Impl {
        namespace NSStringMatchers {

            struct StringHolder : MatcherBase<NSString*>{
                StringHolder( NSString* substr ) : m_substr( [substr copy] ){}
                StringHolder( StringHolder const& other ) : m_substr( [other.m_substr copy] ){}
                ~StringHolder() {
                    arcSafeRelease( m_substr );
                }

                virtual bool match( NSString* arg ) const CATCH_OVERRIDE {
                    return false;
                }

                NSString* m_substr;
            };

            struct Equals : StringHolder {
                Equals( NSString* substr ) : StringHolder( substr ){}

                virtual bool match( NSString* str ) const CATCH_OVERRIDE {
                    return  (str != nil || m_substr == nil ) &&
                            [str isEqualToString:m_substr];
                }

                virtual std::string describe() const CATCH_OVERRIDE {
                    return "equals string: " + Catch::toString( m_substr );
                }
            };

            struct Contains : StringHolder {
                Contains( NSString* substr ) : StringHolder( substr ){}

                virtual bool match( NSString* str ) const {
                    return  (str != nil || m_substr == nil ) &&
                            [str rangeOfString:m_substr].location != NSNotFound;
                }

                virtual std::string describe() const CATCH_OVERRIDE {
                    return "contains string: " + Catch::toString( m_substr );
                }
            };

            struct StartsWith : StringHolder {
                StartsWith( NSString* substr ) : StringHolder( substr ){}

                virtual bool match( NSString* str ) const {
                    return  (str != nil || m_substr == nil ) &&
                            [str rangeOfString:m_substr].location == 0;
                }

                virtual std::string describe() const CATCH_OVERRIDE {
                    return "starts with: " + Catch::toString( m_substr );
                }
            };
            struct EndsWith : StringHolder {
                EndsWith( NSString* substr ) : StringHolder( substr ){}

                virtual bool match( NSString* str ) const {
                    return  (str != nil || m_substr == nil ) &&
                            [str rangeOfString:m_substr].location == [str length] - [m_substr length];
                }

                virtual std::string describe() const CATCH_OVERRIDE {
                    return "ends with: " + Catch::toString( m_substr );
                }
            };

        } // namespace NSStringMatchers
        } // namespace Impl

        inline Impl::NSStringMatchers::Equals
            Equals( NSString* substr ){ return Impl::NSStringMatchers::Equals( substr ); }

        inline Impl::NSStringMatchers::Contains
            Contains( NSString* substr ){ return Impl::NSStringMatchers::Contains( substr ); }

        inline Impl::NSStringMatchers::StartsWith
            StartsWith( NSString* substr ){ return Impl::NSStringMatchers::StartsWith( substr ); }

        inline Impl::NSStringMatchers::EndsWith
            EndsWith( NSString* substr ){ return Impl::NSStringMatchers::EndsWith( substr ); }

    } // namespace Matchers

    using namespace Matchers;

} // namespace Catch

///////////////////////////////////////////////////////////////////////////////
#define OC_TEST_CASE( name, desc )\
+(NSString*) INTERNAL_CATCH_UNIQUE_NAME( Catch_Name_test ) \
{\
return @ name; \
}\
+(NSString*) INTERNAL_CATCH_UNIQUE_NAME( Catch_Description_test ) \
{ \
return @ desc; \
} \
-(void) INTERNAL_CATCH_UNIQUE_NAME( Catch_TestCase_test )

#endif

#ifdef CATCH_IMPL

// !TBD: Move the leak detector code into a separate header
#ifdef CATCH_CONFIG_WINDOWS_CRTDBG
#include <crtdbg.h>
class LeakDetector {
public:
    LeakDetector() {
        int flag = _CrtSetDbgFlag(_CRTDBG_REPORT_FLAG);
        flag |= _CRTDBG_LEAK_CHECK_DF;
        flag |= _CRTDBG_ALLOC_MEM_DF;
        _CrtSetDbgFlag(flag);
        _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE | _CRTDBG_MODE_DEBUG);
        _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR);
        // Change this to leaking allocation's number to break there
        _CrtSetBreakAlloc(-1);
    }
};
#else
class LeakDetector {};
#endif

LeakDetector leakDetector;

// #included from: internal/catch_impl.hpp
#define TWOBLUECUBES_CATCH_IMPL_HPP_INCLUDED

// Collect all the implementation files together here
// These are the equivalent of what would usually be cpp files

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wweak-vtables"
#endif

// #included from: ../catch_session.hpp
#define TWOBLUECUBES_CATCH_RUNNER_HPP_INCLUDED

// #included from: internal/catch_commandline.hpp
#define TWOBLUECUBES_CATCH_COMMANDLINE_HPP_INCLUDED

// #included from: catch_config.hpp
#define TWOBLUECUBES_CATCH_CONFIG_HPP_INCLUDED

// #included from: catch_test_spec_parser.hpp
#define TWOBLUECUBES_CATCH_TEST_SPEC_PARSER_HPP_INCLUDED

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif

// #included from: catch_test_spec.hpp
#define TWOBLUECUBES_CATCH_TEST_SPEC_HPP_INCLUDED

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif

// #included from: catch_wildcard_pattern.hpp
#define TWOBLUECUBES_CATCH_WILDCARD_PATTERN_HPP_INCLUDED

#include <string>

namespace Catch
{
    class WildcardPattern {
        enum WildcardPosition {
            NoWildcard = 0,
            WildcardAtStart = 1,
            WildcardAtEnd = 2,
            WildcardAtBothEnds = WildcardAtStart | WildcardAtEnd
        };

    public:

        WildcardPattern( std::string const& pattern, CaseSensitive::Choice caseSensitivity )
        :   m_caseSensitivity( caseSensitivity ),
            m_wildcard( NoWildcard ),
            m_pattern( adjustCase( pattern ) )
        {
            if( startsWith( m_pattern, '*' ) ) {
                m_pattern = m_pattern.substr( 1 );
                m_wildcard = WildcardAtStart;
            }
            if( endsWith( m_pattern, '*' ) ) {
                m_pattern = m_pattern.substr( 0, m_pattern.size()-1 );
                m_wildcard = static_cast<WildcardPosition>( m_wildcard | WildcardAtEnd );
            }
        }
        virtual ~WildcardPattern();
        virtual bool matches( std::string const& str ) const {
            switch( m_wildcard ) {
                case NoWildcard:
                    return m_pattern == adjustCase( str );
                case WildcardAtStart:
                    return endsWith( adjustCase( str ), m_pattern );
                case WildcardAtEnd:
                    return startsWith( adjustCase( str ), m_pattern );
                case WildcardAtBothEnds:
                    return contains( adjustCase( str ), m_pattern );
            }

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunreachable-code"
#endif
            throw std::logic_error( "Unknown enum" );
#ifdef __clang__
#pragma clang diagnostic pop
#endif
        }
    private:
        std::string adjustCase( std::string const& str ) const {
            return m_caseSensitivity == CaseSensitive::No ? toLower( str ) : str;
        }
        CaseSensitive::Choice m_caseSensitivity;
        WildcardPosition m_wildcard;
        std::string m_pattern;
    };
}
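// Editor's note: sketch (not part of the header) of the internal
// WildcardPattern above, which supports a leading and/or trailing '*' only
// and backs test-name selection on the command line:
#if 0
Catch::WildcardPattern p( "vector*", Catch::CaseSensitive::No );
bool hit  = p.matches( "Vector can be resized" );  // true: case-folded prefix match
bool miss = p.matches( "list can be resized" );    // false
#endif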
#include <string>
#include <vector>

namespace Catch {

    class TestSpec {
        struct Pattern : SharedImpl<> {
            virtual ~Pattern();
            virtual bool matches( TestCaseInfo const& testCase ) const = 0;
        };
        class NamePattern : public Pattern {
        public:
            NamePattern( std::string const& name )
            : m_wildcardPattern( toLower( name ), CaseSensitive::No )
            {}
            virtual ~NamePattern();
            virtual bool matches( TestCaseInfo const& testCase ) const {
                return m_wildcardPattern.matches( toLower( testCase.name ) );
            }
        private:
            WildcardPattern m_wildcardPattern;
        };

        class TagPattern : public Pattern {
        public:
            TagPattern( std::string const& tag ) : m_tag( toLower( tag ) ) {}
            virtual ~TagPattern();
            virtual bool matches( TestCaseInfo const& testCase ) const {
                return testCase.lcaseTags.find( m_tag ) != testCase.lcaseTags.end();
            }
        private:
            std::string m_tag;
        };

        class ExcludedPattern : public Pattern {
        public:
            ExcludedPattern( Ptr<Pattern> const& underlyingPattern ) : m_underlyingPattern( underlyingPattern ) {}
            virtual ~ExcludedPattern();
            virtual bool matches( TestCaseInfo const& testCase ) const { return !m_underlyingPattern->matches( testCase ); }
        private:
            Ptr<Pattern> m_underlyingPattern;
        };

        struct Filter {
            std::vector<Ptr<Pattern> > m_patterns;

            bool matches( TestCaseInfo const& testCase ) const {
                // All patterns in a filter must match for the filter to be a match
                for( std::vector<Ptr<Pattern> >::const_iterator it = m_patterns.begin(), itEnd = m_patterns.end(); it != itEnd; ++it ) {
                    if( !(*it)->matches( testCase ) )
                        return false;
                }
                return true;
            }
        };

    public:
        bool hasFilters() const {
            return !m_filters.empty();
        }
        bool matches( TestCaseInfo const& testCase ) const {
            // A TestSpec matches if any filter matches
            for( std::vector<Filter>::const_iterator it = m_filters.begin(), itEnd = m_filters.end(); it != itEnd; ++it )
                if( it->matches( testCase ) )
                    return true;
            return false;
        }

    private:
        std::vector<Filter> m_filters;

        friend class TestSpecParser;
    };
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif

namespace Catch {

    class TestSpecParser {
        enum Mode{ None, Name, QuotedName, Tag, EscapedName };
        Mode m_mode;
        bool m_exclusion;
        std::size_t m_start, m_pos;
        std::string m_arg;
        std::vector<std::size_t> m_escapeChars;
        TestSpec::Filter m_currentFilter;
        TestSpec m_testSpec;
        ITagAliasRegistry const* m_tagAliases;

    public:
        TestSpecParser( ITagAliasRegistry const& tagAliases ) : m_tagAliases( &tagAliases ) {}

        TestSpecParser& parse( std::string const& arg ) {
            m_mode = None;
            m_exclusion = false;
            m_start = std::string::npos;
            m_arg = m_tagAliases->expandAliases( arg );
            m_escapeChars.clear();
            for( m_pos = 0; m_pos < m_arg.size(); ++m_pos )
                visitChar( m_arg[m_pos] );
            if( m_mode == Name )
                addPattern<TestSpec::NamePattern>();
            return *this;
        }
        TestSpec testSpec() {
            addFilter();
            return m_testSpec;
        }
    private:
        void visitChar( char c ) {
            if( m_mode == None ) {
                switch( c ) {
                case ' ': return;
                case '~': m_exclusion = true; return;
                case '[': return startNewMode( Tag, ++m_pos );
                case '"': return startNewMode( QuotedName, ++m_pos );
                case '\\': return escape();
                default: startNewMode( Name, m_pos ); break;
                }
            }
            if( m_mode == Name ) {
                if( c == ',' ) {
                    addPattern<TestSpec::NamePattern>();
                    addFilter();
                }
                else if( c == '[' ) {
                    if( subString() == "exclude:" )
                        m_exclusion = true;
                    else
                        addPattern<TestSpec::NamePattern>();
                    startNewMode( Tag, ++m_pos );
                }
                else if( c == '\\' )
                    escape();
            }
            else if( m_mode == EscapedName )
                m_mode = Name;
            else if( m_mode == QuotedName && c == '"' )
                addPattern<TestSpec::NamePattern>();
            else if( m_mode == Tag && c == ']' )
                addPattern<TestSpec::TagPattern>();
        }
        void startNewMode( Mode mode, std::size_t start ) {
            m_mode = mode;
            m_start = start;
        }
        void escape() {
            if( m_mode == None )
                m_start = m_pos;
            m_mode = EscapedName;
            m_escapeChars.push_back( m_pos );
        }
        std::string subString() const { return m_arg.substr( m_start, m_pos - m_start ); }

        template<typename T>
        void addPattern() {
            std::string token = subString();
            for( size_t i = 0; i < m_escapeChars.size(); ++i )
                token = token.substr( 0, m_escapeChars[i]-m_start-i ) + token.substr( m_escapeChars[i]-m_start-i+1 );
            m_escapeChars.clear();
            if( startsWith( token, "exclude:" ) ) {
                m_exclusion = true;
                token = token.substr( 8 );
            }
            if( !token.empty() ) {
                Ptr<TestSpec::Pattern> pattern = new T( token );
                if( m_exclusion )
                    pattern = new TestSpec::ExcludedPattern( pattern );
                m_currentFilter.m_patterns.push_back( pattern );
            }
            m_exclusion = false;
            m_mode = None;
        }
        void addFilter() {
            if( !m_currentFilter.m_patterns.empty() ) {
                m_testSpec.m_filters.push_back( m_currentFilter );
                m_currentFilter = TestSpec::Filter();
            }
        }
    };
    inline TestSpec parseTestSpec( std::string const& arg ) {
        return TestSpecParser( ITagAliasRegistry::get() ).parse( arg ).testSpec();
    }

} // namespace Catch

#ifdef __clang__
#pragma clang diagnostic pop
#endif
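// Editor's note: sketch (not part of the header) of the spec grammar the
// parser above accepts: names with '*' wildcards, "quoted names", [tags],
// '~' for exclusion, and ',' separating alternative filters. someTestCaseInfo
// is a hypothetical TestCaseInfo for illustration.
#if 0
Catch::TestSpec spec = Catch::parseTestSpec( "~[slow],\"exact name\"" );
// matches any test not tagged [slow], plus the test named exactly "exact name"
bool shouldRun = spec.matches( someTestCaseInfo );
#endif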
// #included from: catch_interfaces_config.h
#define TWOBLUECUBES_CATCH_INTERFACES_CONFIG_H_INCLUDED

#include <iosfwd>
#include <string>
#include <vector>

namespace Catch {

    struct Verbosity { enum Level {
        NoOutput = 0,
        Quiet,
        Normal
    }; };

    struct WarnAbout { enum What {
        Nothing = 0x00,
        NoAssertions = 0x01
    }; };

    struct ShowDurations { enum OrNot {
        DefaultForReporter,
        Always,
        Never
    }; };
    struct RunTests { enum InWhatOrder {
        InDeclarationOrder,
        InLexicographicalOrder,
        InRandomOrder
    }; };
    struct UseColour { enum YesOrNo {
        Auto,
        Yes,
        No
    }; };

    class TestSpec;

    struct IConfig : IShared {

        virtual ~IConfig();

        virtual bool allowThrows() const = 0;
        virtual std::ostream& stream() const = 0;
        virtual std::string name() const = 0;
        virtual bool includeSuccessfulResults() const = 0;
        virtual bool shouldDebugBreak() const = 0;
        virtual bool warnAboutMissingAssertions() const = 0;
        virtual int abortAfter() const = 0;
        virtual bool showInvisibles() const = 0;
        virtual ShowDurations::OrNot showDurations() const = 0;
        virtual TestSpec const& testSpec() const = 0;
        virtual RunTests::InWhatOrder runOrder() const = 0;
        virtual unsigned int rngSeed() const = 0;
        virtual UseColour::YesOrNo useColour() const = 0;
        virtual std::vector<std::string> const& getSectionsToRun() const = 0;
    };
}

// #included from: catch_stream.h
#define TWOBLUECUBES_CATCH_STREAM_H_INCLUDED

// #included from: catch_streambuf.h
#define TWOBLUECUBES_CATCH_STREAMBUF_H_INCLUDED

#include <streambuf>

namespace Catch {

    class StreamBufBase : public std::streambuf {
    public:
        virtual ~StreamBufBase() CATCH_NOEXCEPT;
    };
}

#include <streambuf>
#include <ostream>
#include <fstream>
#include <memory>

namespace Catch {

    std::ostream& cout();
    std::ostream& cerr();

    struct IStream {
        virtual ~IStream() CATCH_NOEXCEPT;
        virtual std::ostream& stream() const = 0;
    };

    class FileStream : public IStream {
        mutable std::ofstream m_ofs;
    public:
        FileStream( std::string const& filename );
        virtual ~FileStream() CATCH_NOEXCEPT;
    public: // IStream
        virtual std::ostream& stream() const CATCH_OVERRIDE;
    };

    class CoutStream : public IStream {
        mutable std::ostream m_os;
    public:
        CoutStream();
        virtual ~CoutStream() CATCH_NOEXCEPT;

    public: // IStream
        virtual std::ostream& stream() const CATCH_OVERRIDE;
    };

    class DebugOutStream : public IStream {
        CATCH_AUTO_PTR( StreamBufBase ) m_streamBuf;
        mutable std::ostream m_os;
    public:
        DebugOutStream();
        virtual ~DebugOutStream() CATCH_NOEXCEPT;

    public: // IStream
        virtual std::ostream& stream() const CATCH_OVERRIDE;
    };
}
#include <memory>
#include <vector>
#include <string>
#include <stdexcept>

#ifndef CATCH_CONFIG_CONSOLE_WIDTH
#define CATCH_CONFIG_CONSOLE_WIDTH 80
#endif

namespace Catch {

    struct ConfigData {

        ConfigData()
        :   listTests( false ),
            listTags( false ),
            listReporters( false ),
            listTestNamesOnly( false ),
            listExtraInfo( false ),
            showSuccessfulTests( false ),
            shouldDebugBreak( false ),
            noThrow( false ),
            showHelp( false ),
            showInvisibles( false ),
            filenamesAsTags( false ),
            abortAfter( -1 ),
            rngSeed( 0 ),
            verbosity( Verbosity::Normal ),
            warnings( WarnAbout::Nothing ),
            showDurations( ShowDurations::DefaultForReporter ),
            runOrder( RunTests::InDeclarationOrder ),
            useColour( UseColour::Auto )
        {}

        bool listTests;
        bool listTags;
        bool listReporters;
        bool listTestNamesOnly;
        bool listExtraInfo;

        bool showSuccessfulTests;
        bool shouldDebugBreak;
        bool noThrow;
        bool showHelp;
        bool showInvisibles;
        bool filenamesAsTags;

        int abortAfter;
        unsigned int rngSeed;

        Verbosity::Level verbosity;
        WarnAbout::What warnings;
        ShowDurations::OrNot showDurations;
        RunTests::InWhatOrder runOrder;
        UseColour::YesOrNo useColour;

        std::string outputFilename;
        std::string name;
        std::string processName;

        std::vector<std::string> reporterNames;
        std::vector<std::string> testsOrTags;
        std::vector<std::string> sectionsToRun;
    };

    class Config : public SharedImpl<IConfig> {
    private:
        Config( Config const& other );
        Config& operator = ( Config const& other );
        virtual void dummy();
    public:

        Config()
        {}

        Config( ConfigData const& data )
        :   m_data( data ),
            m_stream( openStream() )
        {
            if( !data.testsOrTags.empty() ) {
                TestSpecParser parser( ITagAliasRegistry::get() );
                for( std::size_t i = 0; i < data.testsOrTags.size(); ++i )
                    parser.parse( data.testsOrTags[i] );
                m_testSpec = parser.testSpec();
            }
        }

        virtual ~Config() {}

        std::string const& getFilename() const {
            return m_data.outputFilename ;
        }

        bool listTests() const { return m_data.listTests; }
        bool listTestNamesOnly() const { return m_data.listTestNamesOnly; }
        bool listTags() const { return m_data.listTags; }
        bool listReporters() const { return m_data.listReporters; }
        bool listExtraInfo() const { return m_data.listExtraInfo; }

        std::string getProcessName() const { return m_data.processName; }

        std::vector<std::string> const& getReporterNames() const { return m_data.reporterNames; }
        std::vector<std::string> const& getSectionsToRun() const CATCH_OVERRIDE { return m_data.sectionsToRun; }

        virtual TestSpec const& testSpec() const CATCH_OVERRIDE { return m_testSpec; }

        bool showHelp() const { return m_data.showHelp; }

        // IConfig interface
        virtual bool allowThrows() const CATCH_OVERRIDE                 { return !m_data.noThrow; }
        virtual std::ostream& stream() const CATCH_OVERRIDE             { return m_stream->stream(); }
        virtual std::string name() const CATCH_OVERRIDE                 { return m_data.name.empty() ? m_data.processName : m_data.name; }
        virtual bool includeSuccessfulResults() const CATCH_OVERRIDE    { return m_data.showSuccessfulTests; }
        virtual bool warnAboutMissingAssertions() const CATCH_OVERRIDE  { return m_data.warnings & WarnAbout::NoAssertions; }
        virtual ShowDurations::OrNot showDurations() const CATCH_OVERRIDE { return m_data.showDurations; }
        virtual RunTests::InWhatOrder runOrder() const CATCH_OVERRIDE   { return m_data.runOrder; }
        virtual unsigned int rngSeed() const CATCH_OVERRIDE             { return m_data.rngSeed; }
        virtual UseColour::YesOrNo useColour() const CATCH_OVERRIDE     { return m_data.useColour; }
        virtual bool shouldDebugBreak() const CATCH_OVERRIDE            { return m_data.shouldDebugBreak; }
        virtual int abortAfter() const CATCH_OVERRIDE                   { return m_data.abortAfter; }
        virtual bool showInvisibles() const CATCH_OVERRIDE              { return m_data.showInvisibles; }

    private:

        IStream const* openStream() {
            if( m_data.outputFilename.empty() )
                return new CoutStream();
            else if( m_data.outputFilename[0] == '%' ) {
                if( m_data.outputFilename == "%debug" )
                    return new DebugOutStream();
                else
                    throw std::domain_error( "Unrecognised stream: " + m_data.outputFilename );
            }
            else
                return new FileStream( m_data.outputFilename );
        }
        ConfigData m_data;

        CATCH_AUTO_PTR( IStream const ) m_stream;
        TestSpec m_testSpec;
    };

} // end namespace Catch

// #included from: catch_clara.h
#define TWOBLUECUBES_CATCH_CLARA_H_INCLUDED

// Use Catch's value for console width (store Clara's off to the side, if present)
#ifdef CLARA_CONFIG_CONSOLE_WIDTH
#define CATCH_TEMP_CLARA_CONFIG_CONSOLE_WIDTH CLARA_CONFIG_CONSOLE_WIDTH
#undef CLARA_CONFIG_CONSOLE_WIDTH
#endif
#define CLARA_CONFIG_CONSOLE_WIDTH CATCH_CONFIG_CONSOLE_WIDTH

// Declare Clara inside the Catch namespace
#define STITCH_CLARA_OPEN_NAMESPACE namespace Catch {
// #included from: ../external/clara.h

// Version 0.0.2.4

// Only use header guard if we are not using an outer namespace
#if !defined(TWOBLUECUBES_CLARA_H_INCLUDED) || defined(STITCH_CLARA_OPEN_NAMESPACE)

#ifndef STITCH_CLARA_OPEN_NAMESPACE
#define TWOBLUECUBES_CLARA_H_INCLUDED
#define STITCH_CLARA_OPEN_NAMESPACE
#define STITCH_CLARA_CLOSE_NAMESPACE
#else
#define STITCH_CLARA_CLOSE_NAMESPACE }
#endif

#define STITCH_TBC_TEXT_FORMAT_OPEN_NAMESPACE STITCH_CLARA_OPEN_NAMESPACE
// ----------- #included from tbc_text_format.h -----------

// Only use header guard if we are not using an outer namespace
#if !defined(TBC_TEXT_FORMAT_H_INCLUDED) || defined(STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE)
#ifndef STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE
#define TBC_TEXT_FORMAT_H_INCLUDED
#endif

#include <string>
#include <vector>
#include <sstream>
#include <algorithm>
#include <cctype>

// Use optional outer namespace
#ifdef STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE
namespace STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE {
#endif

namespace Tbc {

#ifdef TBC_TEXT_FORMAT_CONSOLE_WIDTH
    const unsigned int consoleWidth = TBC_TEXT_FORMAT_CONSOLE_WIDTH;
#else
    const unsigned int consoleWidth = 80;
#endif

    struct TextAttributes {
        TextAttributes()
        :   initialIndent( std::string::npos ),
            indent( 0 ),
            width( consoleWidth-1 ),
            tabChar( '\t' )
        {}

        TextAttributes& setInitialIndent( std::size_t _value )  { initialIndent = _value; return *this; }
        TextAttributes& setIndent( std::size_t _value )         { indent = _value; return *this; }
        TextAttributes& setWidth( std::size_t _value )          { width = _value; return *this; }
        TextAttributes& setTabChar( char _value )               { tabChar = _value; return *this; }

        std::size_t initialIndent;  // indent of first line, or npos
        std::size_t indent;         // indent of subsequent lines, or all if initialIndent is npos
        std::size_t width;          // maximum width of text, including indent. Longer text will wrap
        char tabChar;               // If this char is seen the indent is changed to current pos
    };

    class Text {
    public:
        Text( std::string const& _str, TextAttributes const& _attr = TextAttributes() )
        : attr( _attr )
        {
            std::string wrappableChars = " [({.,/|\\-";
            std::size_t indent = _attr.initialIndent != std::string::npos
                ? _attr.initialIndent
                : _attr.indent;
            std::string remainder = _str;

            while( !remainder.empty() ) {
                if( lines.size() >= 1000 ) {
                    lines.push_back( "... message truncated due to excessive size" );
                    return;
                }
                std::size_t tabPos = std::string::npos;
                std::size_t width = (std::min)( remainder.size(), _attr.width - indent );
                std::size_t pos = remainder.find_first_of( '\n' );
                if( pos <= width ) {
                    width = pos;
                }
                pos = remainder.find_last_of( _attr.tabChar, width );
                if( pos != std::string::npos ) {
                    tabPos = pos;
                    if( remainder[width] == '\n' )
                        width--;
                    remainder = remainder.substr( 0, tabPos ) + remainder.substr( tabPos+1 );
                }

                if( width == remainder.size() ) {
                    spliceLine( indent, remainder, width );
                }
                else if( remainder[width] == '\n' ) {
                    spliceLine( indent, remainder, width );
                    if( width <= 1 || remainder.size() != 1 )
                        remainder = remainder.substr( 1 );
                    indent = _attr.indent;
                }
                else {
                    pos = remainder.find_last_of( wrappableChars, width );
                    if( pos != std::string::npos && pos > 0 ) {
                        spliceLine( indent, remainder, pos );
                        if( remainder[0] == ' ' )
                            remainder = remainder.substr( 1 );
                    }
                    else {
                        spliceLine( indent, remainder, width-1 );
                        lines.back() += "-";
                    }
                    if( lines.size() == 1 )
                        indent = _attr.indent;
                    if( tabPos != std::string::npos )
                        indent += tabPos;
                }
            }
        }

        void spliceLine( std::size_t _indent, std::string& _remainder, std::size_t _pos ) {
            lines.push_back( std::string( _indent, ' ' ) + _remainder.substr( 0, _pos ) );
            _remainder = _remainder.substr( _pos );
        }

        typedef std::vector<std::string>::const_iterator const_iterator;

        const_iterator begin() const { return lines.begin(); }
        const_iterator end() const { return lines.end(); }
        std::string const& last() const { return lines.back(); }
        std::size_t size() const { return lines.size(); }
        std::string const& operator[]( std::size_t _index ) const { return lines[_index]; }
        std::string toString() const {
            std::ostringstream oss;
            oss << *this;
            return oss.str();
        }

        inline friend std::ostream& operator << ( std::ostream& _stream, Text const& _text ) {
            for( Text::const_iterator it = _text.begin(), itEnd = _text.end();
                it != itEnd; ++it ) {
                if( it != _text.begin() )
                    _stream << "\n";
                _stream << *it;
            }
            return _stream;
        }

    private:
        std::string str;
        TextAttributes attr;
        std::vector<std::string> lines;
    };

} // end namespace Tbc

#ifdef STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE
} // end outer namespace
#endif

#endif // TBC_TEXT_FORMAT_H_INCLUDED

// ----------- end of #include from tbc_text_format.h -----------
// ........... back in clara.h

#undef STITCH_TBC_TEXT_FORMAT_OPEN_NAMESPACE
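// Editor's note: sketch (not part of the header) of the Tbc::Text word-wrapper
// above. Because of the STITCH_* macros it lives at Catch::Tbc in this build.
// The <iostream> use is illustrative only.
#if 0
Catch::Tbc::Text wrapped( "some long help text that should be wrapped",
                          Catch::Tbc::TextAttributes().setWidth( 20 ).setIndent( 2 ) );
std::cout << wrapped << "\n";   // prints the text wrapped to 20 columns, indented by 2
#endif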
// ----------- #included from clara_compilers.h -----------

#ifndef TWOBLUECUBES_CLARA_COMPILERS_H_INCLUDED
#define TWOBLUECUBES_CLARA_COMPILERS_H_INCLUDED

// Detect a number of compiler features - mostly C++11/14 conformance - by compiler
// The following features are defined:
//
// CLARA_CONFIG_CPP11_NULLPTR : is nullptr supported?
// CLARA_CONFIG_CPP11_NOEXCEPT : is noexcept supported?
// CLARA_CONFIG_CPP11_GENERATED_METHODS : The delete and default keywords for compiler generated methods
// CLARA_CONFIG_CPP11_OVERRIDE : is override supported?
// CLARA_CONFIG_CPP11_UNIQUE_PTR : is unique_ptr supported (otherwise use auto_ptr)
// CLARA_CONFIG_CPP11_OR_GREATER : Is C++11 supported?
// CLARA_CONFIG_VARIADIC_MACROS : are variadic macros supported?

// In general each macro has a _NO_ form
// (e.g. CLARA_CONFIG_CPP11_NO_NULLPTR) which disables the feature.

// Many features, at point of detection, define an _INTERNAL_ macro, so they
// can be combined, en-mass, with the _NO_ forms later.

// All the C++11 features can be disabled with CLARA_CONFIG_NO_CPP11

#ifdef __clang__

#if __has_feature(cxx_nullptr)
#define CLARA_INTERNAL_CONFIG_CPP11_NULLPTR
#endif

#if __has_feature(cxx_noexcept)
#define CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT
#endif

#endif // __clang__

////////////////////////////////////////////////////////////////////////////////
// GCC
#ifdef __GNUC__

#if __GNUC__ == 4 && __GNUC_MINOR__ >= 6 && defined(__GXX_EXPERIMENTAL_CXX0X__)
#define CLARA_INTERNAL_CONFIG_CPP11_NULLPTR
#endif

// - otherwise more recent versions define __cplusplus >= 201103L
// and will get picked up below

#endif // __GNUC__

////////////////////////////////////////////////////////////////////////////////
// Visual C++
#ifdef _MSC_VER

#if (_MSC_VER >= 1600)
#define CLARA_INTERNAL_CONFIG_CPP11_NULLPTR
#define CLARA_INTERNAL_CONFIG_CPP11_UNIQUE_PTR
#endif

#if (_MSC_VER >= 1900 ) // (VC++ 13 (VS2015))
#define CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT
#define CLARA_INTERNAL_CONFIG_CPP11_GENERATED_METHODS
#endif

#endif // _MSC_VER

////////////////////////////////////////////////////////////////////////////////
// C++ language feature support

// catch all support for C++11
#if defined(__cplusplus) && __cplusplus >= 201103L

#define CLARA_CPP11_OR_GREATER

#if !defined(CLARA_INTERNAL_CONFIG_CPP11_NULLPTR)
#define CLARA_INTERNAL_CONFIG_CPP11_NULLPTR
#endif

#ifndef CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT
#define CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT
#endif

#ifndef CLARA_INTERNAL_CONFIG_CPP11_GENERATED_METHODS
#define CLARA_INTERNAL_CONFIG_CPP11_GENERATED_METHODS
#endif

#if !defined(CLARA_INTERNAL_CONFIG_CPP11_OVERRIDE)
#define CLARA_INTERNAL_CONFIG_CPP11_OVERRIDE
#endif
#if !defined(CLARA_INTERNAL_CONFIG_CPP11_UNIQUE_PTR)
#define CLARA_INTERNAL_CONFIG_CPP11_UNIQUE_PTR
#endif

#endif // __cplusplus >= 201103L

// Now set the actual defines based on the above + anything the user has configured
#if defined(CLARA_INTERNAL_CONFIG_CPP11_NULLPTR) && !defined(CLARA_CONFIG_CPP11_NO_NULLPTR) && !defined(CLARA_CONFIG_CPP11_NULLPTR) && !defined(CLARA_CONFIG_NO_CPP11)
#define CLARA_CONFIG_CPP11_NULLPTR
#endif
#if defined(CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT) && !defined(CLARA_CONFIG_CPP11_NO_NOEXCEPT) && !defined(CLARA_CONFIG_CPP11_NOEXCEPT) && !defined(CLARA_CONFIG_NO_CPP11)
#define CLARA_CONFIG_CPP11_NOEXCEPT
#endif
#if defined(CLARA_INTERNAL_CONFIG_CPP11_GENERATED_METHODS) && !defined(CLARA_CONFIG_CPP11_NO_GENERATED_METHODS) && !defined(CLARA_CONFIG_CPP11_GENERATED_METHODS) && !defined(CLARA_CONFIG_NO_CPP11)
#define CLARA_CONFIG_CPP11_GENERATED_METHODS
#endif
#if defined(CLARA_INTERNAL_CONFIG_CPP11_OVERRIDE) && !defined(CLARA_CONFIG_NO_OVERRIDE) && !defined(CLARA_CONFIG_CPP11_OVERRIDE) && !defined(CLARA_CONFIG_NO_CPP11)
#define CLARA_CONFIG_CPP11_OVERRIDE
#endif
#if defined(CLARA_INTERNAL_CONFIG_CPP11_UNIQUE_PTR) && !defined(CLARA_CONFIG_NO_UNIQUE_PTR) && !defined(CLARA_CONFIG_CPP11_UNIQUE_PTR) && !defined(CLARA_CONFIG_NO_CPP11)
#define CLARA_CONFIG_CPP11_UNIQUE_PTR
#endif

// noexcept support:
#if defined(CLARA_CONFIG_CPP11_NOEXCEPT) && !defined(CLARA_NOEXCEPT)
#define CLARA_NOEXCEPT noexcept
#  define CLARA_NOEXCEPT_IS(x) noexcept(x)
#else
#define CLARA_NOEXCEPT throw()
#  define CLARA_NOEXCEPT_IS(x)
#endif

// nullptr support
#ifdef CLARA_CONFIG_CPP11_NULLPTR
#define CLARA_NULL nullptr
#else
#define CLARA_NULL NULL
#endif

// override support
#ifdef CLARA_CONFIG_CPP11_OVERRIDE
#define CLARA_OVERRIDE override
#else
#define CLARA_OVERRIDE
#endif

// unique_ptr support
#ifdef CLARA_CONFIG_CPP11_UNIQUE_PTR
#   define CLARA_AUTO_PTR( T ) std::unique_ptr<T>
#else
#   define CLARA_AUTO_PTR( T ) std::auto_ptr<T>
#endif

#endif // TWOBLUECUBES_CLARA_COMPILERS_H_INCLUDED

// ----------- end of #include from clara_compilers.h -----------
// ........... back in clara.h

#include <map>
#include <algorithm>
#include <stdexcept>

#if defined(WIN32) || defined(__WIN32__) || defined(_WIN32) || defined(_MSC_VER)
#define CLARA_PLATFORM_WINDOWS
#endif

// Use optional outer namespace
#ifdef STITCH_CLARA_OPEN_NAMESPACE
STITCH_CLARA_OPEN_NAMESPACE
#endif

namespace Clara {

    struct UnpositionalTag {};

    extern UnpositionalTag _;

#ifdef CLARA_CONFIG_MAIN
    UnpositionalTag _;
#endif

    namespace Detail {

#ifdef CLARA_CONSOLE_WIDTH
        const unsigned int consoleWidth = CLARA_CONFIG_CONSOLE_WIDTH;
#else
        const unsigned int consoleWidth = 80;
#endif

        using namespace Tbc;

        inline bool startsWith( std::string const& str, std::string const& prefix ) {
            return str.size() >= prefix.size() && str.substr( 0, prefix.size() ) == prefix;
        }

        template<typename T> struct RemoveConstRef{ typedef T type; };
        template<typename T> struct RemoveConstRef<T&>{ typedef T type; };
        template<typename T> struct RemoveConstRef<T const&>{ typedef T type; };
        template<typename T> struct RemoveConstRef<T const>{ typedef T type; };

        template<typename T>    struct IsBool       { static const bool value = false; };
        template<>              struct IsBool<bool> { static const bool value = true; };

        template<typename T>
        void convertInto( std::string const& _source, T& _dest ) {
            std::stringstream ss;
            ss << _source;
            ss >> _dest;
            if( ss.fail() )
                throw std::runtime_error( "Unable to convert " + _source + " to destination type" );
        }
        inline void convertInto( std::string const& _source, std::string& _dest ) {
            _dest = _source;
        }
        char toLowerCh(char c) {
            return static_cast<char>( std::tolower( c ) );
        }
        inline void convertInto( std::string const& _source, bool& _dest ) {
            std::string sourceLC = _source;
            std::transform( sourceLC.begin(), sourceLC.end(), sourceLC.begin(), toLowerCh );
            if( sourceLC == "y" || sourceLC == "1" || sourceLC == "true" || sourceLC == "yes" || sourceLC == "on" )
                _dest = true;
            else if( sourceLC == "n" || sourceLC == "0" || sourceLC == "false" || sourceLC == "no" || sourceLC == "off" )
                _dest = false;
            else
                throw std::runtime_error( "Expected a boolean value but did not recognise:\n '" + _source + "'" );
        }

        template<typename ConfigT>
        struct IArgFunction {
            virtual ~IArgFunction() {}
#ifdef CLARA_CONFIG_CPP11_GENERATED_METHODS
            IArgFunction()                      = default;
            IArgFunction( IArgFunction const& ) = default;
#endif
            virtual void set( ConfigT& config, std::string const& value ) const = 0;
            virtual bool takesArg() const = 0;
            virtual IArgFunction* clone() const = 0;
        };

        template<typename ConfigT>
        class BoundArgFunction {
        public:
            BoundArgFunction() : functionObj( CLARA_NULL ) {}
            BoundArgFunction( IArgFunction<ConfigT>* _functionObj ) : functionObj( _functionObj ) {}
            BoundArgFunction( BoundArgFunction const& other ) : functionObj( other.functionObj ? other.functionObj->clone() : CLARA_NULL ) {}
            BoundArgFunction& operator = ( BoundArgFunction const& other ) {
                IArgFunction<ConfigT>* newFunctionObj = other.functionObj ? other.functionObj->clone() : CLARA_NULL;
                delete functionObj;
                functionObj = newFunctionObj;
                return *this;
            }
            ~BoundArgFunction() { delete functionObj; }

            void set( ConfigT& config, std::string const& value ) const {
                functionObj->set( config, value );
            }
            bool takesArg() const { return functionObj->takesArg(); }

            bool isSet() const {
                return functionObj != CLARA_NULL;
            }
        private:
            IArgFunction<ConfigT>* functionObj;
        };
        template<typename C>
        struct NullBinder : IArgFunction<C>{
            virtual void set( C&, std::string const& ) const {}
            virtual bool takesArg() const { return true; }
            virtual IArgFunction<C>* clone() const { return new NullBinder( *this ); }
        };

        template<typename C, typename M>
        struct BoundDataMember : IArgFunction<C>{
            BoundDataMember( M C::* _member ) : member( _member ) {}
            virtual void set( C& p, std::string const& stringValue ) const {
                convertInto( stringValue, p.*member );
            }
            virtual bool takesArg() const { return !IsBool<M>::value; }
            virtual IArgFunction<C>* clone() const { return new BoundDataMember( *this ); }
            M C::* member;
        };
        template<typename C, typename M>
        struct BoundUnaryMethod : IArgFunction<C>{
            BoundUnaryMethod( void (C::*_member)( M ) ) : member( _member ) {}
            virtual void set( C& p, std::string const& stringValue ) const {
                typename RemoveConstRef<M>::type value;
                convertInto( stringValue, value );
                (p.*member)( value );
            }
            virtual bool takesArg() const { return !IsBool<M>::value; }
            virtual IArgFunction<C>* clone() const { return new BoundUnaryMethod( *this ); }
            void (C::*member)( M );
        };
        template<typename C>
        struct BoundNullaryMethod : IArgFunction<C>{
            BoundNullaryMethod( void (C::*_member)() ) : member( _member ) {}
            virtual void set( C& p, std::string const& stringValue ) const {
                bool value;
                convertInto( stringValue, value );
                if( value )
                    (p.*member)();
            }
            virtual bool takesArg() const { return false; }
            virtual IArgFunction<C>* clone() const { return new BoundNullaryMethod( *this ); }
            void (C::*member)();
        };

        template<typename C>
        struct BoundUnaryFunction : IArgFunction<C>{
            BoundUnaryFunction( void (*_function)( C& ) ) : function( _function ) {}
            virtual void set( C& obj, std::string const& stringValue ) const {
                bool value;
                convertInto( stringValue, value );
                if( value )
                    function( obj );
            }
            virtual bool takesArg() const { return false; }
            virtual IArgFunction<C>* clone() const { return new BoundUnaryFunction( *this ); }
            void (*function)( C& );
        };

        template<typename C, typename T>
        struct BoundBinaryFunction : IArgFunction<C>{
            BoundBinaryFunction( void (*_function)( C&, T ) ) : function( _function ) {}
            virtual void set( C& obj, std::string const& stringValue ) const {
                typename RemoveConstRef<T>::type value;
                convertInto( stringValue, value );
                function( obj, value );
            }
            virtual bool takesArg() const { return !IsBool<T>::value; }
            virtual IArgFunction<C>* clone() const { return new BoundBinaryFunction( *this ); }
            void (*function)( C&, T );
        };

    } // namespace Detail

    inline std::vector<std::string> argsToVector( int argc, char const* const* const argv ) {
        std::vector<std::string> args( static_cast<std::size_t>( argc ) );
        for( std::size_t i = 0; i < static_cast<std::size_t>( argc ); ++i )
            args[i] = argv[i];

        return args;
    }

    class Parser {
        enum Mode { None, MaybeShortOpt, SlashOpt, ShortOpt, LongOpt, Positional };
        Mode mode;
        std::size_t from;
        bool inQuotes;
    public:

        struct Token {
            enum Type { Positional, ShortOpt, LongOpt };
            Token( Type _type, std::string const& _data ) : type( _type ), data( _data ) {}
            Type type;
            std::string data;
        };

        Parser() : mode( None ), from( 0 ), inQuotes( false ){}

        void parseIntoTokens( std::vector<std::string> const& args, std::vector<Token>& tokens ) {
            const std::string doubleDash = "--";
            for( std::size_t i = 1; i < args.size() && args[i] != doubleDash; ++i )
                parseIntoTokens( args[i], tokens);
        }

        void parseIntoTokens( std::string const& arg, std::vector<Token>& tokens ) {
            for( std::size_t i = 0; i < arg.size(); ++i ) {
                char c = arg[i];
                if( c == '"' )
                    inQuotes = !inQuotes;
                mode = handleMode( i, c, arg, tokens );
            }
            mode = handleMode( arg.size(), '\0', arg, tokens );
        }
        Mode handleMode( std::size_t i, char c, std::string const& arg, std::vector<Token>& tokens ) {
            switch( mode ) {
                case None: return handleNone( i, c );
                case MaybeShortOpt: return handleMaybeShortOpt( i, c );
                case ShortOpt:
                case LongOpt:
                case SlashOpt: return handleOpt( i, c, arg, tokens );
                case Positional: return handlePositional( i, c, arg, tokens );
                default: throw std::logic_error( "Unknown mode" );
            }
        }

        Mode handleNone( std::size_t i, char c ) {
            if( inQuotes ) {
                from = i;
                return Positional;
            }
            switch( c ) {
                case '-': return MaybeShortOpt;
#ifdef CLARA_PLATFORM_WINDOWS
                case '/': from = i+1; return SlashOpt;
#endif
                default: from = i; return Positional;
            }
        }
        Mode handleMaybeShortOpt( std::size_t i, char c ) {
            switch( c ) {
                case '-': from = i+1; return LongOpt;
                default: from = i; return ShortOpt;
            }
        }

        Mode handleOpt( std::size_t i, char c, std::string const& arg, std::vector<Token>& tokens ) {
            if( std::string( ":=\0", 3 ).find( c ) == std::string::npos )
                return mode;

            std::string optName = arg.substr( from, i-from );
            if( mode == ShortOpt )
                for( std::size_t j = 0; j < optName.size(); ++j )
                    tokens.push_back( Token( Token::ShortOpt, optName.substr( j, 1 ) ) );
            else if( mode == SlashOpt && optName.size() == 1 )
                tokens.push_back( Token( Token::ShortOpt, optName ) );
            else
                tokens.push_back( Token( Token::LongOpt, optName ) );
            return None;
        }
        Mode handlePositional( std::size_t i, char c, std::string const& arg, std::vector<Token>& tokens ) {
            if( inQuotes || std::string( "\0", 1 ).find( c ) == std::string::npos )
                return mode;

            std::string data = arg.substr( from, i-from );
            tokens.push_back( Token( Token::Positional, data ) );
            return None;
        }
    };
) { if( !first ) oss << ", "; oss << "--" << longName; } if( !placeholder.empty() ) oss << " <" << placeholder << ">"; return oss.str(); } }; typedef CLARA_AUTO_PTR( Arg ) ArgAutoPtr; friend void addOptName( Arg& arg, std::string const& optName ) { if( optName.empty() ) return; if( Detail::startsWith( optName, "--" ) ) { if( !arg.longName.empty() ) throw std::logic_error( "Only one long opt may be specified. '" + arg.longName + "' already specified, now attempting to add '" + optName + "'" ); arg.longName = optName.substr( 2 ); } else if( Detail::startsWith( optName, "-" ) ) arg.shortNames.push_back( optName.substr( 1 ) ); else throw std::logic_error( "option must begin with - or --. Option was: '" + optName + "'" ); } friend void setPositionalArg( Arg& arg, int position ) { arg.position = position; } class ArgBuilder { public: ArgBuilder( Arg* arg ) : m_arg( arg ) {} // Bind a non-boolean data member (requires placeholder string) template void bind( M C::* field, std::string const& placeholder ) { m_arg->boundField = new Detail::BoundDataMember( field ); m_arg->placeholder = placeholder; } // Bind a boolean data member (no placeholder required) template void bind( bool C::* field ) { m_arg->boundField = new Detail::BoundDataMember( field ); } // Bind a method taking a single, non-boolean argument (requires a placeholder string) template void bind( void (C::* unaryMethod)( M ), std::string const& placeholder ) { m_arg->boundField = new Detail::BoundUnaryMethod( unaryMethod ); m_arg->placeholder = placeholder; } // Bind a method taking a single, boolean argument (no placeholder string required) template void bind( void (C::* unaryMethod)( bool ) ) { m_arg->boundField = new Detail::BoundUnaryMethod( unaryMethod ); } // Bind a method that takes no arguments (will be called if opt is present) template void bind( void (C::* nullaryMethod)() ) { m_arg->boundField = new Detail::BoundNullaryMethod( nullaryMethod ); } // Bind a free function taking a single argument - the object to operate on (no placeholder string required) template void bind( void (* unaryFunction)( C& ) ) { m_arg->boundField = new Detail::BoundUnaryFunction( unaryFunction ); } // Bind a free function taking a single argument - the object to operate on (requires a placeholder string) template void bind( void (* binaryFunction)( C&, T ), std::string const& placeholder ) { m_arg->boundField = new Detail::BoundBinaryFunction( binaryFunction ); m_arg->placeholder = placeholder; } ArgBuilder& describe( std::string const& description ) { m_arg->description = description; return *this; } ArgBuilder& detail( std::string const& detail ) { m_arg->detail = detail; return *this; } protected: Arg* m_arg; }; class OptBuilder : public ArgBuilder { public: OptBuilder( Arg* arg ) : ArgBuilder( arg ) {} OptBuilder( OptBuilder& other ) : ArgBuilder( other ) {} OptBuilder& operator[]( std::string const& optName ) { addOptName( *ArgBuilder::m_arg, optName ); return *this; } }; public: CommandLine() : m_boundProcessName( new Detail::NullBinder() ), m_highestSpecifiedArgPosition( 0 ), m_throwOnUnrecognisedTokens( false ) {} CommandLine( CommandLine const& other ) : m_boundProcessName( other.m_boundProcessName ), m_options ( other.m_options ), m_positionalArgs( other.m_positionalArgs ), m_highestSpecifiedArgPosition( other.m_highestSpecifiedArgPosition ), m_throwOnUnrecognisedTokens( other.m_throwOnUnrecognisedTokens ) { if( other.m_floatingArg.get() ) m_floatingArg.reset( new Arg( *other.m_floatingArg ) ); } CommandLine& setThrowOnUnrecognisedTokens( 
bool shouldThrow = true ) { m_throwOnUnrecognisedTokens = shouldThrow; return *this; } OptBuilder operator[]( std::string const& optName ) { m_options.push_back( Arg() ); addOptName( m_options.back(), optName ); OptBuilder builder( &m_options.back() ); return builder; } ArgBuilder operator[]( int position ) { m_positionalArgs.insert( std::make_pair( position, Arg() ) ); if( position > m_highestSpecifiedArgPosition ) m_highestSpecifiedArgPosition = position; setPositionalArg( m_positionalArgs[position], position ); ArgBuilder builder( &m_positionalArgs[position] ); return builder; } // Invoke this with the _ instance ArgBuilder operator[]( UnpositionalTag ) { if( m_floatingArg.get() ) throw std::logic_error( "Only one unpositional argument can be added" ); m_floatingArg.reset( new Arg() ); ArgBuilder builder( m_floatingArg.get() ); return builder; } template void bindProcessName( M C::* field ) { m_boundProcessName = new Detail::BoundDataMember( field ); } template void bindProcessName( void (C::*_unaryMethod)( M ) ) { m_boundProcessName = new Detail::BoundUnaryMethod( _unaryMethod ); } void optUsage( std::ostream& os, std::size_t indent = 0, std::size_t width = Detail::consoleWidth ) const { typename std::vector::const_iterator itBegin = m_options.begin(), itEnd = m_options.end(), it; std::size_t maxWidth = 0; for( it = itBegin; it != itEnd; ++it ) maxWidth = (std::max)( maxWidth, it->commands().size() ); for( it = itBegin; it != itEnd; ++it ) { Detail::Text usage( it->commands(), Detail::TextAttributes() .setWidth( maxWidth+indent ) .setIndent( indent ) ); Detail::Text desc( it->description, Detail::TextAttributes() .setWidth( width - maxWidth - 3 ) ); for( std::size_t i = 0; i < (std::max)( usage.size(), desc.size() ); ++i ) { std::string usageCol = i < usage.size() ? usage[i] : ""; os << usageCol; if( i < desc.size() && !desc[i].empty() ) os << std::string( indent + 2 + maxWidth - usageCol.size(), ' ' ) << desc[i]; os << "\n"; } } } std::string optUsage() const { std::ostringstream oss; optUsage( oss ); return oss.str(); } void argSynopsis( std::ostream& os ) const { for( int i = 1; i <= m_highestSpecifiedArgPosition; ++i ) { if( i > 1 ) os << " "; typename std::map::const_iterator it = m_positionalArgs.find( i ); if( it != m_positionalArgs.end() ) os << "<" << it->second.placeholder << ">"; else if( m_floatingArg.get() ) os << "<" << m_floatingArg->placeholder << ">"; else throw std::logic_error( "non consecutive positional arguments with no floating args" ); } // !TBD No indication of mandatory args if( m_floatingArg.get() ) { if( m_highestSpecifiedArgPosition > 1 ) os << " "; os << "[<" << m_floatingArg->placeholder << "> ...]"; } } std::string argSynopsis() const { std::ostringstream oss; argSynopsis( oss ); return oss.str(); } void usage( std::ostream& os, std::string const& procName ) const { validate(); os << "usage:\n " << procName << " "; argSynopsis( os ); if( !m_options.empty() ) { os << " [options]\n\nwhere options are: \n"; optUsage( os, 2 ); } os << "\n"; } std::string usage( std::string const& procName ) const { std::ostringstream oss; usage( oss, procName ); return oss.str(); } ConfigT parse( std::vector const& args ) const { ConfigT config; parseInto( args, config ); return config; } std::vector parseInto( std::vector const& args, ConfigT& config ) const { std::string processName = args.empty() ? 
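// ------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): tying the builder
// and parser together, assuming the hypothetical MyOptions type and a
// hypothetical makeMyParser() helper that sets up the options:
//
//     MyOptions opts;
//     Clara::CommandLine<MyOptions> cli = makeMyParser();
//     std::vector<Clara::Parser::Token> unused =
//         cli.parseInto( Clara::argsToVector( argc, argv ), opts );
//     std::cout << cli.usage( "myTool" );
//     // -> "usage:\n  myTool ... [options]\n\nwhere options are: ..."
// ------------------------------------------------------------------------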
std::string() : args[0]; std::size_t lastSlash = processName.find_last_of( "/\\" ); if( lastSlash != std::string::npos ) processName = processName.substr( lastSlash+1 ); m_boundProcessName.set( config, processName ); std::vector tokens; Parser parser; parser.parseIntoTokens( args, tokens ); return populate( tokens, config ); } std::vector populate( std::vector const& tokens, ConfigT& config ) const { validate(); std::vector unusedTokens = populateOptions( tokens, config ); unusedTokens = populateFixedArgs( unusedTokens, config ); unusedTokens = populateFloatingArgs( unusedTokens, config ); return unusedTokens; } std::vector populateOptions( std::vector const& tokens, ConfigT& config ) const { std::vector unusedTokens; std::vector errors; for( std::size_t i = 0; i < tokens.size(); ++i ) { Parser::Token const& token = tokens[i]; typename std::vector::const_iterator it = m_options.begin(), itEnd = m_options.end(); for(; it != itEnd; ++it ) { Arg const& arg = *it; try { if( ( token.type == Parser::Token::ShortOpt && arg.hasShortName( token.data ) ) || ( token.type == Parser::Token::LongOpt && arg.hasLongName( token.data ) ) ) { if( arg.takesArg() ) { if( i == tokens.size()-1 || tokens[i+1].type != Parser::Token::Positional ) errors.push_back( "Expected argument to option: " + token.data ); else arg.boundField.set( config, tokens[++i].data ); } else { arg.boundField.set( config, "true" ); } break; } } catch( std::exception& ex ) { errors.push_back( std::string( ex.what() ) + "\n- while parsing: (" + arg.commands() + ")" ); } } if( it == itEnd ) { if( token.type == Parser::Token::Positional || !m_throwOnUnrecognisedTokens ) unusedTokens.push_back( token ); else if( errors.empty() && m_throwOnUnrecognisedTokens ) errors.push_back( "unrecognised option: " + token.data ); } } if( !errors.empty() ) { std::ostringstream oss; for( std::vector::const_iterator it = errors.begin(), itEnd = errors.end(); it != itEnd; ++it ) { if( it != errors.begin() ) oss << "\n"; oss << *it; } throw std::runtime_error( oss.str() ); } return unusedTokens; } std::vector populateFixedArgs( std::vector const& tokens, ConfigT& config ) const { std::vector unusedTokens; int position = 1; for( std::size_t i = 0; i < tokens.size(); ++i ) { Parser::Token const& token = tokens[i]; typename std::map::const_iterator it = m_positionalArgs.find( position ); if( it != m_positionalArgs.end() ) it->second.boundField.set( config, token.data ); else unusedTokens.push_back( token ); if( token.type == Parser::Token::Positional ) position++; } return unusedTokens; } std::vector populateFloatingArgs( std::vector const& tokens, ConfigT& config ) const { if( !m_floatingArg.get() ) return tokens; std::vector unusedTokens; for( std::size_t i = 0; i < tokens.size(); ++i ) { Parser::Token const& token = tokens[i]; if( token.type == Parser::Token::Positional ) m_floatingArg->boundField.set( config, token.data ); else unusedTokens.push_back( token ); } return unusedTokens; } void validate() const { if( m_options.empty() && m_positionalArgs.empty() && !m_floatingArg.get() ) throw std::logic_error( "No options or arguments specified" ); for( typename std::vector::const_iterator it = m_options.begin(), itEnd = m_options.end(); it != itEnd; ++it ) it->validate(); } private: Detail::BoundArgFunction m_boundProcessName; std::vector m_options; std::map m_positionalArgs; ArgAutoPtr m_floatingArg; int m_highestSpecifiedArgPosition; bool m_throwOnUnrecognisedTokens; }; } // end namespace Clara STITCH_CLARA_CLOSE_NAMESPACE #undef STITCH_CLARA_OPEN_NAMESPACE 
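// ------------------------------------------------------------------------
// Illustrative note (not part of the original source): populate() above
// routes tokens in three passes -- named options first, then fixed
// positional slots, then the floating argument -- so for "myTool -v in.txt":
//
//     tokens: ShortOpt("v"), Positional("in.txt")
//     populateOptions:      consumes "-v"; "in.txt" is left over
//     populateFixedArgs:    "in.txt" fills positional slot 1, if one is bound
//     populateFloatingArgs: otherwise it goes to the floating arg, if any
// ------------------------------------------------------------------------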
#undef STITCH_CLARA_CLOSE_NAMESPACE
#endif // TWOBLUECUBES_CLARA_H_INCLUDED
#undef STITCH_CLARA_OPEN_NAMESPACE

// Restore Clara's value for console width, if present
#ifdef CATCH_TEMP_CLARA_CONFIG_CONSOLE_WIDTH
#define CLARA_CONFIG_CONSOLE_WIDTH CATCH_TEMP_CLARA_CONFIG_CONSOLE_WIDTH
#undef CATCH_TEMP_CLARA_CONFIG_CONSOLE_WIDTH
#endif

#include <fstream>
#include <ctime>

namespace Catch {

    inline void abortAfterFirst( ConfigData& config ) { config.abortAfter = 1; }
    inline void abortAfterX( ConfigData& config, int x ) {
        if( x < 1 )
            throw std::runtime_error( "Value after -x or --abortx must be greater than zero" );
        config.abortAfter = x;
    }
    inline void addTestOrTags( ConfigData& config, std::string const& _testSpec ) { config.testsOrTags.push_back( _testSpec ); }
    inline void addSectionToRun( ConfigData& config, std::string const& sectionName ) { config.sectionsToRun.push_back( sectionName ); }
    inline void addReporterName( ConfigData& config, std::string const& _reporterName ) { config.reporterNames.push_back( _reporterName ); }

    inline void addWarning( ConfigData& config, std::string const& _warning ) {
        if( _warning == "NoAssertions" )
            config.warnings = static_cast<WarnAbout::What>( config.warnings | WarnAbout::NoAssertions );
        else
            throw std::runtime_error( "Unrecognised warning: '" + _warning + '\'' );
    }
    inline void setOrder( ConfigData& config, std::string const& order ) {
        if( startsWith( "declared", order ) )
            config.runOrder = RunTests::InDeclarationOrder;
        else if( startsWith( "lexical", order ) )
            config.runOrder = RunTests::InLexicographicalOrder;
        else if( startsWith( "random", order ) )
            config.runOrder = RunTests::InRandomOrder;
        else
            throw std::runtime_error( "Unrecognised ordering: '" + order + '\'' );
    }
    inline void setRngSeed( ConfigData& config, std::string const& seed ) {
        if( seed == "time" ) {
            config.rngSeed = static_cast<unsigned int>( std::time(0) );
        }
        else {
            std::stringstream ss;
            ss << seed;
            ss >> config.rngSeed;
            if( ss.fail() )
                throw std::runtime_error( "Argument to --rng-seed should be the word 'time' or a number" );
        }
    }
    inline void setVerbosity( ConfigData& config, int level ) {
        // !TBD: accept strings?
        config.verbosity = static_cast<Verbosity::Level>( level );
    }
    inline void setShowDurations( ConfigData& config, bool _showDurations ) {
        config.showDurations = _showDurations
            ? ShowDurations::Always
            : ShowDurations::Never;
    }
    inline void setUseColour( ConfigData& config, std::string const& value ) {
        std::string mode = toLower( value );

        if( mode == "yes" )
            config.useColour = UseColour::Yes;
        else if( mode == "no" )
            config.useColour = UseColour::No;
        else if( mode == "auto" )
            config.useColour = UseColour::Auto;
        else
            throw std::runtime_error( "colour mode must be one of: auto, yes or no" );
    }
    inline void forceColour( ConfigData& config ) {
        config.useColour = UseColour::Yes;
    }
    inline void loadTestNamesFromFile( ConfigData& config, std::string const& _filename ) {
        std::ifstream f( _filename.c_str() );
        if( !f.is_open() )
            throw std::domain_error( "Unable to load input file: " + _filename );

        std::string line;
        while( std::getline( f, line ) ) {
            line = trim(line);
            if( !line.empty() && !startsWith( line, '#' ) ) {
                if( !startsWith( line, '"' ) )
                    line = '"' + line + '"';
                addTestOrTags( config, line + ',' );
            }
        }
    }

    inline Clara::CommandLine<Catch::ConfigData> makeCommandLineParser() {

        using namespace Clara;
        CommandLine<ConfigData> cli;

        cli.bindProcessName( &ConfigData::processName );

        cli["-?"]["-h"]["--help"]
            .describe( "display usage information" )
            .bind( &ConfigData::showHelp );

        cli["-l"]["--list-tests"]
            .describe( "list all/matching test cases" )
            .bind( &ConfigData::listTests );

        cli["-t"]["--list-tags"]
            .describe( "list all/matching tags" )
            .bind( &ConfigData::listTags );

        cli["-s"]["--success"]
            .describe( "include successful tests in output" )
            .bind( &ConfigData::showSuccessfulTests );

        cli["-b"]["--break"]
            .describe( "break into debugger on failure" )
            .bind( &ConfigData::shouldDebugBreak );

        cli["-e"]["--nothrow"]
            .describe( "skip exception tests" )
            .bind( &ConfigData::noThrow );

        cli["-i"]["--invisibles"]
            .describe( "show invisibles (tabs, newlines)" )
            .bind( &ConfigData::showInvisibles );

        cli["-o"]["--out"]
            .describe( "output filename" )
            .bind( &ConfigData::outputFilename, "filename" );

        cli["-r"]["--reporter"]
//            .placeholder( "name[:filename]" )
            .describe( "reporter to use (defaults to console)" )
            .bind( &addReporterName, "name" );

        cli["-n"]["--name"]
            .describe( "suite name" )
            .bind( &ConfigData::name, "name" );

        cli["-a"]["--abort"]
            .describe( "abort at first failure" )
            .bind( &abortAfterFirst );

        cli["-x"]["--abortx"]
            .describe( "abort after x failures" )
            .bind( &abortAfterX, "no. failures" );

        cli["-w"]["--warn"]
            .describe( "enable warnings" )
            .bind( &addWarning, "warning name" );

// - needs updating if reinstated
//        cli.into( &setVerbosity )
//            .describe( "level of verbosity (0=no output)" )
//            .shortOpt( "v")
//            .longOpt( "verbosity" )
//            .placeholder( "level" );

        cli[_]
            .describe( "which test or tests to use" )
            .bind( &addTestOrTags, "test name, pattern or tags" );

        cli["-d"]["--durations"]
            .describe( "show test durations" )
            .bind( &setShowDurations, "yes|no" );

        cli["-f"]["--input-file"]
            .describe( "load test names to run from a file" )
            .bind( &loadTestNamesFromFile, "filename" );

        cli["-#"]["--filenames-as-tags"]
            .describe( "adds a tag for the filename" )
            .bind( &ConfigData::filenamesAsTags );

        cli["-c"]["--section"]
            .describe( "specify section to run" )
            .bind( &addSectionToRun, "section name" );

        // Less common commands which don't have a short form
        cli["--list-test-names-only"]
            .describe( "list all/matching test cases names only" )
            .bind( &ConfigData::listTestNamesOnly );

        cli["--list-extra-info"]
            .describe( "list all/matching test cases with more info" )
            .bind( &ConfigData::listExtraInfo );

        cli["--list-reporters"]
            .describe( "list all reporters" )
            .bind( &ConfigData::listReporters );

        cli["--order"]
            .describe( "test case order (defaults to decl)" )
            .bind( &setOrder, "decl|lex|rand" );

        cli["--rng-seed"]
            .describe( "set a specific seed for random numbers" )
            .bind( &setRngSeed, "'time'|number" );

        cli["--force-colour"]
            .describe( "force colourised output (deprecated)" )
            .bind( &forceColour );

        cli["--use-colour"]
            .describe( "should output be colourised" )
            .bind( &setUseColour, "yes|no" );

        return cli;
    }

} // end namespace Catch

// #included from: internal/catch_list.hpp
#define TWOBLUECUBES_CATCH_LIST_HPP_INCLUDED

// #included from: catch_text.h
#define TWOBLUECUBES_CATCH_TEXT_H_INCLUDED

#define TBC_TEXT_FORMAT_CONSOLE_WIDTH CATCH_CONFIG_CONSOLE_WIDTH

#define CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE Catch
// #included from: ../external/tbc_text_format.h
// Only use header guard if we are not using an outer namespace
#ifndef CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE
# ifdef TWOBLUECUBES_TEXT_FORMAT_H_INCLUDED
#  ifndef TWOBLUECUBES_TEXT_FORMAT_H_ALREADY_INCLUDED
#   define TWOBLUECUBES_TEXT_FORMAT_H_ALREADY_INCLUDED
#  endif
# else
#  define TWOBLUECUBES_TEXT_FORMAT_H_INCLUDED
# endif
#endif

#ifndef TWOBLUECUBES_TEXT_FORMAT_H_ALREADY_INCLUDED

#include <string>
#include <vector>
#include <sstream>

// Use optional outer namespace
#ifdef CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE
namespace CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE {
#endif

namespace Tbc {

#ifdef TBC_TEXT_FORMAT_CONSOLE_WIDTH
    const unsigned int consoleWidth = TBC_TEXT_FORMAT_CONSOLE_WIDTH;
#else
    const unsigned int consoleWidth = 80;
#endif

    struct TextAttributes {
        TextAttributes()
        :   initialIndent( std::string::npos ),
            indent( 0 ),
            width( consoleWidth-1 )
        {}

        TextAttributes& setInitialIndent( std::size_t _value )  { initialIndent = _value; return *this; }
        TextAttributes& setIndent( std::size_t _value )         { indent = _value; return *this; }
        TextAttributes& setWidth( std::size_t _value )          { width = _value; return *this; }

        std::size_t initialIndent;  // indent of first line, or npos
        std::size_t indent;         // indent of subsequent lines, or all if initialIndent is npos
        std::size_t width;          // maximum width of text, including indent.
                                    // Longer text will wrap
    };

    class Text {
    public:
        Text( std::string const& _str, TextAttributes const& _attr = TextAttributes() )
        : attr( _attr )
        {
            const std::string wrappableBeforeChars = "[({<\t";
            const std::string wrappableAfterChars = "])}>-,./|\\";
            const std::string wrappableInsteadOfChars = " \n\r";
            std::string indent = _attr.initialIndent != std::string::npos
                ? std::string( _attr.initialIndent, ' ' )
                : std::string( _attr.indent, ' ' );

            typedef std::string::const_iterator iterator;
            iterator it = _str.begin();
            const iterator strEnd = _str.end();

            while( it != strEnd ) {

                if( lines.size() >= 1000 ) {
                    lines.push_back( "... message truncated due to excessive size" );
                    return;
                }

                std::string suffix;
                std::size_t width = (std::min)( static_cast<std::size_t>( strEnd-it ), _attr.width-static_cast<std::size_t>( indent.size() ) );
                iterator itEnd = it+width;
                iterator itNext = _str.end();

                iterator itNewLine = std::find( it, itEnd, '\n' );
                if( itNewLine != itEnd )
                    itEnd = itNewLine;

                if( itEnd != strEnd ) {
                    bool foundWrapPoint = false;
                    iterator findIt = itEnd;
                    do {
                        if( wrappableAfterChars.find( *findIt ) != std::string::npos && findIt != itEnd ) {
                            itEnd = findIt+1;
                            itNext = findIt+1;
                            foundWrapPoint = true;
                        }
                        else if( findIt > it && wrappableBeforeChars.find( *findIt ) != std::string::npos ) {
                            itEnd = findIt;
                            itNext = findIt;
                            foundWrapPoint = true;
                        }
                        else if( wrappableInsteadOfChars.find( *findIt ) != std::string::npos ) {
                            itNext = findIt+1;
                            itEnd = findIt;
                            foundWrapPoint = true;
                        }

                        if( findIt == it )
                            break;
                        else
                            --findIt;
                    }
                    while( !foundWrapPoint );

                    if( !foundWrapPoint ) {
                        // No good wrap char, so we'll break mid word and add a hyphen
                        --itEnd;
                        itNext = itEnd;
                        suffix = "-";
                    }
                    else {
                        while( itEnd > it && wrappableInsteadOfChars.find( *(itEnd-1) ) != std::string::npos )
                            --itEnd;
                    }
                }
                lines.push_back( indent + std::string( it, itEnd ) + suffix );

                if( indent.size() != _attr.indent )
                    indent = std::string( _attr.indent, ' ' );
                it = itNext;
            }
        }

        typedef std::vector<std::string>::const_iterator const_iterator;
        const_iterator begin() const { return lines.begin(); }
        const_iterator end() const { return lines.end(); }
        std::string const& last() const { return lines.back(); }
        std::size_t size() const { return lines.size(); }
        std::string const& operator[]( std::size_t _index ) const { return lines[_index]; }
        std::string toString() const {
            std::ostringstream oss;
            oss << *this;
            return oss.str();
        }

        inline friend std::ostream& operator << ( std::ostream& _stream, Text const& _text ) {
            for( Text::const_iterator it = _text.begin(), itEnd = _text.end();
                it != itEnd; ++it ) {
                if( it != _text.begin() )
                    _stream << "\n";
                _stream << *it;
            }
            return _stream;
        }

    private:
        std::string str;
        TextAttributes attr;
        std::vector<std::string> lines;
    };

} // end namespace Tbc

#ifdef CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE
} // end outer namespace
#endif

#endif // TWOBLUECUBES_TEXT_FORMAT_H_ALREADY_INCLUDED
#undef CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE

namespace Catch {
    using Tbc::Text;
    using Tbc::TextAttributes;
}

// #included from: catch_console_colour.hpp
#define TWOBLUECUBES_CATCH_CONSOLE_COLOUR_HPP_INCLUDED

namespace Catch {

    struct Colour {
        enum Code {
            None = 0,

            White,
            Red,
            Green,
            Blue,
            Cyan,
            Yellow,
            Grey,

            Bright = 0x10,

            BrightRed = Bright | Red,
            BrightGreen = Bright | Green,
            LightGrey = Bright | Grey,
            BrightWhite = Bright | White,

            // By intention
            FileName = LightGrey,
            Warning = Yellow,
            ResultError = BrightRed,
            ResultSuccess = BrightGreen,
            ResultExpectedFailure = Warning,

            Error = BrightRed,
            Success = Green,

            OriginalExpression = Cyan,
            ReconstructedExpression = Yellow,

            SecondaryText = LightGrey,
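// ------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): Tbc::Text, defined
// above, greedily wraps a string to the configured width, preferring
// natural break characters and hyphenating mid-word only as a last resort:
//
//     Catch::Text wrapped( "some reasonably long message",
//                          Catch::TextAttributes().setWidth( 12 ) );
//     std::cout << wrapped << "\n";    // one wrapped line per element
//     std::size_t n = wrapped.size();  // number of wrapped lines
// ------------------------------------------------------------------------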
            Headers = White
        };

        // Use constructed object for RAII guard
        Colour( Code _colourCode );
        Colour( Colour const& other );
        ~Colour();

        // Use static method for one-shot changes
        static void use( Code _colourCode );

    private:
        bool m_moved;
    };

    inline std::ostream& operator << ( std::ostream& os, Colour const& ) { return os; }

} // end namespace Catch

// #included from: catch_interfaces_reporter.h
#define TWOBLUECUBES_CATCH_INTERFACES_REPORTER_H_INCLUDED

#include <string>
#include <ostream>
#include <map>

namespace Catch
{
    struct ReporterConfig {
        explicit ReporterConfig( Ptr<IConfig const> const& _fullConfig )
        :   m_stream( &_fullConfig->stream() ), m_fullConfig( _fullConfig ) {}

        ReporterConfig( Ptr<IConfig const> const& _fullConfig, std::ostream& _stream )
        :   m_stream( &_stream ), m_fullConfig( _fullConfig ) {}

        std::ostream& stream() const            { return *m_stream; }
        Ptr<IConfig const> fullConfig() const   { return m_fullConfig; }

    private:
        std::ostream* m_stream;
        Ptr<IConfig const> m_fullConfig;
    };

    struct ReporterPreferences {
        ReporterPreferences()
        : shouldRedirectStdOut( false )
        {}

        bool shouldRedirectStdOut;
    };

    template<typename T>
    struct LazyStat : Option<T> {
        LazyStat() : used( false ) {}
        LazyStat& operator=( T const& _value ) {
            Option<T>::operator=( _value );
            used = false;
            return *this;
        }
        void reset() {
            Option<T>::reset();
            used = false;
        }
        bool used;
    };

    struct TestRunInfo {
        TestRunInfo( std::string const& _name ) : name( _name ) {}
        std::string name;
    };
    struct GroupInfo {
        GroupInfo(  std::string const& _name,
                    std::size_t _groupIndex,
                    std::size_t _groupsCount )
        :   name( _name ),
            groupIndex( _groupIndex ),
            groupsCounts( _groupsCount )
        {}

        std::string name;
        std::size_t groupIndex;
        std::size_t groupsCounts;
    };

    struct AssertionStats {
        AssertionStats( AssertionResult const& _assertionResult,
                        std::vector<MessageInfo> const& _infoMessages,
                        Totals const& _totals )
        :   assertionResult( _assertionResult ),
            infoMessages( _infoMessages ),
            totals( _totals )
        {
            if( assertionResult.hasMessage() ) {
                // Copy message into messages list.
// !TBD This should have been done earlier, somewhere MessageBuilder builder( assertionResult.getTestMacroName(), assertionResult.getSourceInfo(), assertionResult.getResultType() ); builder << assertionResult.getMessage(); builder.m_info.message = builder.m_stream.str(); infoMessages.push_back( builder.m_info ); } } virtual ~AssertionStats(); # ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS AssertionStats( AssertionStats const& ) = default; AssertionStats( AssertionStats && ) = default; AssertionStats& operator = ( AssertionStats const& ) = default; AssertionStats& operator = ( AssertionStats && ) = default; # endif AssertionResult assertionResult; std::vector infoMessages; Totals totals; }; struct SectionStats { SectionStats( SectionInfo const& _sectionInfo, Counts const& _assertions, double _durationInSeconds, bool _missingAssertions ) : sectionInfo( _sectionInfo ), assertions( _assertions ), durationInSeconds( _durationInSeconds ), missingAssertions( _missingAssertions ) {} virtual ~SectionStats(); # ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS SectionStats( SectionStats const& ) = default; SectionStats( SectionStats && ) = default; SectionStats& operator = ( SectionStats const& ) = default; SectionStats& operator = ( SectionStats && ) = default; # endif SectionInfo sectionInfo; Counts assertions; double durationInSeconds; bool missingAssertions; }; struct TestCaseStats { TestCaseStats( TestCaseInfo const& _testInfo, Totals const& _totals, std::string const& _stdOut, std::string const& _stdErr, bool _aborting ) : testInfo( _testInfo ), totals( _totals ), stdOut( _stdOut ), stdErr( _stdErr ), aborting( _aborting ) {} virtual ~TestCaseStats(); # ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS TestCaseStats( TestCaseStats const& ) = default; TestCaseStats( TestCaseStats && ) = default; TestCaseStats& operator = ( TestCaseStats const& ) = default; TestCaseStats& operator = ( TestCaseStats && ) = default; # endif TestCaseInfo testInfo; Totals totals; std::string stdOut; std::string stdErr; bool aborting; }; struct TestGroupStats { TestGroupStats( GroupInfo const& _groupInfo, Totals const& _totals, bool _aborting ) : groupInfo( _groupInfo ), totals( _totals ), aborting( _aborting ) {} TestGroupStats( GroupInfo const& _groupInfo ) : groupInfo( _groupInfo ), aborting( false ) {} virtual ~TestGroupStats(); # ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS TestGroupStats( TestGroupStats const& ) = default; TestGroupStats( TestGroupStats && ) = default; TestGroupStats& operator = ( TestGroupStats const& ) = default; TestGroupStats& operator = ( TestGroupStats && ) = default; # endif GroupInfo groupInfo; Totals totals; bool aborting; }; struct TestRunStats { TestRunStats( TestRunInfo const& _runInfo, Totals const& _totals, bool _aborting ) : runInfo( _runInfo ), totals( _totals ), aborting( _aborting ) {} virtual ~TestRunStats(); # ifndef CATCH_CONFIG_CPP11_GENERATED_METHODS TestRunStats( TestRunStats const& _other ) : runInfo( _other.runInfo ), totals( _other.totals ), aborting( _other.aborting ) {} # else TestRunStats( TestRunStats const& ) = default; TestRunStats( TestRunStats && ) = default; TestRunStats& operator = ( TestRunStats const& ) = default; TestRunStats& operator = ( TestRunStats && ) = default; # endif TestRunInfo runInfo; Totals totals; bool aborting; }; class MultipleReporters; struct IStreamingReporter : IShared { virtual ~IStreamingReporter(); // Implementing class must also provide the following static method: // static std::string getDescription(); virtual ReporterPreferences getPreferences() 
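// ------------------------------------------------------------------------
// Illustrative note (not part of the original source): implementations of
// the IStreamingReporter interface above receive events in strictly nested
// order, one *Starting / *Ended pair per level:
//
//     testRunStarting
//       testGroupStarting
//         testCaseStarting
//           sectionStarting ... assertionStarting / assertionEnded ...
//           sectionEnded
//         testCaseEnded
//       testGroupEnded
//     testRunEnded
// ------------------------------------------------------------------------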
const = 0; virtual void noMatchingTestCases( std::string const& spec ) = 0; virtual void testRunStarting( TestRunInfo const& testRunInfo ) = 0; virtual void testGroupStarting( GroupInfo const& groupInfo ) = 0; virtual void testCaseStarting( TestCaseInfo const& testInfo ) = 0; virtual void sectionStarting( SectionInfo const& sectionInfo ) = 0; virtual void assertionStarting( AssertionInfo const& assertionInfo ) = 0; // The return value indicates if the messages buffer should be cleared: virtual bool assertionEnded( AssertionStats const& assertionStats ) = 0; virtual void sectionEnded( SectionStats const& sectionStats ) = 0; virtual void testCaseEnded( TestCaseStats const& testCaseStats ) = 0; virtual void testGroupEnded( TestGroupStats const& testGroupStats ) = 0; virtual void testRunEnded( TestRunStats const& testRunStats ) = 0; virtual void skipTest( TestCaseInfo const& testInfo ) = 0; virtual MultipleReporters* tryAsMulti() { return CATCH_NULL; } }; struct IReporterFactory : IShared { virtual ~IReporterFactory(); virtual IStreamingReporter* create( ReporterConfig const& config ) const = 0; virtual std::string getDescription() const = 0; }; struct IReporterRegistry { typedef std::map > FactoryMap; typedef std::vector > Listeners; virtual ~IReporterRegistry(); virtual IStreamingReporter* create( std::string const& name, Ptr const& config ) const = 0; virtual FactoryMap const& getFactories() const = 0; virtual Listeners const& getListeners() const = 0; }; Ptr addReporter( Ptr const& existingReporter, Ptr const& additionalReporter ); } #include #include namespace Catch { inline std::size_t listTests( Config const& config ) { TestSpec testSpec = config.testSpec(); if( config.testSpec().hasFilters() ) Catch::cout() << "Matching test cases:\n"; else { Catch::cout() << "All available test cases:\n"; testSpec = TestSpecParser( ITagAliasRegistry::get() ).parse( "*" ).testSpec(); } std::size_t matchedTests = 0; TextAttributes nameAttr, descAttr, tagsAttr; nameAttr.setInitialIndent( 2 ).setIndent( 4 ); descAttr.setIndent( 4 ); tagsAttr.setIndent( 6 ); std::vector matchedTestCases = filterTests( getAllTestCasesSorted( config ), testSpec, config ); for( std::vector::const_iterator it = matchedTestCases.begin(), itEnd = matchedTestCases.end(); it != itEnd; ++it ) { matchedTests++; TestCaseInfo const& testCaseInfo = it->getTestCaseInfo(); Colour::Code colour = testCaseInfo.isHidden() ? 
Colour::SecondaryText : Colour::None; Colour colourGuard( colour ); Catch::cout() << Text( testCaseInfo.name, nameAttr ) << std::endl; if( config.listExtraInfo() ) { Catch::cout() << " " << testCaseInfo.lineInfo << std::endl; std::string description = testCaseInfo.description; if( description.empty() ) description = "(NO DESCRIPTION)"; Catch::cout() << Text( description, descAttr ) << std::endl; } if( !testCaseInfo.tags.empty() ) Catch::cout() << Text( testCaseInfo.tagsAsString, tagsAttr ) << std::endl; } if( !config.testSpec().hasFilters() ) Catch::cout() << pluralise( matchedTests, "test case" ) << '\n' << std::endl; else Catch::cout() << pluralise( matchedTests, "matching test case" ) << '\n' << std::endl; return matchedTests; } inline std::size_t listTestsNamesOnly( Config const& config ) { TestSpec testSpec = config.testSpec(); if( !config.testSpec().hasFilters() ) testSpec = TestSpecParser( ITagAliasRegistry::get() ).parse( "*" ).testSpec(); std::size_t matchedTests = 0; std::vector matchedTestCases = filterTests( getAllTestCasesSorted( config ), testSpec, config ); for( std::vector::const_iterator it = matchedTestCases.begin(), itEnd = matchedTestCases.end(); it != itEnd; ++it ) { matchedTests++; TestCaseInfo const& testCaseInfo = it->getTestCaseInfo(); if( startsWith( testCaseInfo.name, '#' ) ) Catch::cout() << '"' << testCaseInfo.name << '"'; else Catch::cout() << testCaseInfo.name; if ( config.listExtraInfo() ) Catch::cout() << "\t@" << testCaseInfo.lineInfo; Catch::cout() << std::endl; } return matchedTests; } struct TagInfo { TagInfo() : count ( 0 ) {} void add( std::string const& spelling ) { ++count; spellings.insert( spelling ); } std::string all() const { std::string out; for( std::set::const_iterator it = spellings.begin(), itEnd = spellings.end(); it != itEnd; ++it ) out += "[" + *it + "]"; return out; } std::set spellings; std::size_t count; }; inline std::size_t listTags( Config const& config ) { TestSpec testSpec = config.testSpec(); if( config.testSpec().hasFilters() ) Catch::cout() << "Tags for matching test cases:\n"; else { Catch::cout() << "All available tags:\n"; testSpec = TestSpecParser( ITagAliasRegistry::get() ).parse( "*" ).testSpec(); } std::map tagCounts; std::vector matchedTestCases = filterTests( getAllTestCasesSorted( config ), testSpec, config ); for( std::vector::const_iterator it = matchedTestCases.begin(), itEnd = matchedTestCases.end(); it != itEnd; ++it ) { for( std::set::const_iterator tagIt = it->getTestCaseInfo().tags.begin(), tagItEnd = it->getTestCaseInfo().tags.end(); tagIt != tagItEnd; ++tagIt ) { std::string tagName = *tagIt; std::string lcaseTagName = toLower( tagName ); std::map::iterator countIt = tagCounts.find( lcaseTagName ); if( countIt == tagCounts.end() ) countIt = tagCounts.insert( std::make_pair( lcaseTagName, TagInfo() ) ).first; countIt->second.add( tagName ); } } for( std::map::const_iterator countIt = tagCounts.begin(), countItEnd = tagCounts.end(); countIt != countItEnd; ++countIt ) { std::ostringstream oss; oss << " " << std::setw(2) << countIt->second.count << " "; Text wrapper( countIt->second.all(), TextAttributes() .setInitialIndent( 0 ) .setIndent( oss.str().size() ) .setWidth( CATCH_CONFIG_CONSOLE_WIDTH-10 ) ); Catch::cout() << oss.str() << wrapper << '\n'; } Catch::cout() << pluralise( tagCounts.size(), "tag" ) << '\n' << std::endl; return tagCounts.size(); } inline std::size_t listReporters( Config const& /*config*/ ) { Catch::cout() << "Available reporters:\n"; IReporterRegistry::FactoryMap const& factories = 
getRegistryHub().getReporterRegistry().getFactories(); IReporterRegistry::FactoryMap::const_iterator itBegin = factories.begin(), itEnd = factories.end(), it; std::size_t maxNameLen = 0; for(it = itBegin; it != itEnd; ++it ) maxNameLen = (std::max)( maxNameLen, it->first.size() ); for(it = itBegin; it != itEnd; ++it ) { Text wrapper( it->second->getDescription(), TextAttributes() .setInitialIndent( 0 ) .setIndent( 7+maxNameLen ) .setWidth( CATCH_CONFIG_CONSOLE_WIDTH - maxNameLen-8 ) ); Catch::cout() << " " << it->first << ':' << std::string( maxNameLen - it->first.size() + 2, ' ' ) << wrapper << '\n'; } Catch::cout() << std::endl; return factories.size(); } inline Option list( Config const& config ) { Option listedCount; if( config.listTests() || ( config.listExtraInfo() && !config.listTestNamesOnly() ) ) listedCount = listedCount.valueOr(0) + listTests( config ); if( config.listTestNamesOnly() ) listedCount = listedCount.valueOr(0) + listTestsNamesOnly( config ); if( config.listTags() ) listedCount = listedCount.valueOr(0) + listTags( config ); if( config.listReporters() ) listedCount = listedCount.valueOr(0) + listReporters( config ); return listedCount; } } // end namespace Catch // #included from: internal/catch_run_context.hpp #define TWOBLUECUBES_CATCH_RUNNER_IMPL_HPP_INCLUDED // #included from: catch_test_case_tracker.hpp #define TWOBLUECUBES_CATCH_TEST_CASE_TRACKER_HPP_INCLUDED #include #include #include #include #include CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS namespace Catch { namespace TestCaseTracking { struct NameAndLocation { std::string name; SourceLineInfo location; NameAndLocation( std::string const& _name, SourceLineInfo const& _location ) : name( _name ), location( _location ) {} }; struct ITracker : SharedImpl<> { virtual ~ITracker(); // static queries virtual NameAndLocation const& nameAndLocation() const = 0; // dynamic queries virtual bool isComplete() const = 0; // Successfully completed or failed virtual bool isSuccessfullyCompleted() const = 0; virtual bool isOpen() const = 0; // Started but not complete virtual bool hasChildren() const = 0; virtual ITracker& parent() = 0; // actions virtual void close() = 0; // Successfully complete virtual void fail() = 0; virtual void markAsNeedingAnotherRun() = 0; virtual void addChild( Ptr const& child ) = 0; virtual ITracker* findChild( NameAndLocation const& nameAndLocation ) = 0; virtual void openChild() = 0; // Debug/ checking virtual bool isSectionTracker() const = 0; virtual bool isIndexTracker() const = 0; }; class TrackerContext { enum RunState { NotStarted, Executing, CompletedCycle }; Ptr m_rootTracker; ITracker* m_currentTracker; RunState m_runState; public: static TrackerContext& instance() { static TrackerContext s_instance; return s_instance; } TrackerContext() : m_currentTracker( CATCH_NULL ), m_runState( NotStarted ) {} ITracker& startRun(); void endRun() { m_rootTracker.reset(); m_currentTracker = CATCH_NULL; m_runState = NotStarted; } void startCycle() { m_currentTracker = m_rootTracker.get(); m_runState = Executing; } void completeCycle() { m_runState = CompletedCycle; } bool completedCycle() const { return m_runState == CompletedCycle; } ITracker& currentTracker() { return *m_currentTracker; } void setCurrentTracker( ITracker* tracker ) { m_currentTracker = tracker; } }; class TrackerBase : public ITracker { protected: enum CycleState { NotStarted, Executing, ExecutingChildren, NeedsAnotherRun, CompletedSuccessfully, Failed }; class TrackerHasName { NameAndLocation m_nameAndLocation; public: TrackerHasName( 
NameAndLocation const& nameAndLocation ) : m_nameAndLocation( nameAndLocation ) {} bool operator ()( Ptr const& tracker ) { return tracker->nameAndLocation().name == m_nameAndLocation.name && tracker->nameAndLocation().location == m_nameAndLocation.location; } }; typedef std::vector > Children; NameAndLocation m_nameAndLocation; TrackerContext& m_ctx; ITracker* m_parent; Children m_children; CycleState m_runState; public: TrackerBase( NameAndLocation const& nameAndLocation, TrackerContext& ctx, ITracker* parent ) : m_nameAndLocation( nameAndLocation ), m_ctx( ctx ), m_parent( parent ), m_runState( NotStarted ) {} virtual ~TrackerBase(); virtual NameAndLocation const& nameAndLocation() const CATCH_OVERRIDE { return m_nameAndLocation; } virtual bool isComplete() const CATCH_OVERRIDE { return m_runState == CompletedSuccessfully || m_runState == Failed; } virtual bool isSuccessfullyCompleted() const CATCH_OVERRIDE { return m_runState == CompletedSuccessfully; } virtual bool isOpen() const CATCH_OVERRIDE { return m_runState != NotStarted && !isComplete(); } virtual bool hasChildren() const CATCH_OVERRIDE { return !m_children.empty(); } virtual void addChild( Ptr const& child ) CATCH_OVERRIDE { m_children.push_back( child ); } virtual ITracker* findChild( NameAndLocation const& nameAndLocation ) CATCH_OVERRIDE { Children::const_iterator it = std::find_if( m_children.begin(), m_children.end(), TrackerHasName( nameAndLocation ) ); return( it != m_children.end() ) ? it->get() : CATCH_NULL; } virtual ITracker& parent() CATCH_OVERRIDE { assert( m_parent ); // Should always be non-null except for root return *m_parent; } virtual void openChild() CATCH_OVERRIDE { if( m_runState != ExecutingChildren ) { m_runState = ExecutingChildren; if( m_parent ) m_parent->openChild(); } } virtual bool isSectionTracker() const CATCH_OVERRIDE { return false; } virtual bool isIndexTracker() const CATCH_OVERRIDE { return false; } void open() { m_runState = Executing; moveToThis(); if( m_parent ) m_parent->openChild(); } virtual void close() CATCH_OVERRIDE { // Close any still open children (e.g. 
generators) while( &m_ctx.currentTracker() != this ) m_ctx.currentTracker().close(); switch( m_runState ) { case NotStarted: case CompletedSuccessfully: case Failed: throw std::logic_error( "Illogical state" ); case NeedsAnotherRun: break;; case Executing: m_runState = CompletedSuccessfully; break; case ExecutingChildren: if( m_children.empty() || m_children.back()->isComplete() ) m_runState = CompletedSuccessfully; break; default: throw std::logic_error( "Unexpected state" ); } moveToParent(); m_ctx.completeCycle(); } virtual void fail() CATCH_OVERRIDE { m_runState = Failed; if( m_parent ) m_parent->markAsNeedingAnotherRun(); moveToParent(); m_ctx.completeCycle(); } virtual void markAsNeedingAnotherRun() CATCH_OVERRIDE { m_runState = NeedsAnotherRun; } private: void moveToParent() { assert( m_parent ); m_ctx.setCurrentTracker( m_parent ); } void moveToThis() { m_ctx.setCurrentTracker( this ); } }; class SectionTracker : public TrackerBase { std::vector m_filters; public: SectionTracker( NameAndLocation const& nameAndLocation, TrackerContext& ctx, ITracker* parent ) : TrackerBase( nameAndLocation, ctx, parent ) { if( parent ) { while( !parent->isSectionTracker() ) parent = &parent->parent(); SectionTracker& parentSection = static_cast( *parent ); addNextFilters( parentSection.m_filters ); } } virtual ~SectionTracker(); virtual bool isSectionTracker() const CATCH_OVERRIDE { return true; } static SectionTracker& acquire( TrackerContext& ctx, NameAndLocation const& nameAndLocation ) { SectionTracker* section = CATCH_NULL; ITracker& currentTracker = ctx.currentTracker(); if( ITracker* childTracker = currentTracker.findChild( nameAndLocation ) ) { assert( childTracker ); assert( childTracker->isSectionTracker() ); section = static_cast( childTracker ); } else { section = new SectionTracker( nameAndLocation, ctx, ¤tTracker ); currentTracker.addChild( section ); } if( !ctx.completedCycle() ) section->tryOpen(); return *section; } void tryOpen() { if( !isComplete() && (m_filters.empty() || m_filters[0].empty() || m_filters[0] == m_nameAndLocation.name ) ) open(); } void addInitialFilters( std::vector const& filters ) { if( !filters.empty() ) { m_filters.push_back(""); // Root - should never be consulted m_filters.push_back(""); // Test Case - not a section filter m_filters.insert( m_filters.end(), filters.begin(), filters.end() ); } } void addNextFilters( std::vector const& filters ) { if( filters.size() > 1 ) m_filters.insert( m_filters.end(), ++filters.begin(), filters.end() ); } }; class IndexTracker : public TrackerBase { int m_size; int m_index; public: IndexTracker( NameAndLocation const& nameAndLocation, TrackerContext& ctx, ITracker* parent, int size ) : TrackerBase( nameAndLocation, ctx, parent ), m_size( size ), m_index( -1 ) {} virtual ~IndexTracker(); virtual bool isIndexTracker() const CATCH_OVERRIDE { return true; } static IndexTracker& acquire( TrackerContext& ctx, NameAndLocation const& nameAndLocation, int size ) { IndexTracker* tracker = CATCH_NULL; ITracker& currentTracker = ctx.currentTracker(); if( ITracker* childTracker = currentTracker.findChild( nameAndLocation ) ) { assert( childTracker ); assert( childTracker->isIndexTracker() ); tracker = static_cast( childTracker ); } else { tracker = new IndexTracker( nameAndLocation, ctx, ¤tTracker, size ); currentTracker.addChild( tracker ); } if( !ctx.completedCycle() && !tracker->isComplete() ) { if( tracker->m_runState != ExecutingChildren && tracker->m_runState != NeedsAnotherRun ) tracker->moveNext(); tracker->open(); } return 
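// ------------------------------------------------------------------------
// Illustrative note (not part of the original source): SectionTracker and
// IndexTracker above drive Catch's re-entry model -- a test case body is
// executed repeatedly, opening one not-yet-completed SECTION subtree per
// pass, until the root tracker reports isSuccessfullyCompleted():
//
//     pass 1: enter SECTION A (B is skipped), close A as complete
//     pass 2: skip A (complete), enter SECTION B, close B
//     -> the test case finishes once every leaf section has run
// ------------------------------------------------------------------------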
*tracker; } int index() const { return m_index; } void moveNext() { m_index++; m_children.clear(); } virtual void close() CATCH_OVERRIDE { TrackerBase::close(); if( m_runState == CompletedSuccessfully && m_index < m_size-1 ) m_runState = Executing; } }; inline ITracker& TrackerContext::startRun() { m_rootTracker = new SectionTracker( NameAndLocation( "{root}", CATCH_INTERNAL_LINEINFO ), *this, CATCH_NULL ); m_currentTracker = CATCH_NULL; m_runState = Executing; return *m_rootTracker; } } // namespace TestCaseTracking using TestCaseTracking::ITracker; using TestCaseTracking::TrackerContext; using TestCaseTracking::SectionTracker; using TestCaseTracking::IndexTracker; } // namespace Catch CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS // #included from: catch_fatal_condition.hpp #define TWOBLUECUBES_CATCH_FATAL_CONDITION_H_INCLUDED namespace Catch { // Report the error condition inline void reportFatal( std::string const& message ) { IContext& context = Catch::getCurrentContext(); IResultCapture* resultCapture = context.getResultCapture(); resultCapture->handleFatalErrorCondition( message ); } } // namespace Catch #if defined ( CATCH_PLATFORM_WINDOWS ) ///////////////////////////////////////// // #included from: catch_windows_h_proxy.h #define TWOBLUECUBES_CATCH_WINDOWS_H_PROXY_H_INCLUDED #ifdef CATCH_DEFINES_NOMINMAX # define NOMINMAX #endif #ifdef CATCH_DEFINES_WIN32_LEAN_AND_MEAN # define WIN32_LEAN_AND_MEAN #endif #ifdef __AFXDLL #include #else #include #endif #ifdef CATCH_DEFINES_NOMINMAX # undef NOMINMAX #endif #ifdef CATCH_DEFINES_WIN32_LEAN_AND_MEAN # undef WIN32_LEAN_AND_MEAN #endif # if !defined ( CATCH_CONFIG_WINDOWS_SEH ) namespace Catch { struct FatalConditionHandler { void reset() {} }; } # else // CATCH_CONFIG_WINDOWS_SEH is defined namespace Catch { struct SignalDefs { DWORD id; const char* name; }; extern SignalDefs signalDefs[]; // There is no 1-1 mapping between signals and windows exceptions. // Windows can easily distinguish between SO and SigSegV, // but SigInt, SigTerm, etc are handled differently. SignalDefs signalDefs[] = { { EXCEPTION_ILLEGAL_INSTRUCTION, "SIGILL - Illegal instruction signal" }, { EXCEPTION_STACK_OVERFLOW, "SIGSEGV - Stack overflow" }, { EXCEPTION_ACCESS_VIOLATION, "SIGSEGV - Segmentation violation signal" }, { EXCEPTION_INT_DIVIDE_BY_ZERO, "Divide by zero error" }, }; struct FatalConditionHandler { static LONG CALLBACK handleVectoredException(PEXCEPTION_POINTERS ExceptionInfo) { for (int i = 0; i < sizeof(signalDefs) / sizeof(SignalDefs); ++i) { if (ExceptionInfo->ExceptionRecord->ExceptionCode == signalDefs[i].id) { reportFatal(signalDefs[i].name); } } // If its not an exception we care about, pass it along. // This stops us from eating debugger breaks etc. 
return EXCEPTION_CONTINUE_SEARCH; } FatalConditionHandler() { isSet = true; // 32k seems enough for Catch to handle stack overflow, // but the value was found experimentally, so there is no strong guarantee guaranteeSize = 32 * 1024; exceptionHandlerHandle = CATCH_NULL; // Register as first handler in current chain exceptionHandlerHandle = AddVectoredExceptionHandler(1, handleVectoredException); // Pass in guarantee size to be filled SetThreadStackGuarantee(&guaranteeSize); } static void reset() { if (isSet) { // Unregister handler and restore the old guarantee RemoveVectoredExceptionHandler(exceptionHandlerHandle); SetThreadStackGuarantee(&guaranteeSize); exceptionHandlerHandle = CATCH_NULL; isSet = false; } } ~FatalConditionHandler() { reset(); } private: static bool isSet; static ULONG guaranteeSize; static PVOID exceptionHandlerHandle; }; bool FatalConditionHandler::isSet = false; ULONG FatalConditionHandler::guaranteeSize = 0; PVOID FatalConditionHandler::exceptionHandlerHandle = CATCH_NULL; } // namespace Catch # endif // CATCH_CONFIG_WINDOWS_SEH #else // Not Windows - assumed to be POSIX compatible ////////////////////////// # if !defined(CATCH_CONFIG_POSIX_SIGNALS) namespace Catch { struct FatalConditionHandler { void reset() {} }; } # else // CATCH_CONFIG_POSIX_SIGNALS is defined #include namespace Catch { struct SignalDefs { int id; const char* name; }; extern SignalDefs signalDefs[]; SignalDefs signalDefs[] = { { SIGINT, "SIGINT - Terminal interrupt signal" }, { SIGILL, "SIGILL - Illegal instruction signal" }, { SIGFPE, "SIGFPE - Floating point error signal" }, { SIGSEGV, "SIGSEGV - Segmentation violation signal" }, { SIGTERM, "SIGTERM - Termination request signal" }, { SIGABRT, "SIGABRT - Abort (abnormal termination) signal" } }; struct FatalConditionHandler { static bool isSet; static struct sigaction oldSigActions [sizeof(signalDefs)/sizeof(SignalDefs)]; static stack_t oldSigStack; static char altStackMem[32768]; static void handleSignal( int sig ) { std::string name = ""; for (std::size_t i = 0; i < sizeof(signalDefs) / sizeof(SignalDefs); ++i) { SignalDefs &def = signalDefs[i]; if (sig == def.id) { name = def.name; break; } } reset(); reportFatal(name); raise( sig ); } FatalConditionHandler() { isSet = true; stack_t sigStack; sigStack.ss_sp = altStackMem; sigStack.ss_size = 32768; sigStack.ss_flags = 0; sigaltstack(&sigStack, &oldSigStack); struct sigaction sa = { 0 }; sa.sa_handler = handleSignal; sa.sa_flags = SA_ONSTACK; for (std::size_t i = 0; i < sizeof(signalDefs)/sizeof(SignalDefs); ++i) { sigaction(signalDefs[i].id, &sa, &oldSigActions[i]); } } ~FatalConditionHandler() { reset(); } static void reset() { if( isSet ) { // Set signals back to previous values -- hopefully nobody overwrote them in the meantime for( std::size_t i = 0; i < sizeof(signalDefs)/sizeof(SignalDefs); ++i ) { sigaction(signalDefs[i].id, &oldSigActions[i], CATCH_NULL); } // Return the old stack sigaltstack(&oldSigStack, CATCH_NULL); isSet = false; } } }; bool FatalConditionHandler::isSet = false; struct sigaction FatalConditionHandler::oldSigActions[sizeof(signalDefs)/sizeof(SignalDefs)] = {}; stack_t FatalConditionHandler::oldSigStack = {}; char FatalConditionHandler::altStackMem[32768] = {}; } // namespace Catch # endif // CATCH_CONFIG_POSIX_SIGNALS #endif // not Windows #include #include namespace Catch { class StreamRedirect { public: StreamRedirect( std::ostream& stream, std::string& targetString ) : m_stream( stream ), m_prevBuf( stream.rdbuf() ), m_targetString( targetString ) { 
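// ------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): StreamRedirect is
// an RAII guard -- the statement just below swaps the stream's buffer for
// an internal one, and the destructor appends whatever was captured:
//
//     std::string captured;
//     {
//         Catch::StreamRedirect guard( Catch::cout(), captured );
//         Catch::cout() << "hello";   // diverted into the guard's buffer
//     }
//     // captured == "hello" once the guard is destroyed
// ------------------------------------------------------------------------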
stream.rdbuf( m_oss.rdbuf() ); } ~StreamRedirect() { m_targetString += m_oss.str(); m_stream.rdbuf( m_prevBuf ); } private: std::ostream& m_stream; std::streambuf* m_prevBuf; std::ostringstream m_oss; std::string& m_targetString; }; /////////////////////////////////////////////////////////////////////////// class RunContext : public IResultCapture, public IRunner { RunContext( RunContext const& ); void operator =( RunContext const& ); public: explicit RunContext( Ptr const& _config, Ptr const& reporter ) : m_runInfo( _config->name() ), m_context( getCurrentMutableContext() ), m_activeTestCase( CATCH_NULL ), m_config( _config ), m_reporter( reporter ), m_shouldReportUnexpected ( true ) { m_context.setRunner( this ); m_context.setConfig( m_config ); m_context.setResultCapture( this ); m_reporter->testRunStarting( m_runInfo ); } virtual ~RunContext() { m_reporter->testRunEnded( TestRunStats( m_runInfo, m_totals, aborting() ) ); } void testGroupStarting( std::string const& testSpec, std::size_t groupIndex, std::size_t groupsCount ) { m_reporter->testGroupStarting( GroupInfo( testSpec, groupIndex, groupsCount ) ); } void testGroupEnded( std::string const& testSpec, Totals const& totals, std::size_t groupIndex, std::size_t groupsCount ) { m_reporter->testGroupEnded( TestGroupStats( GroupInfo( testSpec, groupIndex, groupsCount ), totals, aborting() ) ); } Totals runTest( TestCase const& testCase ) { Totals prevTotals = m_totals; std::string redirectedCout; std::string redirectedCerr; TestCaseInfo testInfo = testCase.getTestCaseInfo(); m_reporter->testCaseStarting( testInfo ); m_activeTestCase = &testCase; do { ITracker& rootTracker = m_trackerContext.startRun(); assert( rootTracker.isSectionTracker() ); static_cast( rootTracker ).addInitialFilters( m_config->getSectionsToRun() ); do { m_trackerContext.startCycle(); m_testCaseTracker = &SectionTracker::acquire( m_trackerContext, TestCaseTracking::NameAndLocation( testInfo.name, testInfo.lineInfo ) ); runCurrentTest( redirectedCout, redirectedCerr ); } while( !m_testCaseTracker->isSuccessfullyCompleted() && !aborting() ); } // !TBD: deprecated - this will be replaced by indexed trackers while( getCurrentContext().advanceGeneratorsForCurrentTest() && !aborting() ); Totals deltaTotals = m_totals.delta( prevTotals ); if( testInfo.expectedToFail() && deltaTotals.testCases.passed > 0 ) { deltaTotals.assertions.failed++; deltaTotals.testCases.passed--; deltaTotals.testCases.failed++; } m_totals.testCases += deltaTotals.testCases; m_reporter->testCaseEnded( TestCaseStats( testInfo, deltaTotals, redirectedCout, redirectedCerr, aborting() ) ); m_activeTestCase = CATCH_NULL; m_testCaseTracker = CATCH_NULL; return deltaTotals; } Ptr config() const { return m_config; } private: // IResultCapture virtual void assertionEnded( AssertionResult const& result ) { if( result.getResultType() == ResultWas::Ok ) { m_totals.assertions.passed++; } else if( !result.isOk() ) { m_totals.assertions.failed++; } // We have no use for the return value (whether messages should be cleared), because messages were made scoped // and should be let to clear themselves out. 
static_cast(m_reporter->assertionEnded(AssertionStats(result, m_messages, m_totals))); // Reset working state m_lastAssertionInfo = AssertionInfo( "", m_lastAssertionInfo.lineInfo, "{Unknown expression after the reported line}" , m_lastAssertionInfo.resultDisposition ); m_lastResult = result; } virtual bool sectionStarted ( SectionInfo const& sectionInfo, Counts& assertions ) { ITracker& sectionTracker = SectionTracker::acquire( m_trackerContext, TestCaseTracking::NameAndLocation( sectionInfo.name, sectionInfo.lineInfo ) ); if( !sectionTracker.isOpen() ) return false; m_activeSections.push_back( §ionTracker ); m_lastAssertionInfo.lineInfo = sectionInfo.lineInfo; m_reporter->sectionStarting( sectionInfo ); assertions = m_totals.assertions; return true; } bool testForMissingAssertions( Counts& assertions ) { if( assertions.total() != 0 ) return false; if( !m_config->warnAboutMissingAssertions() ) return false; if( m_trackerContext.currentTracker().hasChildren() ) return false; m_totals.assertions.failed++; assertions.failed++; return true; } virtual void sectionEnded( SectionEndInfo const& endInfo ) { Counts assertions = m_totals.assertions - endInfo.prevAssertions; bool missingAssertions = testForMissingAssertions( assertions ); if( !m_activeSections.empty() ) { m_activeSections.back()->close(); m_activeSections.pop_back(); } m_reporter->sectionEnded( SectionStats( endInfo.sectionInfo, assertions, endInfo.durationInSeconds, missingAssertions ) ); m_messages.clear(); } virtual void sectionEndedEarly( SectionEndInfo const& endInfo ) { if( m_unfinishedSections.empty() ) m_activeSections.back()->fail(); else m_activeSections.back()->close(); m_activeSections.pop_back(); m_unfinishedSections.push_back( endInfo ); } virtual void pushScopedMessage( MessageInfo const& message ) { m_messages.push_back( message ); } virtual void popScopedMessage( MessageInfo const& message ) { m_messages.erase( std::remove( m_messages.begin(), m_messages.end(), message ), m_messages.end() ); } virtual std::string getCurrentTestName() const { return m_activeTestCase ? m_activeTestCase->getTestCaseInfo().name : std::string(); } virtual const AssertionResult* getLastResult() const { return &m_lastResult; } virtual void exceptionEarlyReported() { m_shouldReportUnexpected = false; } virtual void handleFatalErrorCondition( std::string const& message ) { // Don't rebuild the result -- the stringification itself can cause more fatal errors // Instead, fake a result data. 
AssertionResultData tempResult; tempResult.resultType = ResultWas::FatalErrorCondition; tempResult.message = message; AssertionResult result(m_lastAssertionInfo, tempResult); getResultCapture().assertionEnded(result); handleUnfinishedSections(); // Recreate section for test case (as we will lose the one that was in scope) TestCaseInfo const& testCaseInfo = m_activeTestCase->getTestCaseInfo(); SectionInfo testCaseSection( testCaseInfo.lineInfo, testCaseInfo.name, testCaseInfo.description ); Counts assertions; assertions.failed = 1; SectionStats testCaseSectionStats( testCaseSection, assertions, 0, false ); m_reporter->sectionEnded( testCaseSectionStats ); TestCaseInfo testInfo = m_activeTestCase->getTestCaseInfo(); Totals deltaTotals; deltaTotals.testCases.failed = 1; m_reporter->testCaseEnded( TestCaseStats( testInfo, deltaTotals, std::string(), std::string(), false ) ); m_totals.testCases.failed++; testGroupEnded( std::string(), m_totals, 1, 1 ); m_reporter->testRunEnded( TestRunStats( m_runInfo, m_totals, false ) ); } public: // !TBD We need to do this another way! bool aborting() const { return m_totals.assertions.failed == static_cast( m_config->abortAfter() ); } private: void runCurrentTest( std::string& redirectedCout, std::string& redirectedCerr ) { TestCaseInfo const& testCaseInfo = m_activeTestCase->getTestCaseInfo(); SectionInfo testCaseSection( testCaseInfo.lineInfo, testCaseInfo.name, testCaseInfo.description ); m_reporter->sectionStarting( testCaseSection ); Counts prevAssertions = m_totals.assertions; double duration = 0; m_shouldReportUnexpected = true; try { m_lastAssertionInfo = AssertionInfo( "TEST_CASE", testCaseInfo.lineInfo, "", ResultDisposition::Normal ); seedRng( *m_config ); Timer timer; timer.start(); if( m_reporter->getPreferences().shouldRedirectStdOut ) { StreamRedirect coutRedir( Catch::cout(), redirectedCout ); StreamRedirect cerrRedir( Catch::cerr(), redirectedCerr ); invokeActiveTestCase(); } else { invokeActiveTestCase(); } duration = timer.getElapsedSeconds(); } catch( TestFailureException& ) { // This just means the test was aborted due to failure } catch(...) { // Under CATCH_CONFIG_FAST_COMPILE, unexpected exceptions under REQUIRE assertions // are reported without translation at the point of origin. if (m_shouldReportUnexpected) { makeUnexpectedResultBuilder().useActiveException(); } } m_testCaseTracker->close(); handleUnfinishedSections(); m_messages.clear(); Counts assertions = m_totals.assertions - prevAssertions; bool missingAssertions = testForMissingAssertions( assertions ); if( testCaseInfo.okToFail() ) { std::swap( assertions.failedButOk, assertions.failed ); m_totals.assertions.failed -= assertions.failedButOk; m_totals.assertions.failedButOk += assertions.failedButOk; } SectionStats testCaseSectionStats( testCaseSection, assertions, duration, missingAssertions ); m_reporter->sectionEnded( testCaseSectionStats ); } void invokeActiveTestCase() { FatalConditionHandler fatalConditionHandler; // Handle signals m_activeTestCase->invoke(); fatalConditionHandler.reset(); } private: ResultBuilder makeUnexpectedResultBuilder() const { return ResultBuilder( m_lastAssertionInfo.macroName, m_lastAssertionInfo.lineInfo, m_lastAssertionInfo.capturedExpression, m_lastAssertionInfo.resultDisposition ); } void handleUnfinishedSections() { // If sections ended prematurely due to an exception we stored their // infos here so we can tear them down outside the unwind process. 
for( std::vector::const_reverse_iterator it = m_unfinishedSections.rbegin(), itEnd = m_unfinishedSections.rend(); it != itEnd; ++it ) sectionEnded( *it ); m_unfinishedSections.clear(); } TestRunInfo m_runInfo; IMutableContext& m_context; TestCase const* m_activeTestCase; ITracker* m_testCaseTracker; ITracker* m_currentSectionTracker; AssertionResult m_lastResult; Ptr m_config; Totals m_totals; Ptr m_reporter; std::vector m_messages; AssertionInfo m_lastAssertionInfo; std::vector m_unfinishedSections; std::vector m_activeSections; TrackerContext m_trackerContext; bool m_shouldReportUnexpected; }; IResultCapture& getResultCapture() { if( IResultCapture* capture = getCurrentContext().getResultCapture() ) return *capture; else throw std::logic_error( "No result capture instance" ); } } // end namespace Catch // #included from: internal/catch_version.h #define TWOBLUECUBES_CATCH_VERSION_H_INCLUDED namespace Catch { // Versioning information struct Version { Version( unsigned int _majorVersion, unsigned int _minorVersion, unsigned int _patchNumber, char const * const _branchName, unsigned int _buildNumber ); unsigned int const majorVersion; unsigned int const minorVersion; unsigned int const patchNumber; // buildNumber is only used if branchName is not null char const * const branchName; unsigned int const buildNumber; friend std::ostream& operator << ( std::ostream& os, Version const& version ); private: void operator=( Version const& ); }; inline Version libraryVersion(); } #include #include #include namespace Catch { Ptr createReporter( std::string const& reporterName, Ptr const& config ) { Ptr reporter = getRegistryHub().getReporterRegistry().create( reporterName, config.get() ); if( !reporter ) { std::ostringstream oss; oss << "No reporter registered with name: '" << reporterName << "'"; throw std::domain_error( oss.str() ); } return reporter; } Ptr makeReporter( Ptr const& config ) { std::vector reporters = config->getReporterNames(); if( reporters.empty() ) reporters.push_back( "console" ); Ptr reporter; for( std::vector::const_iterator it = reporters.begin(), itEnd = reporters.end(); it != itEnd; ++it ) reporter = addReporter( reporter, createReporter( *it, config ) ); return reporter; } Ptr addListeners( Ptr const& config, Ptr reporters ) { IReporterRegistry::Listeners listeners = getRegistryHub().getReporterRegistry().getListeners(); for( IReporterRegistry::Listeners::const_iterator it = listeners.begin(), itEnd = listeners.end(); it != itEnd; ++it ) reporters = addReporter(reporters, (*it)->create( ReporterConfig( config ) ) ); return reporters; } Totals runTests( Ptr const& config ) { Ptr iconfig = config.get(); Ptr reporter = makeReporter( config ); reporter = addListeners( iconfig, reporter ); RunContext context( iconfig, reporter ); Totals totals; context.testGroupStarting( config->name(), 1, 1 ); TestSpec testSpec = config->testSpec(); if( !testSpec.hasFilters() ) testSpec = TestSpecParser( ITagAliasRegistry::get() ).parse( "~[.]" ).testSpec(); // All not hidden tests std::vector const& allTestCases = getAllTestCasesSorted( *iconfig ); for( std::vector::const_iterator it = allTestCases.begin(), itEnd = allTestCases.end(); it != itEnd; ++it ) { if( !context.aborting() && matchTest( *it, testSpec, *iconfig ) ) totals += context.runTest( *it ); else reporter->skipTest( *it ); } context.testGroupEnded( iconfig->name(), totals, 1, 1 ); return totals; } void applyFilenamesAsTags( IConfig const& config ) { std::vector const& tests = getAllTestCasesSorted( config ); for(std::size_t i = 
    void applyFilenamesAsTags( IConfig const& config ) {
        std::vector<TestCase> const& tests = getAllTestCasesSorted( config );
        for( std::size_t i = 0; i < tests.size(); ++i ) {
            TestCase& test = const_cast<TestCase&>( tests[i] );
            std::set<std::string> tags = test.tags;

            std::string filename = test.lineInfo.file;
            std::string::size_type lastSlash = filename.find_last_of( "\\/" );
            if( lastSlash != std::string::npos )
                filename = filename.substr( lastSlash+1 );

            std::string::size_type lastDot = filename.find_last_of( "." );
            if( lastDot != std::string::npos )
                filename = filename.substr( 0, lastDot );

            tags.insert( "#" + filename );
            setTags( test, tags );
        }
    }

    class Session : NonCopyable {
        static bool alreadyInstantiated;

    public:

        struct OnUnusedOptions { enum DoWhat { Ignore, Fail }; };

        Session()
        : m_cli( makeCommandLineParser() ) {
            if( alreadyInstantiated ) {
                std::string msg = "Only one instance of Catch::Session can ever be used";
                Catch::cerr() << msg << std::endl;
                throw std::logic_error( msg );
            }
            alreadyInstantiated = true;
        }
        ~Session() {
            Catch::cleanUp();
        }

        void showHelp( std::string const& processName ) {
            Catch::cout() << "\nCatch v" << libraryVersion() << "\n";

            m_cli.usage( Catch::cout(), processName );
            Catch::cout() << "For more detail usage please see the project docs\n" << std::endl;
        }

        int applyCommandLine( int argc, char const* const* const argv, OnUnusedOptions::DoWhat unusedOptionBehaviour = OnUnusedOptions::Fail ) {
            try {
                m_cli.setThrowOnUnrecognisedTokens( unusedOptionBehaviour == OnUnusedOptions::Fail );
                m_unusedTokens = m_cli.parseInto( Clara::argsToVector( argc, argv ), m_configData );
                if( m_configData.showHelp )
                    showHelp( m_configData.processName );
                m_config.reset();
            }
            catch( std::exception& ex ) {
                {
                    Colour colourGuard( Colour::Red );
                    Catch::cerr()
                        << "\nError(s) in input:\n"
                        << Text( ex.what(), TextAttributes().setIndent(2) )
                        << "\n\n";
                }
                m_cli.usage( Catch::cout(), m_configData.processName );
                return (std::numeric_limits<int>::max)();
            }
            return 0;
        }

        void useConfigData( ConfigData const& _configData ) {
            m_configData = _configData;
            m_config.reset();
        }

        int run( int argc, char const* const* const argv ) {

            int returnCode = applyCommandLine( argc, argv );
            if( returnCode == 0 )
                returnCode = run();
            return returnCode;
        }

#if defined(WIN32) && defined(UNICODE)
        int run( int argc, wchar_t const* const* const argv ) {

            char **utf8Argv = new char *[ argc ];

            for ( int i = 0; i < argc; ++i ) {
                int bufSize = WideCharToMultiByte( CP_UTF8, 0, argv[i], -1, NULL, 0, NULL, NULL );

                utf8Argv[ i ] = new char[ bufSize ];

                WideCharToMultiByte( CP_UTF8, 0, argv[i], -1, utf8Argv[i], bufSize, NULL, NULL );
            }

            int returnCode = applyCommandLine( argc, utf8Argv );
            if( returnCode == 0 )
                returnCode = run();

            for ( int i = 0; i < argc; ++i )
                delete [] utf8Argv[ i ];

            delete [] utf8Argv;

            return returnCode;
        }
#endif

        int run() {
            if( m_configData.showHelp )
                return 0;

            try {
                config(); // Force config to be constructed

                seedRng( *m_config );

                if( m_configData.filenamesAsTags )
                    applyFilenamesAsTags( *m_config );

                // Handle list request
                if( Option<std::size_t> listed = list( config() ) )
                    return static_cast<int>( *listed );

                return static_cast<int>( runTests( m_config ).assertions.failed );
            }
            catch( std::exception& ex ) {
                Catch::cerr() << ex.what() << std::endl;
                return (std::numeric_limits<int>::max)();
            }
        }

        Clara::CommandLine<ConfigData> const& cli() const {
            return m_cli;
        }
        std::vector<Clara::Parser::Token> const& unusedTokens() const {
            return m_unusedTokens;
        }
        ConfigData& configData() {
            return m_configData;
        }
        Config& config() {
            if( !m_config )
                m_config = new Config( m_configData );
            return *m_config;
        }

    private:
        Clara::CommandLine<ConfigData> m_cli;
        std::vector<Clara::Parser::Token> m_unusedTokens;
        ConfigData m_configData;
        Ptr<Config> m_config;
    };

    bool Session::alreadyInstantiated = false;

} // end namespace Catch
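// A minimal sketch of how the Session class above is typically driven from a
// user-supplied main() (the usual CATCH_CONFIG_RUNNER setup). The guard macro
// CATCH_EXAMPLE_CUSTOM_MAIN is illustrative only, not a real Catch
// configuration macro, so the snippet stays inert unless you opt in; the
// configuration tweak shown is just one of many possible.
#ifdef CATCH_EXAMPLE_CUSTOM_MAIN
int main( int argc, char* argv[] ) {
    Catch::Session session; // There must be exactly one instance

    // Let Catch parse the command line; a non-zero result is a parse error.
    int returnCode = session.applyCommandLine( argc, argv );
    if( returnCode != 0 )
        return returnCode;

    // Configuration can be adjusted between parsing and running.
    session.configData().showDurations = Catch::ShowDurations::Always;

    // run() returns the number of failed assertions.
    return session.run();
}
#endif // CATCH_EXAMPLE_CUSTOM_MAIN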
// #included from: catch_registry_hub.hpp
#define TWOBLUECUBES_CATCH_REGISTRY_HUB_HPP_INCLUDED

// #included from: catch_test_case_registry_impl.hpp
#define TWOBLUECUBES_CATCH_TEST_CASE_REGISTRY_IMPL_HPP_INCLUDED

#include
#include
#include
#include

namespace Catch {

    struct RandomNumberGenerator {
        typedef std::ptrdiff_t result_type;

        result_type operator()( result_type n ) const { return rand() % n; }

#ifdef CATCH_CONFIG_CPP11_SHUFFLE
        static constexpr result_type min() { return 0; }
        static constexpr result_type max() { return 1000000; }
        result_type operator()() const { return rand() % max(); }
#endif
        template <typename V>
        static void shuffle( V& vector ) {
            RandomNumberGenerator rng;
#ifdef CATCH_CONFIG_CPP11_SHUFFLE
            std::shuffle( vector.begin(), vector.end(), rng );
#else
            random_shuffle( vector.begin(), vector.end(), rng );
#endif
        }
    };

    inline std::vector<TestCase> sortTests( IConfig const& config, std::vector<TestCase> const& unsortedTestCases ) {

        std::vector<TestCase> sorted = unsortedTestCases;

        switch( config.runOrder() ) {
            case RunTests::InLexicographicalOrder:
                std::sort( sorted.begin(), sorted.end() );
                break;
            case RunTests::InRandomOrder:
                {
                    seedRng( config );
                    RandomNumberGenerator::shuffle( sorted );
                }
                break;
            case RunTests::InDeclarationOrder:
                // already in declaration order
                break;
        }
        return sorted;
    }

    bool matchTest( TestCase const& testCase, TestSpec const& testSpec, IConfig const& config ) {
        return testSpec.matches( testCase ) && ( config.allowThrows() || !testCase.throws() );
    }

    void enforceNoDuplicateTestCases( std::vector<TestCase> const& functions ) {
        std::set<TestCase> seenFunctions;
        for( std::vector<TestCase>::const_iterator it = functions.begin(), itEnd = functions.end();
                it != itEnd;
                ++it ) {
            std::pair<std::set<TestCase>::const_iterator, bool> prev = seenFunctions.insert( *it );
            if( !prev.second ) {
                std::ostringstream ss;

                ss  << Colour( Colour::Red )
                    << "error: TEST_CASE( \"" << it->name << "\" ) already defined.\n"
                    << "\tFirst seen at " << prev.first->getTestCaseInfo().lineInfo << '\n'
                    << "\tRedefined at " << it->getTestCaseInfo().lineInfo << std::endl;

                throw std::runtime_error(ss.str());
            }
        }
    }

    std::vector<TestCase> filterTests( std::vector<TestCase> const& testCases, TestSpec const& testSpec, IConfig const& config ) {
        std::vector<TestCase> filtered;
        filtered.reserve( testCases.size() );
        for( std::vector<TestCase>::const_iterator it = testCases.begin(), itEnd = testCases.end();
                it != itEnd;
                ++it )
            if( matchTest( *it, testSpec, config ) )
                filtered.push_back( *it );
        return filtered;
    }
    std::vector<TestCase> const& getAllTestCasesSorted( IConfig const& config ) {
        return getRegistryHub().getTestCaseRegistry().getAllTestsSorted( config );
    }

    class TestRegistry : public ITestCaseRegistry {
    public:
        TestRegistry()
        :   m_currentSortOrder( RunTests::InDeclarationOrder ),
            m_unnamedCount( 0 )
        {}
        virtual ~TestRegistry();

        virtual void registerTest( TestCase const& testCase ) {
            std::string name = testCase.getTestCaseInfo().name;
            if( name.empty() ) {
                std::ostringstream oss;
                oss << "Anonymous test case " << ++m_unnamedCount;
                return registerTest( testCase.withName( oss.str() ) );
            }
            m_functions.push_back( testCase );
        }

        virtual std::vector<TestCase> const& getAllTests() const {
            return m_functions;
        }
        virtual std::vector<TestCase> const& getAllTestsSorted( IConfig const& config ) const {
            if( m_sortedFunctions.empty() )
                enforceNoDuplicateTestCases( m_functions );

            if( m_currentSortOrder != config.runOrder() || m_sortedFunctions.empty() ) {
                m_sortedFunctions = sortTests( config, m_functions );
                m_currentSortOrder = config.runOrder();
            }
            return m_sortedFunctions;
        }

    private:
        std::vector<TestCase> m_functions;
        mutable RunTests::InWhatOrder m_currentSortOrder;
        mutable std::vector<TestCase>
m_sortedFunctions; size_t m_unnamedCount; std::ios_base::Init m_ostreamInit; // Forces cout/ cerr to be initialised }; /////////////////////////////////////////////////////////////////////////// class FreeFunctionTestCase : public SharedImpl { public: FreeFunctionTestCase( TestFunction fun ) : m_fun( fun ) {} virtual void invoke() const { m_fun(); } private: virtual ~FreeFunctionTestCase(); TestFunction m_fun; }; inline std::string extractClassName( std::string const& classOrQualifiedMethodName ) { std::string className = classOrQualifiedMethodName; if( startsWith( className, '&' ) ) { std::size_t lastColons = className.rfind( "::" ); std::size_t penultimateColons = className.rfind( "::", lastColons-1 ); if( penultimateColons == std::string::npos ) penultimateColons = 1; className = className.substr( penultimateColons, lastColons-penultimateColons ); } return className; } void registerTestCase ( ITestCase* testCase, char const* classOrQualifiedMethodName, NameAndDesc const& nameAndDesc, SourceLineInfo const& lineInfo ) { getMutableRegistryHub().registerTest ( makeTestCase ( testCase, extractClassName( classOrQualifiedMethodName ), nameAndDesc.name, nameAndDesc.description, lineInfo ) ); } void registerTestCaseFunction ( TestFunction function, SourceLineInfo const& lineInfo, NameAndDesc const& nameAndDesc ) { registerTestCase( new FreeFunctionTestCase( function ), "", nameAndDesc, lineInfo ); } /////////////////////////////////////////////////////////////////////////// AutoReg::AutoReg ( TestFunction function, SourceLineInfo const& lineInfo, NameAndDesc const& nameAndDesc ) { registerTestCaseFunction( function, lineInfo, nameAndDesc ); } AutoReg::~AutoReg() {} } // end namespace Catch // #included from: catch_reporter_registry.hpp #define TWOBLUECUBES_CATCH_REPORTER_REGISTRY_HPP_INCLUDED #include namespace Catch { class ReporterRegistry : public IReporterRegistry { public: virtual ~ReporterRegistry() CATCH_OVERRIDE {} virtual IStreamingReporter* create( std::string const& name, Ptr const& config ) const CATCH_OVERRIDE { FactoryMap::const_iterator it = m_factories.find( name ); if( it == m_factories.end() ) return CATCH_NULL; return it->second->create( ReporterConfig( config ) ); } void registerReporter( std::string const& name, Ptr const& factory ) { m_factories.insert( std::make_pair( name, factory ) ); } void registerListener( Ptr const& factory ) { m_listeners.push_back( factory ); } virtual FactoryMap const& getFactories() const CATCH_OVERRIDE { return m_factories; } virtual Listeners const& getListeners() const CATCH_OVERRIDE { return m_listeners; } private: FactoryMap m_factories; Listeners m_listeners; }; } // #included from: catch_exception_translator_registry.hpp #define TWOBLUECUBES_CATCH_EXCEPTION_TRANSLATOR_REGISTRY_HPP_INCLUDED #ifdef __OBJC__ #import "Foundation/Foundation.h" #endif namespace Catch { class ExceptionTranslatorRegistry : public IExceptionTranslatorRegistry { public: ~ExceptionTranslatorRegistry() { deleteAll( m_translators ); } virtual void registerTranslator( const IExceptionTranslator* translator ) { m_translators.push_back( translator ); } virtual std::string translateActiveException() const { try { #ifdef __OBJC__ // In Objective-C try objective-c exceptions first @try { return tryTranslators(); } @catch (NSException *exception) { return Catch::toString( [exception description] ); } #else return tryTranslators(); #endif } catch( TestFailureException& ) { throw; } catch( std::exception& ex ) { return ex.what(); } catch( std::string& msg ) { return msg; } 
catch( const char* msg ) { return msg; } catch(...) { return "Unknown exception"; } } std::string tryTranslators() const { if( m_translators.empty() ) throw; else return m_translators[0]->translate( m_translators.begin()+1, m_translators.end() ); } private: std::vector m_translators; }; } // #included from: catch_tag_alias_registry.h #define TWOBLUECUBES_CATCH_TAG_ALIAS_REGISTRY_H_INCLUDED #include namespace Catch { class TagAliasRegistry : public ITagAliasRegistry { public: virtual ~TagAliasRegistry(); virtual Option find( std::string const& alias ) const; virtual std::string expandAliases( std::string const& unexpandedTestSpec ) const; void add( std::string const& alias, std::string const& tag, SourceLineInfo const& lineInfo ); private: std::map m_registry; }; } // end namespace Catch namespace Catch { namespace { class RegistryHub : public IRegistryHub, public IMutableRegistryHub { RegistryHub( RegistryHub const& ); void operator=( RegistryHub const& ); public: // IRegistryHub RegistryHub() { } virtual IReporterRegistry const& getReporterRegistry() const CATCH_OVERRIDE { return m_reporterRegistry; } virtual ITestCaseRegistry const& getTestCaseRegistry() const CATCH_OVERRIDE { return m_testCaseRegistry; } virtual IExceptionTranslatorRegistry& getExceptionTranslatorRegistry() CATCH_OVERRIDE { return m_exceptionTranslatorRegistry; } virtual ITagAliasRegistry const& getTagAliasRegistry() const CATCH_OVERRIDE { return m_tagAliasRegistry; } public: // IMutableRegistryHub virtual void registerReporter( std::string const& name, Ptr const& factory ) CATCH_OVERRIDE { m_reporterRegistry.registerReporter( name, factory ); } virtual void registerListener( Ptr const& factory ) CATCH_OVERRIDE { m_reporterRegistry.registerListener( factory ); } virtual void registerTest( TestCase const& testInfo ) CATCH_OVERRIDE { m_testCaseRegistry.registerTest( testInfo ); } virtual void registerTranslator( const IExceptionTranslator* translator ) CATCH_OVERRIDE { m_exceptionTranslatorRegistry.registerTranslator( translator ); } virtual void registerTagAlias( std::string const& alias, std::string const& tag, SourceLineInfo const& lineInfo ) CATCH_OVERRIDE { m_tagAliasRegistry.add( alias, tag, lineInfo ); } private: TestRegistry m_testCaseRegistry; ReporterRegistry m_reporterRegistry; ExceptionTranslatorRegistry m_exceptionTranslatorRegistry; TagAliasRegistry m_tagAliasRegistry; }; // Single, global, instance inline RegistryHub*& getTheRegistryHub() { static RegistryHub* theRegistryHub = CATCH_NULL; if( !theRegistryHub ) theRegistryHub = new RegistryHub(); return theRegistryHub; } } IRegistryHub& getRegistryHub() { return *getTheRegistryHub(); } IMutableRegistryHub& getMutableRegistryHub() { return *getTheRegistryHub(); } void cleanUp() { delete getTheRegistryHub(); getTheRegistryHub() = CATCH_NULL; cleanUpContext(); } std::string translateActiveException() { return getRegistryHub().getExceptionTranslatorRegistry().translateActiveException(); } } // end namespace Catch // #included from: catch_notimplemented_exception.hpp #define TWOBLUECUBES_CATCH_NOTIMPLEMENTED_EXCEPTION_HPP_INCLUDED #include namespace Catch { NotImplementedException::NotImplementedException( SourceLineInfo const& lineInfo ) : m_lineInfo( lineInfo ) { std::ostringstream oss; oss << lineInfo << ": function "; oss << "not implemented"; m_what = oss.str(); } const char* NotImplementedException::what() const CATCH_NOEXCEPT { return m_what.c_str(); } } // end namespace Catch // #included from: catch_context_impl.hpp #define 
TWOBLUECUBES_CATCH_CONTEXT_IMPL_HPP_INCLUDED // #included from: catch_stream.hpp #define TWOBLUECUBES_CATCH_STREAM_HPP_INCLUDED #include #include #include namespace Catch { template class StreamBufImpl : public StreamBufBase { char data[bufferSize]; WriterF m_writer; public: StreamBufImpl() { setp( data, data + sizeof(data) ); } ~StreamBufImpl() CATCH_NOEXCEPT { sync(); } private: int overflow( int c ) { sync(); if( c != EOF ) { if( pbase() == epptr() ) m_writer( std::string( 1, static_cast( c ) ) ); else sputc( static_cast( c ) ); } return 0; } int sync() { if( pbase() != pptr() ) { m_writer( std::string( pbase(), static_cast( pptr() - pbase() ) ) ); setp( pbase(), epptr() ); } return 0; } }; /////////////////////////////////////////////////////////////////////////// FileStream::FileStream( std::string const& filename ) { m_ofs.open( filename.c_str() ); if( m_ofs.fail() ) { std::ostringstream oss; oss << "Unable to open file: '" << filename << '\''; throw std::domain_error( oss.str() ); } } std::ostream& FileStream::stream() const { return m_ofs; } struct OutputDebugWriter { void operator()( std::string const&str ) { writeToDebugConsole( str ); } }; DebugOutStream::DebugOutStream() : m_streamBuf( new StreamBufImpl() ), m_os( m_streamBuf.get() ) {} std::ostream& DebugOutStream::stream() const { return m_os; } // Store the streambuf from cout up-front because // cout may get redirected when running tests CoutStream::CoutStream() : m_os( Catch::cout().rdbuf() ) {} std::ostream& CoutStream::stream() const { return m_os; } #ifndef CATCH_CONFIG_NOSTDOUT // If you #define this you must implement these functions std::ostream& cout() { return std::cout; } std::ostream& cerr() { return std::cerr; } #endif } namespace Catch { class Context : public IMutableContext { Context() : m_config( CATCH_NULL ), m_runner( CATCH_NULL ), m_resultCapture( CATCH_NULL ) {} Context( Context const& ); void operator=( Context const& ); public: virtual ~Context() { deleteAllValues( m_generatorsByTestName ); } public: // IContext virtual IResultCapture* getResultCapture() { return m_resultCapture; } virtual IRunner* getRunner() { return m_runner; } virtual size_t getGeneratorIndex( std::string const& fileInfo, size_t totalSize ) { return getGeneratorsForCurrentTest() .getGeneratorInfo( fileInfo, totalSize ) .getCurrentIndex(); } virtual bool advanceGeneratorsForCurrentTest() { IGeneratorsForTest* generators = findGeneratorsForCurrentTest(); return generators && generators->moveNext(); } virtual Ptr getConfig() const { return m_config; } public: // IMutableContext virtual void setResultCapture( IResultCapture* resultCapture ) { m_resultCapture = resultCapture; } virtual void setRunner( IRunner* runner ) { m_runner = runner; } virtual void setConfig( Ptr const& config ) { m_config = config; } friend IMutableContext& getCurrentMutableContext(); private: IGeneratorsForTest* findGeneratorsForCurrentTest() { std::string testName = getResultCapture()->getCurrentTestName(); std::map::const_iterator it = m_generatorsByTestName.find( testName ); return it != m_generatorsByTestName.end() ? 
it->second : CATCH_NULL; } IGeneratorsForTest& getGeneratorsForCurrentTest() { IGeneratorsForTest* generators = findGeneratorsForCurrentTest(); if( !generators ) { std::string testName = getResultCapture()->getCurrentTestName(); generators = createGeneratorsForTest(); m_generatorsByTestName.insert( std::make_pair( testName, generators ) ); } return *generators; } private: Ptr m_config; IRunner* m_runner; IResultCapture* m_resultCapture; std::map m_generatorsByTestName; }; namespace { Context* currentContext = CATCH_NULL; } IMutableContext& getCurrentMutableContext() { if( !currentContext ) currentContext = new Context(); return *currentContext; } IContext& getCurrentContext() { return getCurrentMutableContext(); } void cleanUpContext() { delete currentContext; currentContext = CATCH_NULL; } } // #included from: catch_console_colour_impl.hpp #define TWOBLUECUBES_CATCH_CONSOLE_COLOUR_IMPL_HPP_INCLUDED // #included from: catch_errno_guard.hpp #define TWOBLUECUBES_CATCH_ERRNO_GUARD_HPP_INCLUDED #include namespace Catch { class ErrnoGuard { public: ErrnoGuard():m_oldErrno(errno){} ~ErrnoGuard() { errno = m_oldErrno; } private: int m_oldErrno; }; } namespace Catch { namespace { struct IColourImpl { virtual ~IColourImpl() {} virtual void use( Colour::Code _colourCode ) = 0; }; struct NoColourImpl : IColourImpl { void use( Colour::Code ) {} static IColourImpl* instance() { static NoColourImpl s_instance; return &s_instance; } }; } // anon namespace } // namespace Catch #if !defined( CATCH_CONFIG_COLOUR_NONE ) && !defined( CATCH_CONFIG_COLOUR_WINDOWS ) && !defined( CATCH_CONFIG_COLOUR_ANSI ) # ifdef CATCH_PLATFORM_WINDOWS # define CATCH_CONFIG_COLOUR_WINDOWS # else # define CATCH_CONFIG_COLOUR_ANSI # endif #endif #if defined ( CATCH_CONFIG_COLOUR_WINDOWS ) ///////////////////////////////////////// namespace Catch { namespace { class Win32ColourImpl : public IColourImpl { public: Win32ColourImpl() : stdoutHandle( GetStdHandle(STD_OUTPUT_HANDLE) ) { CONSOLE_SCREEN_BUFFER_INFO csbiInfo; GetConsoleScreenBufferInfo( stdoutHandle, &csbiInfo ); originalForegroundAttributes = csbiInfo.wAttributes & ~( BACKGROUND_GREEN | BACKGROUND_RED | BACKGROUND_BLUE | BACKGROUND_INTENSITY ); originalBackgroundAttributes = csbiInfo.wAttributes & ~( FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_BLUE | FOREGROUND_INTENSITY ); } virtual void use( Colour::Code _colourCode ) { switch( _colourCode ) { case Colour::None: return setTextAttribute( originalForegroundAttributes ); case Colour::White: return setTextAttribute( FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_BLUE ); case Colour::Red: return setTextAttribute( FOREGROUND_RED ); case Colour::Green: return setTextAttribute( FOREGROUND_GREEN ); case Colour::Blue: return setTextAttribute( FOREGROUND_BLUE ); case Colour::Cyan: return setTextAttribute( FOREGROUND_BLUE | FOREGROUND_GREEN ); case Colour::Yellow: return setTextAttribute( FOREGROUND_RED | FOREGROUND_GREEN ); case Colour::Grey: return setTextAttribute( 0 ); case Colour::LightGrey: return setTextAttribute( FOREGROUND_INTENSITY ); case Colour::BrightRed: return setTextAttribute( FOREGROUND_INTENSITY | FOREGROUND_RED ); case Colour::BrightGreen: return setTextAttribute( FOREGROUND_INTENSITY | FOREGROUND_GREEN ); case Colour::BrightWhite: return setTextAttribute( FOREGROUND_INTENSITY | FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_BLUE ); case Colour::Bright: throw std::logic_error( "not a colour" ); } } private: void setTextAttribute( WORD _textAttribute ) { SetConsoleTextAttribute( stdoutHandle, _textAttribute | 
originalBackgroundAttributes ); } HANDLE stdoutHandle; WORD originalForegroundAttributes; WORD originalBackgroundAttributes; }; IColourImpl* platformColourInstance() { static Win32ColourImpl s_instance; Ptr config = getCurrentContext().getConfig(); UseColour::YesOrNo colourMode = config ? config->useColour() : UseColour::Auto; if( colourMode == UseColour::Auto ) colourMode = !isDebuggerActive() ? UseColour::Yes : UseColour::No; return colourMode == UseColour::Yes ? &s_instance : NoColourImpl::instance(); } } // end anon namespace } // end namespace Catch #elif defined( CATCH_CONFIG_COLOUR_ANSI ) ////////////////////////////////////// #include namespace Catch { namespace { // use POSIX/ ANSI console terminal codes // Thanks to Adam Strzelecki for original contribution // (http://github.com/nanoant) // https://github.com/philsquared/Catch/pull/131 class PosixColourImpl : public IColourImpl { public: virtual void use( Colour::Code _colourCode ) { switch( _colourCode ) { case Colour::None: case Colour::White: return setColour( "[0m" ); case Colour::Red: return setColour( "[0;31m" ); case Colour::Green: return setColour( "[0;32m" ); case Colour::Blue: return setColour( "[0;34m" ); case Colour::Cyan: return setColour( "[0;36m" ); case Colour::Yellow: return setColour( "[0;33m" ); case Colour::Grey: return setColour( "[1;30m" ); case Colour::LightGrey: return setColour( "[0;37m" ); case Colour::BrightRed: return setColour( "[1;31m" ); case Colour::BrightGreen: return setColour( "[1;32m" ); case Colour::BrightWhite: return setColour( "[1;37m" ); case Colour::Bright: throw std::logic_error( "not a colour" ); } } static IColourImpl* instance() { static PosixColourImpl s_instance; return &s_instance; } private: void setColour( const char* _escapeCode ) { Catch::cout() << '\033' << _escapeCode; } }; IColourImpl* platformColourInstance() { ErrnoGuard guard; Ptr config = getCurrentContext().getConfig(); UseColour::YesOrNo colourMode = config ? config->useColour() : UseColour::Auto; if( colourMode == UseColour::Auto ) colourMode = (!isDebuggerActive() && isatty(STDOUT_FILENO) ) ? UseColour::Yes : UseColour::No; return colourMode == UseColour::Yes ? 
PosixColourImpl::instance() : NoColourImpl::instance(); } } // end anon namespace } // end namespace Catch #else // not Windows or ANSI /////////////////////////////////////////////// namespace Catch { static IColourImpl* platformColourInstance() { return NoColourImpl::instance(); } } // end namespace Catch #endif // Windows/ ANSI/ None namespace Catch { Colour::Colour( Code _colourCode ) : m_moved( false ) { use( _colourCode ); } Colour::Colour( Colour const& _other ) : m_moved( false ) { const_cast( _other ).m_moved = true; } Colour::~Colour(){ if( !m_moved ) use( None ); } void Colour::use( Code _colourCode ) { static IColourImpl* impl = platformColourInstance(); impl->use( _colourCode ); } } // end namespace Catch // #included from: catch_generators_impl.hpp #define TWOBLUECUBES_CATCH_GENERATORS_IMPL_HPP_INCLUDED #include #include #include namespace Catch { struct GeneratorInfo : IGeneratorInfo { GeneratorInfo( std::size_t size ) : m_size( size ), m_currentIndex( 0 ) {} bool moveNext() { if( ++m_currentIndex == m_size ) { m_currentIndex = 0; return false; } return true; } std::size_t getCurrentIndex() const { return m_currentIndex; } std::size_t m_size; std::size_t m_currentIndex; }; /////////////////////////////////////////////////////////////////////////// class GeneratorsForTest : public IGeneratorsForTest { public: ~GeneratorsForTest() { deleteAll( m_generatorsInOrder ); } IGeneratorInfo& getGeneratorInfo( std::string const& fileInfo, std::size_t size ) { std::map::const_iterator it = m_generatorsByName.find( fileInfo ); if( it == m_generatorsByName.end() ) { IGeneratorInfo* info = new GeneratorInfo( size ); m_generatorsByName.insert( std::make_pair( fileInfo, info ) ); m_generatorsInOrder.push_back( info ); return *info; } return *it->second; } bool moveNext() { std::vector::const_iterator it = m_generatorsInOrder.begin(); std::vector::const_iterator itEnd = m_generatorsInOrder.end(); for(; it != itEnd; ++it ) { if( (*it)->moveNext() ) return true; } return false; } private: std::map m_generatorsByName; std::vector m_generatorsInOrder; }; IGeneratorsForTest* createGeneratorsForTest() { return new GeneratorsForTest(); } } // end namespace Catch // #included from: catch_assertionresult.hpp #define TWOBLUECUBES_CATCH_ASSERTIONRESULT_HPP_INCLUDED namespace Catch { AssertionInfo::AssertionInfo( char const * _macroName, SourceLineInfo const& _lineInfo, char const * _capturedExpression, ResultDisposition::Flags _resultDisposition, char const * _secondArg) : macroName( _macroName ), lineInfo( _lineInfo ), capturedExpression( _capturedExpression ), resultDisposition( _resultDisposition ), secondArg( _secondArg ) {} AssertionResult::AssertionResult() {} AssertionResult::AssertionResult( AssertionInfo const& info, AssertionResultData const& data ) : m_info( info ), m_resultData( data ) {} AssertionResult::~AssertionResult() {} // Result was a success bool AssertionResult::succeeded() const { return Catch::isOk( m_resultData.resultType ); } // Result was a success, or failure is suppressed bool AssertionResult::isOk() const { return Catch::isOk( m_resultData.resultType ) || shouldSuppressFailure( m_info.resultDisposition ); } ResultWas::OfType AssertionResult::getResultType() const { return m_resultData.resultType; } bool AssertionResult::hasExpression() const { return m_info.capturedExpression[0] != 0; } bool AssertionResult::hasMessage() const { return !m_resultData.message.empty(); } std::string capturedExpressionWithSecondArgument( char const * capturedExpression, char const * secondArg ) 
{ return (secondArg[0] == 0 || secondArg[0] == '"' && secondArg[1] == '"') ? capturedExpression : std::string(capturedExpression) + ", " + secondArg; } std::string AssertionResult::getExpression() const { if( isFalseTest( m_info.resultDisposition ) ) return '!' + capturedExpressionWithSecondArgument(m_info.capturedExpression, m_info.secondArg); else return capturedExpressionWithSecondArgument(m_info.capturedExpression, m_info.secondArg); } std::string AssertionResult::getExpressionInMacro() const { if( m_info.macroName[0] == 0 ) return capturedExpressionWithSecondArgument(m_info.capturedExpression, m_info.secondArg); else return std::string(m_info.macroName) + "( " + capturedExpressionWithSecondArgument(m_info.capturedExpression, m_info.secondArg) + " )"; } bool AssertionResult::hasExpandedExpression() const { return hasExpression() && getExpandedExpression() != getExpression(); } std::string AssertionResult::getExpandedExpression() const { return m_resultData.reconstructExpression(); } std::string AssertionResult::getMessage() const { return m_resultData.message; } SourceLineInfo AssertionResult::getSourceInfo() const { return m_info.lineInfo; } std::string AssertionResult::getTestMacroName() const { return m_info.macroName; } void AssertionResult::discardDecomposedExpression() const { m_resultData.decomposedExpression = CATCH_NULL; } void AssertionResult::expandDecomposedExpression() const { m_resultData.reconstructExpression(); } } // end namespace Catch // #included from: catch_test_case_info.hpp #define TWOBLUECUBES_CATCH_TEST_CASE_INFO_HPP_INCLUDED #include namespace Catch { inline TestCaseInfo::SpecialProperties parseSpecialTag( std::string const& tag ) { if( startsWith( tag, '.' ) || tag == "hide" || tag == "!hide" ) return TestCaseInfo::IsHidden; else if( tag == "!throws" ) return TestCaseInfo::Throws; else if( tag == "!shouldfail" ) return TestCaseInfo::ShouldFail; else if( tag == "!mayfail" ) return TestCaseInfo::MayFail; else if( tag == "!nonportable" ) return TestCaseInfo::NonPortable; else return TestCaseInfo::None; } inline bool isReservedTag( std::string const& tag ) { return parseSpecialTag( tag ) == TestCaseInfo::None && tag.size() > 0 && !std::isalnum( tag[0] ); } inline void enforceNotReservedTag( std::string const& tag, SourceLineInfo const& _lineInfo ) { if( isReservedTag( tag ) ) { std::ostringstream ss; ss << Colour(Colour::Red) << "Tag name [" << tag << "] not allowed.\n" << "Tag names starting with non alpha-numeric characters are reserved\n" << Colour(Colour::FileName) << _lineInfo << '\n'; throw std::runtime_error(ss.str()); } } TestCase makeTestCase( ITestCase* _testCase, std::string const& _className, std::string const& _name, std::string const& _descOrTags, SourceLineInfo const& _lineInfo ) { bool isHidden( startsWith( _name, "./" ) ); // Legacy support // Parse out tags std::set tags; std::string desc, tag; bool inTag = false; for( std::size_t i = 0; i < _descOrTags.size(); ++i ) { char c = _descOrTags[i]; if( !inTag ) { if( c == '[' ) inTag = true; else desc += c; } else { if( c == ']' ) { TestCaseInfo::SpecialProperties prop = parseSpecialTag( tag ); if( prop == TestCaseInfo::IsHidden ) isHidden = true; else if( prop == TestCaseInfo::None ) enforceNotReservedTag( tag, _lineInfo ); tags.insert( tag ); tag.clear(); inTag = false; } else tag += c; } } if( isHidden ) { tags.insert( "hide" ); tags.insert( "." 
); } TestCaseInfo info( _name, _className, desc, tags, _lineInfo ); return TestCase( _testCase, info ); } void setTags( TestCaseInfo& testCaseInfo, std::set const& tags ) { testCaseInfo.tags = tags; testCaseInfo.lcaseTags.clear(); std::ostringstream oss; for( std::set::const_iterator it = tags.begin(), itEnd = tags.end(); it != itEnd; ++it ) { oss << '[' << *it << ']'; std::string lcaseTag = toLower( *it ); testCaseInfo.properties = static_cast( testCaseInfo.properties | parseSpecialTag( lcaseTag ) ); testCaseInfo.lcaseTags.insert( lcaseTag ); } testCaseInfo.tagsAsString = oss.str(); } TestCaseInfo::TestCaseInfo( std::string const& _name, std::string const& _className, std::string const& _description, std::set const& _tags, SourceLineInfo const& _lineInfo ) : name( _name ), className( _className ), description( _description ), lineInfo( _lineInfo ), properties( None ) { setTags( *this, _tags ); } TestCaseInfo::TestCaseInfo( TestCaseInfo const& other ) : name( other.name ), className( other.className ), description( other.description ), tags( other.tags ), lcaseTags( other.lcaseTags ), tagsAsString( other.tagsAsString ), lineInfo( other.lineInfo ), properties( other.properties ) {} bool TestCaseInfo::isHidden() const { return ( properties & IsHidden ) != 0; } bool TestCaseInfo::throws() const { return ( properties & Throws ) != 0; } bool TestCaseInfo::okToFail() const { return ( properties & (ShouldFail | MayFail ) ) != 0; } bool TestCaseInfo::expectedToFail() const { return ( properties & (ShouldFail ) ) != 0; } TestCase::TestCase( ITestCase* testCase, TestCaseInfo const& info ) : TestCaseInfo( info ), test( testCase ) {} TestCase::TestCase( TestCase const& other ) : TestCaseInfo( other ), test( other.test ) {} TestCase TestCase::withName( std::string const& _newName ) const { TestCase other( *this ); other.name = _newName; return other; } void TestCase::swap( TestCase& other ) { test.swap( other.test ); name.swap( other.name ); className.swap( other.className ); description.swap( other.description ); tags.swap( other.tags ); lcaseTags.swap( other.lcaseTags ); tagsAsString.swap( other.tagsAsString ); std::swap( TestCaseInfo::properties, static_cast( other ).properties ); std::swap( lineInfo, other.lineInfo ); } void TestCase::invoke() const { test->invoke(); } bool TestCase::operator == ( TestCase const& other ) const { return test.get() == other.test.get() && name == other.name && className == other.className; } bool TestCase::operator < ( TestCase const& other ) const { return name < other.name; } TestCase& TestCase::operator = ( TestCase const& other ) { TestCase temp( other ); swap( temp ); return *this; } TestCaseInfo const& TestCase::getTestCaseInfo() const { return *this; } } // end namespace Catch // #included from: catch_version.hpp #define TWOBLUECUBES_CATCH_VERSION_HPP_INCLUDED namespace Catch { Version::Version ( unsigned int _majorVersion, unsigned int _minorVersion, unsigned int _patchNumber, char const * const _branchName, unsigned int _buildNumber ) : majorVersion( _majorVersion ), minorVersion( _minorVersion ), patchNumber( _patchNumber ), branchName( _branchName ), buildNumber( _buildNumber ) {} std::ostream& operator << ( std::ostream& os, Version const& version ) { os << version.majorVersion << '.' << version.minorVersion << '.' << version.patchNumber; // branchName is never null -> 0th char is \0 if it is empty if (version.branchName[0]) { os << '-' << version.branchName << '.' 
<< version.buildNumber; } return os; } inline Version libraryVersion() { static Version version( 1, 9, 6, "", 0 ); return version; } } // #included from: catch_message.hpp #define TWOBLUECUBES_CATCH_MESSAGE_HPP_INCLUDED namespace Catch { MessageInfo::MessageInfo( std::string const& _macroName, SourceLineInfo const& _lineInfo, ResultWas::OfType _type ) : macroName( _macroName ), lineInfo( _lineInfo ), type( _type ), sequence( ++globalCount ) {} // This may need protecting if threading support is added unsigned int MessageInfo::globalCount = 0; //////////////////////////////////////////////////////////////////////////// ScopedMessage::ScopedMessage( MessageBuilder const& builder ) : m_info( builder.m_info ) { m_info.message = builder.m_stream.str(); getResultCapture().pushScopedMessage( m_info ); } ScopedMessage::ScopedMessage( ScopedMessage const& other ) : m_info( other.m_info ) {} ScopedMessage::~ScopedMessage() { if ( !std::uncaught_exception() ){ getResultCapture().popScopedMessage(m_info); } } } // end namespace Catch // #included from: catch_legacy_reporter_adapter.hpp #define TWOBLUECUBES_CATCH_LEGACY_REPORTER_ADAPTER_HPP_INCLUDED // #included from: catch_legacy_reporter_adapter.h #define TWOBLUECUBES_CATCH_LEGACY_REPORTER_ADAPTER_H_INCLUDED namespace Catch { // Deprecated struct IReporter : IShared { virtual ~IReporter(); virtual bool shouldRedirectStdout() const = 0; virtual void StartTesting() = 0; virtual void EndTesting( Totals const& totals ) = 0; virtual void StartGroup( std::string const& groupName ) = 0; virtual void EndGroup( std::string const& groupName, Totals const& totals ) = 0; virtual void StartTestCase( TestCaseInfo const& testInfo ) = 0; virtual void EndTestCase( TestCaseInfo const& testInfo, Totals const& totals, std::string const& stdOut, std::string const& stdErr ) = 0; virtual void StartSection( std::string const& sectionName, std::string const& description ) = 0; virtual void EndSection( std::string const& sectionName, Counts const& assertions ) = 0; virtual void NoAssertionsInSection( std::string const& sectionName ) = 0; virtual void NoAssertionsInTestCase( std::string const& testName ) = 0; virtual void Aborted() = 0; virtual void Result( AssertionResult const& result ) = 0; }; class LegacyReporterAdapter : public SharedImpl { public: LegacyReporterAdapter( Ptr const& legacyReporter ); virtual ~LegacyReporterAdapter(); virtual ReporterPreferences getPreferences() const; virtual void noMatchingTestCases( std::string const& ); virtual void testRunStarting( TestRunInfo const& ); virtual void testGroupStarting( GroupInfo const& groupInfo ); virtual void testCaseStarting( TestCaseInfo const& testInfo ); virtual void sectionStarting( SectionInfo const& sectionInfo ); virtual void assertionStarting( AssertionInfo const& ); virtual bool assertionEnded( AssertionStats const& assertionStats ); virtual void sectionEnded( SectionStats const& sectionStats ); virtual void testCaseEnded( TestCaseStats const& testCaseStats ); virtual void testGroupEnded( TestGroupStats const& testGroupStats ); virtual void testRunEnded( TestRunStats const& testRunStats ); virtual void skipTest( TestCaseInfo const& ); private: Ptr m_legacyReporter; }; } namespace Catch { LegacyReporterAdapter::LegacyReporterAdapter( Ptr const& legacyReporter ) : m_legacyReporter( legacyReporter ) {} LegacyReporterAdapter::~LegacyReporterAdapter() {} ReporterPreferences LegacyReporterAdapter::getPreferences() const { ReporterPreferences prefs; prefs.shouldRedirectStdOut = 
m_legacyReporter->shouldRedirectStdout(); return prefs; } void LegacyReporterAdapter::noMatchingTestCases( std::string const& ) {} void LegacyReporterAdapter::testRunStarting( TestRunInfo const& ) { m_legacyReporter->StartTesting(); } void LegacyReporterAdapter::testGroupStarting( GroupInfo const& groupInfo ) { m_legacyReporter->StartGroup( groupInfo.name ); } void LegacyReporterAdapter::testCaseStarting( TestCaseInfo const& testInfo ) { m_legacyReporter->StartTestCase( testInfo ); } void LegacyReporterAdapter::sectionStarting( SectionInfo const& sectionInfo ) { m_legacyReporter->StartSection( sectionInfo.name, sectionInfo.description ); } void LegacyReporterAdapter::assertionStarting( AssertionInfo const& ) { // Not on legacy interface } bool LegacyReporterAdapter::assertionEnded( AssertionStats const& assertionStats ) { if( assertionStats.assertionResult.getResultType() != ResultWas::Ok ) { for( std::vector::const_iterator it = assertionStats.infoMessages.begin(), itEnd = assertionStats.infoMessages.end(); it != itEnd; ++it ) { if( it->type == ResultWas::Info ) { ResultBuilder rb( it->macroName.c_str(), it->lineInfo, "", ResultDisposition::Normal ); rb << it->message; rb.setResultType( ResultWas::Info ); AssertionResult result = rb.build(); m_legacyReporter->Result( result ); } } } m_legacyReporter->Result( assertionStats.assertionResult ); return true; } void LegacyReporterAdapter::sectionEnded( SectionStats const& sectionStats ) { if( sectionStats.missingAssertions ) m_legacyReporter->NoAssertionsInSection( sectionStats.sectionInfo.name ); m_legacyReporter->EndSection( sectionStats.sectionInfo.name, sectionStats.assertions ); } void LegacyReporterAdapter::testCaseEnded( TestCaseStats const& testCaseStats ) { m_legacyReporter->EndTestCase ( testCaseStats.testInfo, testCaseStats.totals, testCaseStats.stdOut, testCaseStats.stdErr ); } void LegacyReporterAdapter::testGroupEnded( TestGroupStats const& testGroupStats ) { if( testGroupStats.aborting ) m_legacyReporter->Aborted(); m_legacyReporter->EndGroup( testGroupStats.groupInfo.name, testGroupStats.totals ); } void LegacyReporterAdapter::testRunEnded( TestRunStats const& testRunStats ) { m_legacyReporter->EndTesting( testRunStats.totals ); } void LegacyReporterAdapter::skipTest( TestCaseInfo const& ) { } } // #included from: catch_timer.hpp #ifdef __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wc++11-long-long" #endif #ifdef CATCH_PLATFORM_WINDOWS #else #include #endif namespace Catch { namespace { #ifdef CATCH_PLATFORM_WINDOWS UInt64 getCurrentTicks() { static UInt64 hz=0, hzo=0; if (!hz) { QueryPerformanceFrequency( reinterpret_cast( &hz ) ); QueryPerformanceCounter( reinterpret_cast( &hzo ) ); } UInt64 t; QueryPerformanceCounter( reinterpret_cast( &t ) ); return ((t-hzo)*1000000)/hz; } #else UInt64 getCurrentTicks() { timeval t; gettimeofday(&t,CATCH_NULL); return static_cast( t.tv_sec ) * 1000000ull + static_cast( t.tv_usec ); } #endif } void Timer::start() { m_ticks = getCurrentTicks(); } unsigned int Timer::getElapsedMicroseconds() const { return static_cast(getCurrentTicks() - m_ticks); } unsigned int Timer::getElapsedMilliseconds() const { return static_cast(getElapsedMicroseconds()/1000); } double Timer::getElapsedSeconds() const { return getElapsedMicroseconds()/1000000.0; } } // namespace Catch #ifdef __clang__ # pragma clang diagnostic pop #endif // #included from: catch_common.hpp #define TWOBLUECUBES_CATCH_COMMON_HPP_INCLUDED #include #include namespace Catch { bool startsWith( std::string 
const& s, std::string const& prefix ) { return s.size() >= prefix.size() && std::equal(prefix.begin(), prefix.end(), s.begin()); } bool startsWith( std::string const& s, char prefix ) { return !s.empty() && s[0] == prefix; } bool endsWith( std::string const& s, std::string const& suffix ) { return s.size() >= suffix.size() && std::equal(suffix.rbegin(), suffix.rend(), s.rbegin()); } bool endsWith( std::string const& s, char suffix ) { return !s.empty() && s[s.size()-1] == suffix; } bool contains( std::string const& s, std::string const& infix ) { return s.find( infix ) != std::string::npos; } char toLowerCh(char c) { return static_cast( std::tolower( c ) ); } void toLowerInPlace( std::string& s ) { std::transform( s.begin(), s.end(), s.begin(), toLowerCh ); } std::string toLower( std::string const& s ) { std::string lc = s; toLowerInPlace( lc ); return lc; } std::string trim( std::string const& str ) { static char const* whitespaceChars = "\n\r\t "; std::string::size_type start = str.find_first_not_of( whitespaceChars ); std::string::size_type end = str.find_last_not_of( whitespaceChars ); return start != std::string::npos ? str.substr( start, 1+end-start ) : std::string(); } bool replaceInPlace( std::string& str, std::string const& replaceThis, std::string const& withThis ) { bool replaced = false; std::size_t i = str.find( replaceThis ); while( i != std::string::npos ) { replaced = true; str = str.substr( 0, i ) + withThis + str.substr( i+replaceThis.size() ); if( i < str.size()-withThis.size() ) i = str.find( replaceThis, i+withThis.size() ); else i = std::string::npos; } return replaced; } pluralise::pluralise( std::size_t count, std::string const& label ) : m_count( count ), m_label( label ) {} std::ostream& operator << ( std::ostream& os, pluralise const& pluraliser ) { os << pluraliser.m_count << ' ' << pluraliser.m_label; if( pluraliser.m_count != 1 ) os << 's'; return os; } SourceLineInfo::SourceLineInfo() : file(""), line( 0 ){} SourceLineInfo::SourceLineInfo( char const* _file, std::size_t _line ) : file( _file ), line( _line ) {} bool SourceLineInfo::empty() const { return file[0] == '\0'; } bool SourceLineInfo::operator == ( SourceLineInfo const& other ) const { return line == other.line && (file == other.file || std::strcmp(file, other.file) == 0); } bool SourceLineInfo::operator < ( SourceLineInfo const& other ) const { return line < other.line || ( line == other.line && (std::strcmp(file, other.file) < 0)); } void seedRng( IConfig const& config ) { if( config.rngSeed() != 0 ) srand( config.rngSeed() ); } unsigned int rngSeed() { return getCurrentContext().getConfig()->rngSeed(); } std::ostream& operator << ( std::ostream& os, SourceLineInfo const& info ) { #ifndef __GNUG__ os << info.file << '(' << info.line << ')'; #else os << info.file << ':' << info.line; #endif return os; } void throwLogicError( std::string const& message, SourceLineInfo const& locationInfo ) { std::ostringstream oss; oss << locationInfo << ": Internal Catch error: '" << message << '\''; if( alwaysTrue() ) throw std::logic_error( oss.str() ); } } // #included from: catch_section.hpp #define TWOBLUECUBES_CATCH_SECTION_HPP_INCLUDED namespace Catch { SectionInfo::SectionInfo ( SourceLineInfo const& _lineInfo, std::string const& _name, std::string const& _description ) : name( _name ), description( _description ), lineInfo( _lineInfo ) {} Section::Section( SectionInfo const& info ) : m_info( info ), m_sectionIncluded( getResultCapture().sectionStarted( m_info, m_assertions ) ) { m_timer.start(); } #if 
defined(_MSC_VER) # pragma warning(push) # pragma warning(disable:4996) // std::uncaught_exception is deprecated in C++17 #endif Section::~Section() { if( m_sectionIncluded ) { SectionEndInfo endInfo( m_info, m_assertions, m_timer.getElapsedSeconds() ); if( std::uncaught_exception() ) getResultCapture().sectionEndedEarly( endInfo ); else getResultCapture().sectionEnded( endInfo ); } } #if defined(_MSC_VER) # pragma warning(pop) #endif // This indicates whether the section should be executed or not Section::operator bool() const { return m_sectionIncluded; } } // end namespace Catch // #included from: catch_debugger.hpp #define TWOBLUECUBES_CATCH_DEBUGGER_HPP_INCLUDED #ifdef CATCH_PLATFORM_MAC #include #include #include #include #include namespace Catch{ // The following function is taken directly from the following technical note: // http://developer.apple.com/library/mac/#qa/qa2004/qa1361.html // Returns true if the current process is being debugged (either // running under the debugger or has a debugger attached post facto). bool isDebuggerActive(){ int mib[4]; struct kinfo_proc info; size_t size; // Initialize the flags so that, if sysctl fails for some bizarre // reason, we get a predictable result. info.kp_proc.p_flag = 0; // Initialize mib, which tells sysctl the info we want, in this case // we're looking for information about a specific process ID. mib[0] = CTL_KERN; mib[1] = KERN_PROC; mib[2] = KERN_PROC_PID; mib[3] = getpid(); // Call sysctl. size = sizeof(info); if( sysctl(mib, sizeof(mib) / sizeof(*mib), &info, &size, CATCH_NULL, 0) != 0 ) { Catch::cerr() << "\n** Call to sysctl failed - unable to determine if debugger is active **\n" << std::endl; return false; } // We're being debugged if the P_TRACED flag is set. return ( (info.kp_proc.p_flag & P_TRACED) != 0 ); } } // namespace Catch #elif defined(CATCH_PLATFORM_LINUX) #include #include namespace Catch{ // The standard POSIX way of detecting a debugger is to attempt to // ptrace() the process, but this needs to be done from a child and not // this process itself to still allow attaching to this process later // if wanted, so is rather heavy. Under Linux we have the PID of the // "debugger" (which doesn't need to be gdb, of course, it could also // be strace, for example) in /proc/$PID/status, so just get it from // there instead. bool isDebuggerActive(){ // Libstdc++ has a bug, where std::ifstream sets errno to 0 // This way our users can properly assert over errno values ErrnoGuard guard; std::ifstream in("/proc/self/status"); for( std::string line; std::getline(in, line); ) { static const int PREFIX_LEN = 11; if( line.compare(0, PREFIX_LEN, "TracerPid:\t") == 0 ) { // We're traced if the PID is not 0 and no other PID starts // with 0 digit, so it's enough to check for just a single // character. 
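            // For example (pid value illustrative): an untraced process
            // reads the line "TracerPid:\t0" here, while the same process
            // run under gdb or strace reads something like
            // "TracerPid:\t12345".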
return line.length() > PREFIX_LEN && line[PREFIX_LEN] != '0'; } } return false; } } // namespace Catch #elif defined(_MSC_VER) extern "C" __declspec(dllimport) int __stdcall IsDebuggerPresent(); namespace Catch { bool isDebuggerActive() { return IsDebuggerPresent() != 0; } } #elif defined(__MINGW32__) extern "C" __declspec(dllimport) int __stdcall IsDebuggerPresent(); namespace Catch { bool isDebuggerActive() { return IsDebuggerPresent() != 0; } } #else namespace Catch { inline bool isDebuggerActive() { return false; } } #endif // Platform #ifdef CATCH_PLATFORM_WINDOWS namespace Catch { void writeToDebugConsole( std::string const& text ) { ::OutputDebugStringA( text.c_str() ); } } #else namespace Catch { void writeToDebugConsole( std::string const& text ) { // !TBD: Need a version for Mac/ XCode and other IDEs Catch::cout() << text; } } #endif // Platform // #included from: catch_tostring.hpp #define TWOBLUECUBES_CATCH_TOSTRING_HPP_INCLUDED namespace Catch { namespace Detail { const std::string unprintableString = "{?}"; namespace { const int hexThreshold = 255; struct Endianness { enum Arch { Big, Little }; static Arch which() { union _{ int asInt; char asChar[sizeof (int)]; } u; u.asInt = 1; return ( u.asChar[sizeof(int)-1] == 1 ) ? Big : Little; } }; } std::string rawMemoryToString( const void *object, std::size_t size ) { // Reverse order for little endian architectures int i = 0, end = static_cast( size ), inc = 1; if( Endianness::which() == Endianness::Little ) { i = end-1; end = inc = -1; } unsigned char const *bytes = static_cast(object); std::ostringstream os; os << "0x" << std::setfill('0') << std::hex; for( ; i != end; i += inc ) os << std::setw(2) << static_cast(bytes[i]); return os.str(); } } std::string toString( std::string const& value ) { std::string s = value; if( getCurrentContext().getConfig()->showInvisibles() ) { for(size_t i = 0; i < s.size(); ++i ) { std::string subs; switch( s[i] ) { case '\n': subs = "\\n"; break; case '\t': subs = "\\t"; break; default: break; } if( !subs.empty() ) { s = s.substr( 0, i ) + subs + s.substr( i+1 ); ++i; } } } return '"' + s + '"'; } std::string toString( std::wstring const& value ) { std::string s; s.reserve( value.size() ); for(size_t i = 0; i < value.size(); ++i ) s += value[i] <= 0xff ? static_cast( value[i] ) : '?'; return Catch::toString( s ); } std::string toString( const char* const value ) { return value ? Catch::toString( std::string( value ) ) : std::string( "{null string}" ); } std::string toString( char* const value ) { return Catch::toString( static_cast( value ) ); } std::string toString( const wchar_t* const value ) { return value ? 
Catch::toString( std::wstring(value) ) : std::string( "{null string}" ); } std::string toString( wchar_t* const value ) { return Catch::toString( static_cast( value ) ); } std::string toString( int value ) { std::ostringstream oss; oss << value; if( value > Detail::hexThreshold ) oss << " (0x" << std::hex << value << ')'; return oss.str(); } std::string toString( unsigned long value ) { std::ostringstream oss; oss << value; if( value > Detail::hexThreshold ) oss << " (0x" << std::hex << value << ')'; return oss.str(); } std::string toString( unsigned int value ) { return Catch::toString( static_cast( value ) ); } template std::string fpToString( T value, int precision ) { std::ostringstream oss; oss << std::setprecision( precision ) << std::fixed << value; std::string d = oss.str(); std::size_t i = d.find_last_not_of( '0' ); if( i != std::string::npos && i != d.size()-1 ) { if( d[i] == '.' ) i++; d = d.substr( 0, i+1 ); } return d; } std::string toString( const double value ) { return fpToString( value, 10 ); } std::string toString( const float value ) { return fpToString( value, 5 ) + 'f'; } std::string toString( bool value ) { return value ? "true" : "false"; } std::string toString( char value ) { if ( value == '\r' ) return "'\\r'"; if ( value == '\f' ) return "'\\f'"; if ( value == '\n' ) return "'\\n'"; if ( value == '\t' ) return "'\\t'"; if ( '\0' <= value && value < ' ' ) return toString( static_cast( value ) ); char chstr[] = "' '"; chstr[1] = value; return chstr; } std::string toString( signed char value ) { return toString( static_cast( value ) ); } std::string toString( unsigned char value ) { return toString( static_cast( value ) ); } #ifdef CATCH_CONFIG_CPP11_LONG_LONG std::string toString( long long value ) { std::ostringstream oss; oss << value; if( value > Detail::hexThreshold ) oss << " (0x" << std::hex << value << ')'; return oss.str(); } std::string toString( unsigned long long value ) { std::ostringstream oss; oss << value; if( value > Detail::hexThreshold ) oss << " (0x" << std::hex << value << ')'; return oss.str(); } #endif #ifdef CATCH_CONFIG_CPP11_NULLPTR std::string toString( std::nullptr_t ) { return "nullptr"; } #endif #ifdef __OBJC__ std::string toString( NSString const * const& nsstring ) { if( !nsstring ) return "nil"; return "@" + toString([nsstring UTF8String]); } std::string toString( NSString * CATCH_ARC_STRONG & nsstring ) { if( !nsstring ) return "nil"; return "@" + toString([nsstring UTF8String]); } std::string toString( NSObject* const& nsObject ) { return toString( [nsObject description] ); } #endif } // end namespace Catch // #included from: catch_result_builder.hpp #define TWOBLUECUBES_CATCH_RESULT_BUILDER_HPP_INCLUDED namespace Catch { ResultBuilder::ResultBuilder( char const* macroName, SourceLineInfo const& lineInfo, char const* capturedExpression, ResultDisposition::Flags resultDisposition, char const* secondArg ) : m_assertionInfo( macroName, lineInfo, capturedExpression, resultDisposition, secondArg ), m_shouldDebugBreak( false ), m_shouldThrow( false ), m_guardException( false ) { m_stream().oss.str(""); } ResultBuilder::~ResultBuilder() { #if defined(CATCH_CONFIG_FAST_COMPILE) if ( m_guardException ) { m_stream().oss << "Exception translation was disabled by CATCH_CONFIG_FAST_COMPILE"; captureResult( ResultWas::ThrewException ); getCurrentContext().getResultCapture()->exceptionEarlyReported(); } #endif } ResultBuilder& ResultBuilder::setResultType( ResultWas::OfType result ) { m_data.resultType = result; return *this; } ResultBuilder& 
ResultBuilder::setResultType( bool result ) { m_data.resultType = result ? ResultWas::Ok : ResultWas::ExpressionFailed; return *this; } void ResultBuilder::endExpression( DecomposedExpression const& expr ) { AssertionResult result = build( expr ); handleResult( result ); } void ResultBuilder::useActiveException( ResultDisposition::Flags resultDisposition ) { m_assertionInfo.resultDisposition = resultDisposition; m_stream().oss << Catch::translateActiveException(); captureResult( ResultWas::ThrewException ); } void ResultBuilder::captureResult( ResultWas::OfType resultType ) { setResultType( resultType ); captureExpression(); } void ResultBuilder::captureExpectedException( std::string const& expectedMessage ) { if( expectedMessage.empty() ) captureExpectedException( Matchers::Impl::MatchAllOf() ); else captureExpectedException( Matchers::Equals( expectedMessage ) ); } void ResultBuilder::captureExpectedException( Matchers::Impl::MatcherBase const& matcher ) { assert( !isFalseTest( m_assertionInfo.resultDisposition ) ); AssertionResultData data = m_data; data.resultType = ResultWas::Ok; data.reconstructedExpression = capturedExpressionWithSecondArgument(m_assertionInfo.capturedExpression, m_assertionInfo.secondArg); std::string actualMessage = Catch::translateActiveException(); if( !matcher.match( actualMessage ) ) { data.resultType = ResultWas::ExpressionFailed; data.reconstructedExpression = actualMessage; } AssertionResult result( m_assertionInfo, data ); handleResult( result ); } void ResultBuilder::captureExpression() { AssertionResult result = build(); handleResult( result ); } void ResultBuilder::handleResult( AssertionResult const& result ) { getResultCapture().assertionEnded( result ); if( !result.isOk() ) { if( getCurrentContext().getConfig()->shouldDebugBreak() ) m_shouldDebugBreak = true; if( getCurrentContext().getRunner()->aborting() || (m_assertionInfo.resultDisposition & ResultDisposition::Normal) ) m_shouldThrow = true; } } void ResultBuilder::react() { #if defined(CATCH_CONFIG_FAST_COMPILE) if (m_shouldDebugBreak) { /////////////////////////////////////////////////////////////////// // To inspect the state during test, you need to go one level up the callstack // To go back to the test and change execution, jump over the throw statement /////////////////////////////////////////////////////////////////// CATCH_BREAK_INTO_DEBUGGER(); } #endif if( m_shouldThrow ) throw Catch::TestFailureException(); } bool ResultBuilder::shouldDebugBreak() const { return m_shouldDebugBreak; } bool ResultBuilder::allowThrows() const { return getCurrentContext().getConfig()->allowThrows(); } AssertionResult ResultBuilder::build() const { return build( *this ); } // CAVEAT: The returned AssertionResult stores a pointer to the argument expr, // a temporary DecomposedExpression, which in turn holds references to // operands, possibly temporary as well. // It should immediately be passed to handleResult; if the expression // needs to be reported, its string expansion must be composed before // the temporaries are destroyed. 
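// For example, in CHECK( makeLabel() == "foo" ), where makeLabel is an
// illustrative function returning std::string by value, the decomposed
// expression holds references into that temporary; the temporary dies at the
// end of the full expression, so reconstructing the string expansion any
// later would read dangling references.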
AssertionResult ResultBuilder::build( DecomposedExpression const& expr ) const { assert( m_data.resultType != ResultWas::Unknown ); AssertionResultData data = m_data; // Flip bool results if FalseTest flag is set if( isFalseTest( m_assertionInfo.resultDisposition ) ) { data.negate( expr.isBinaryExpression() ); } data.message = m_stream().oss.str(); data.decomposedExpression = &expr; // for lazy reconstruction return AssertionResult( m_assertionInfo, data ); } void ResultBuilder::reconstructExpression( std::string& dest ) const { dest = capturedExpressionWithSecondArgument(m_assertionInfo.capturedExpression, m_assertionInfo.secondArg); } void ResultBuilder::setExceptionGuard() { m_guardException = true; } void ResultBuilder::unsetExceptionGuard() { m_guardException = false; } } // end namespace Catch // #included from: catch_tag_alias_registry.hpp #define TWOBLUECUBES_CATCH_TAG_ALIAS_REGISTRY_HPP_INCLUDED namespace Catch { TagAliasRegistry::~TagAliasRegistry() {} Option TagAliasRegistry::find( std::string const& alias ) const { std::map::const_iterator it = m_registry.find( alias ); if( it != m_registry.end() ) return it->second; else return Option(); } std::string TagAliasRegistry::expandAliases( std::string const& unexpandedTestSpec ) const { std::string expandedTestSpec = unexpandedTestSpec; for( std::map::const_iterator it = m_registry.begin(), itEnd = m_registry.end(); it != itEnd; ++it ) { std::size_t pos = expandedTestSpec.find( it->first ); if( pos != std::string::npos ) { expandedTestSpec = expandedTestSpec.substr( 0, pos ) + it->second.tag + expandedTestSpec.substr( pos + it->first.size() ); } } return expandedTestSpec; } void TagAliasRegistry::add( std::string const& alias, std::string const& tag, SourceLineInfo const& lineInfo ) { if( !startsWith( alias, "[@" ) || !endsWith( alias, ']' ) ) { std::ostringstream oss; oss << Colour( Colour::Red ) << "error: tag alias, \"" << alias << "\" is not of the form [@alias name].\n" << Colour( Colour::FileName ) << lineInfo << '\n'; throw std::domain_error( oss.str().c_str() ); } if( !m_registry.insert( std::make_pair( alias, TagAlias( tag, lineInfo ) ) ).second ) { std::ostringstream oss; oss << Colour( Colour::Red ) << "error: tag alias, \"" << alias << "\" already registered.\n" << "\tFirst seen at " << Colour( Colour::Red ) << find(alias)->lineInfo << '\n' << Colour( Colour::Red ) << "\tRedefined at " << Colour( Colour::FileName) << lineInfo << '\n'; throw std::domain_error( oss.str().c_str() ); } } ITagAliasRegistry::~ITagAliasRegistry() {} ITagAliasRegistry const& ITagAliasRegistry::get() { return getRegistryHub().getTagAliasRegistry(); } RegistrarForTagAliases::RegistrarForTagAliases( char const* alias, char const* tag, SourceLineInfo const& lineInfo ) { getMutableRegistryHub().registerTagAlias( alias, tag, lineInfo ); } } // end namespace Catch // #included from: catch_matchers_string.hpp namespace Catch { namespace Matchers { namespace StdString { CasedString::CasedString( std::string const& str, CaseSensitive::Choice caseSensitivity ) : m_caseSensitivity( caseSensitivity ), m_str( adjustString( str ) ) {} std::string CasedString::adjustString( std::string const& str ) const { return m_caseSensitivity == CaseSensitive::No ? toLower( str ) : str; } std::string CasedString::caseSensitivitySuffix() const { return m_caseSensitivity == CaseSensitive::No ? 
" (case insensitive)" : std::string(); } StringMatcherBase::StringMatcherBase( std::string const& operation, CasedString const& comparator ) : m_comparator( comparator ), m_operation( operation ) { } std::string StringMatcherBase::describe() const { std::string description; description.reserve(5 + m_operation.size() + m_comparator.m_str.size() + m_comparator.caseSensitivitySuffix().size()); description += m_operation; description += ": \""; description += m_comparator.m_str; description += "\""; description += m_comparator.caseSensitivitySuffix(); return description; } EqualsMatcher::EqualsMatcher( CasedString const& comparator ) : StringMatcherBase( "equals", comparator ) {} bool EqualsMatcher::match( std::string const& source ) const { return m_comparator.adjustString( source ) == m_comparator.m_str; } ContainsMatcher::ContainsMatcher( CasedString const& comparator ) : StringMatcherBase( "contains", comparator ) {} bool ContainsMatcher::match( std::string const& source ) const { return contains( m_comparator.adjustString( source ), m_comparator.m_str ); } StartsWithMatcher::StartsWithMatcher( CasedString const& comparator ) : StringMatcherBase( "starts with", comparator ) {} bool StartsWithMatcher::match( std::string const& source ) const { return startsWith( m_comparator.adjustString( source ), m_comparator.m_str ); } EndsWithMatcher::EndsWithMatcher( CasedString const& comparator ) : StringMatcherBase( "ends with", comparator ) {} bool EndsWithMatcher::match( std::string const& source ) const { return endsWith( m_comparator.adjustString( source ), m_comparator.m_str ); } } // namespace StdString StdString::EqualsMatcher Equals( std::string const& str, CaseSensitive::Choice caseSensitivity ) { return StdString::EqualsMatcher( StdString::CasedString( str, caseSensitivity) ); } StdString::ContainsMatcher Contains( std::string const& str, CaseSensitive::Choice caseSensitivity ) { return StdString::ContainsMatcher( StdString::CasedString( str, caseSensitivity) ); } StdString::EndsWithMatcher EndsWith( std::string const& str, CaseSensitive::Choice caseSensitivity ) { return StdString::EndsWithMatcher( StdString::CasedString( str, caseSensitivity) ); } StdString::StartsWithMatcher StartsWith( std::string const& str, CaseSensitive::Choice caseSensitivity ) { return StdString::StartsWithMatcher( StdString::CasedString( str, caseSensitivity) ); } } // namespace Matchers } // namespace Catch // #included from: ../reporters/catch_reporter_multi.hpp #define TWOBLUECUBES_CATCH_REPORTER_MULTI_HPP_INCLUDED namespace Catch { class MultipleReporters : public SharedImpl { typedef std::vector > Reporters; Reporters m_reporters; public: void add( Ptr const& reporter ) { m_reporters.push_back( reporter ); } public: // IStreamingReporter virtual ReporterPreferences getPreferences() const CATCH_OVERRIDE { return m_reporters[0]->getPreferences(); } virtual void noMatchingTestCases( std::string const& spec ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->noMatchingTestCases( spec ); } virtual void testRunStarting( TestRunInfo const& testRunInfo ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->testRunStarting( testRunInfo ); } virtual void testGroupStarting( GroupInfo const& groupInfo ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->testGroupStarting( groupInfo ); } virtual 
void testCaseStarting( TestCaseInfo const& testInfo ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->testCaseStarting( testInfo ); } virtual void sectionStarting( SectionInfo const& sectionInfo ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->sectionStarting( sectionInfo ); } virtual void assertionStarting( AssertionInfo const& assertionInfo ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->assertionStarting( assertionInfo ); } // The return value indicates if the messages buffer should be cleared: virtual bool assertionEnded( AssertionStats const& assertionStats ) CATCH_OVERRIDE { bool clearBuffer = false; for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) clearBuffer |= (*it)->assertionEnded( assertionStats ); return clearBuffer; } virtual void sectionEnded( SectionStats const& sectionStats ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->sectionEnded( sectionStats ); } virtual void testCaseEnded( TestCaseStats const& testCaseStats ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->testCaseEnded( testCaseStats ); } virtual void testGroupEnded( TestGroupStats const& testGroupStats ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->testGroupEnded( testGroupStats ); } virtual void testRunEnded( TestRunStats const& testRunStats ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->testRunEnded( testRunStats ); } virtual void skipTest( TestCaseInfo const& testInfo ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->skipTest( testInfo ); } virtual MultipleReporters* tryAsMulti() CATCH_OVERRIDE { return this; } }; Ptr addReporter( Ptr const& existingReporter, Ptr const& additionalReporter ) { Ptr resultingReporter; if( existingReporter ) { MultipleReporters* multi = existingReporter->tryAsMulti(); if( !multi ) { multi = new MultipleReporters; resultingReporter = Ptr( multi ); if( existingReporter ) multi->add( existingReporter ); } else resultingReporter = existingReporter; multi->add( additionalReporter ); } else resultingReporter = additionalReporter; return resultingReporter; } } // end namespace Catch // #included from: ../reporters/catch_reporter_xml.hpp #define TWOBLUECUBES_CATCH_REPORTER_XML_HPP_INCLUDED // #included from: catch_reporter_bases.hpp #define TWOBLUECUBES_CATCH_REPORTER_BASES_HPP_INCLUDED #include #include #include #include namespace Catch { namespace { // Because formatting using c++ streams is stateful, drop down to C is required // Alternatively we could use stringstream, but its performance is... not good. 
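// For comparison (not part of Catch; the function name below is hypothetical),
// the stringstream alternative dismissed in the note above would look roughly
// like this:
//
//   std::string getFormattedDurationViaStream( double duration ) {
//       std::ostringstream oss;
//       oss << std::fixed << std::setprecision( 3 ) << duration;
//       return oss.str();
//   }
//
// It avoids the fixed-size buffer, but constructing a stream (with its locale
// machinery) on every call is noticeably slower than the sprintf used below.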
std::string getFormattedDuration( double duration ) {
    // Max exponent + 1 is required to represent the whole part
    // + 1 for decimal point
    // + 3 for the 3 decimal places
    // + 1 for null terminator
    const size_t maxDoubleSize = DBL_MAX_10_EXP + 1 + 1 + 3 + 1;
    char buffer[maxDoubleSize];

    // Save previous errno, to prevent sprintf from overwriting it
    ErrnoGuard guard;
#ifdef _MSC_VER
    sprintf_s(buffer, "%.3f", duration);
#else
    sprintf(buffer, "%.3f", duration);
#endif
    return std::string(buffer);
}
}

struct StreamingReporterBase : SharedImpl<IStreamingReporter> {

    StreamingReporterBase( ReporterConfig const& _config )
    :   m_config( _config.fullConfig() ),
        stream( _config.stream() )
    {
        m_reporterPrefs.shouldRedirectStdOut = false;
    }

    virtual ReporterPreferences getPreferences() const CATCH_OVERRIDE {
        return m_reporterPrefs;
    }

    virtual ~StreamingReporterBase() CATCH_OVERRIDE;

    virtual void noMatchingTestCases( std::string const& ) CATCH_OVERRIDE {}

    virtual void testRunStarting( TestRunInfo const& _testRunInfo ) CATCH_OVERRIDE {
        currentTestRunInfo = _testRunInfo;
    }
    virtual void testGroupStarting( GroupInfo const& _groupInfo ) CATCH_OVERRIDE {
        currentGroupInfo = _groupInfo;
    }

    virtual void testCaseStarting( TestCaseInfo const& _testInfo ) CATCH_OVERRIDE {
        currentTestCaseInfo = _testInfo;
    }
    virtual void sectionStarting( SectionInfo const& _sectionInfo ) CATCH_OVERRIDE {
        m_sectionStack.push_back( _sectionInfo );
    }

    virtual void sectionEnded( SectionStats const& /* _sectionStats */ ) CATCH_OVERRIDE {
        m_sectionStack.pop_back();
    }
    virtual void testCaseEnded( TestCaseStats const& /* _testCaseStats */ ) CATCH_OVERRIDE {
        currentTestCaseInfo.reset();
    }
    virtual void testGroupEnded( TestGroupStats const& /* _testGroupStats */ ) CATCH_OVERRIDE {
        currentGroupInfo.reset();
    }
    virtual void testRunEnded( TestRunStats const& /* _testRunStats */ ) CATCH_OVERRIDE {
        currentTestCaseInfo.reset();
        currentGroupInfo.reset();
        currentTestRunInfo.reset();
    }

    virtual void skipTest( TestCaseInfo const& ) CATCH_OVERRIDE {
        // Don't do anything with this by default.
        // It can optionally be overridden in the derived class.
    }

    Ptr<IConfig const> m_config;
    std::ostream& stream;

    LazyStat<TestRunInfo> currentTestRunInfo;
    LazyStat<GroupInfo> currentGroupInfo;
    LazyStat<TestCaseInfo> currentTestCaseInfo;

    std::vector<SectionInfo> m_sectionStack;
    ReporterPreferences m_reporterPrefs;
};

struct CumulativeReporterBase : SharedImpl<IStreamingReporter> {
    template<typename T>
    struct Node : SharedImpl<> {
        explicit Node( T const& _value ) : value( _value ) {}
        virtual ~Node() {}

        typedef std::vector<Ptr<Node> > ChildNodes;
        T value;
        ChildNodes children;
    };
    struct SectionNode : SharedImpl<> {
        explicit SectionNode( SectionStats const& _stats ) : stats( _stats ) {}
        virtual ~SectionNode();

        bool operator == ( SectionNode const& other ) const {
            return stats.sectionInfo.lineInfo == other.stats.sectionInfo.lineInfo;
        }
        bool operator == ( Ptr<SectionNode> const& other ) const {
            return operator==( *other );
        }

        SectionStats stats;
        typedef std::vector<Ptr<SectionNode> > ChildSections;
        typedef std::vector<AssertionStats> Assertions;
        ChildSections childSections;
        Assertions assertions;
        std::string stdOut;
        std::string stdErr;
    };

    struct BySectionInfo {
        BySectionInfo( SectionInfo const& other ) : m_other( other ) {}
        BySectionInfo( BySectionInfo const& other ) : m_other( other.m_other ) {}
        bool operator() ( Ptr<SectionNode> const& node ) const {
            return node->stats.sectionInfo.lineInfo == m_other.lineInfo;
        }
    private:
        void operator=( BySectionInfo const& );
        SectionInfo const& m_other;
    };

    typedef Node<TestCaseStats> TestCaseNode;
    typedef Node<TestGroupStats> TestGroupNode;
    typedef Node<TestRunStats> TestRunNode;

    CumulativeReporterBase( ReporterConfig const& _config )
    :   m_config( _config.fullConfig() ),
        stream( _config.stream() )
    {
        m_reporterPrefs.shouldRedirectStdOut = false;
    }
    ~CumulativeReporterBase();

    virtual ReporterPreferences getPreferences() const CATCH_OVERRIDE {
        return m_reporterPrefs;
    }

    virtual void testRunStarting( TestRunInfo const& ) CATCH_OVERRIDE {}
    virtual void testGroupStarting( GroupInfo const& ) CATCH_OVERRIDE {}

    virtual void testCaseStarting( TestCaseInfo const& ) CATCH_OVERRIDE {}

    virtual void sectionStarting( SectionInfo const& sectionInfo ) CATCH_OVERRIDE {
        SectionStats incompleteStats( sectionInfo, Counts(), 0, false );
        Ptr<SectionNode> node;
        if( m_sectionStack.empty() ) {
            if( !m_rootSection )
                m_rootSection = new SectionNode( incompleteStats );
            node = m_rootSection;
        }
        else {
            SectionNode& parentNode = *m_sectionStack.back();
            SectionNode::ChildSections::const_iterator it =
                std::find_if(   parentNode.childSections.begin(),
                                parentNode.childSections.end(),
                                BySectionInfo( sectionInfo ) );
            if( it == parentNode.childSections.end() ) {
                node = new SectionNode( incompleteStats );
                parentNode.childSections.push_back( node );
            }
            else
                node = *it;
        }
        m_sectionStack.push_back( node );
        m_deepestSection = node;
    }

    virtual void assertionStarting( AssertionInfo const& ) CATCH_OVERRIDE {}

    virtual bool assertionEnded( AssertionStats const& assertionStats ) CATCH_OVERRIDE {
        assert( !m_sectionStack.empty() );
        SectionNode& sectionNode = *m_sectionStack.back();
        sectionNode.assertions.push_back( assertionStats );
        // AssertionResult holds a pointer to a temporary DecomposedExpression,
        // which getExpandedExpression() calls to build the expression string.
        // Our section stack copy of the assertionResult will likely outlive the
        // temporary, so it must be expanded or discarded now to avoid calling
        // a destroyed object later.
        prepareExpandedExpression( sectionNode.assertions.back().assertionResult );
        return true;
    }
    virtual void sectionEnded( SectionStats const& sectionStats ) CATCH_OVERRIDE {
        assert( !m_sectionStack.empty() );
        SectionNode& node = *m_sectionStack.back();
        node.stats = sectionStats;
        m_sectionStack.pop_back();
    }
    virtual void testCaseEnded( TestCaseStats const& testCaseStats ) CATCH_OVERRIDE {
        Ptr<TestCaseNode> node = new TestCaseNode( testCaseStats );
        assert( m_sectionStack.size() == 0 );
        node->children.push_back( m_rootSection );
        m_testCases.push_back( node );
        m_rootSection.reset();

        assert( m_deepestSection );
        m_deepestSection->stdOut = testCaseStats.stdOut;
        m_deepestSection->stdErr = testCaseStats.stdErr;
    }
    virtual void testGroupEnded( TestGroupStats const& testGroupStats ) CATCH_OVERRIDE {
        Ptr<TestGroupNode> node = new TestGroupNode( testGroupStats );
        node->children.swap( m_testCases );
        m_testGroups.push_back( node );
    }
    virtual void testRunEnded( TestRunStats const& testRunStats ) CATCH_OVERRIDE {
        Ptr<TestRunNode> node = new TestRunNode( testRunStats );
        node->children.swap( m_testGroups );
        m_testRuns.push_back( node );
        testRunEndedCumulative();
    }
    virtual void testRunEndedCumulative() = 0;

    virtual void skipTest( TestCaseInfo const& ) CATCH_OVERRIDE {}

    virtual void prepareExpandedExpression( AssertionResult& result ) const {
        if( result.isOk() )
            result.discardDecomposedExpression();
        else
            result.expandDecomposedExpression();
    }

    Ptr<IConfig const> m_config;
    std::ostream& stream;
    std::vector<AssertionStats> m_assertions;
    std::vector<std::vector<Ptr<SectionNode> > > m_sections;
    std::vector<Ptr<TestCaseNode> > m_testCases;
    std::vector<Ptr<TestGroupNode> > m_testGroups;

    std::vector<Ptr<TestRunNode> > m_testRuns;

    Ptr<SectionNode> m_rootSection;
    Ptr<SectionNode> m_deepestSection;
    std::vector<Ptr<SectionNode> > m_sectionStack;
    ReporterPreferences m_reporterPrefs;
};

template<char C>
char const* getLineOfChars() {
    static char line[CATCH_CONFIG_CONSOLE_WIDTH] = {0};
    if( !*line ) {
        std::memset( line, C, CATCH_CONFIG_CONSOLE_WIDTH-1 );
        line[CATCH_CONFIG_CONSOLE_WIDTH-1] = 0;
    }
    return line;
}

struct TestEventListenerBase : StreamingReporterBase {
    TestEventListenerBase( ReporterConfig const& _config )
    :   StreamingReporterBase( _config )
    {}

    virtual void assertionStarting( AssertionInfo const& ) CATCH_OVERRIDE {}
    virtual bool assertionEnded( AssertionStats const& ) CATCH_OVERRIDE {
        return false;
    }
};

} // end namespace Catch

// #included from: ../internal/catch_reporter_registrars.hpp
#define TWOBLUECUBES_CATCH_REPORTER_REGISTRARS_HPP_INCLUDED

namespace Catch {

    template<typename T>
    class LegacyReporterRegistrar {

        class ReporterFactory : public IReporterFactory {
            virtual IStreamingReporter* create( ReporterConfig const& config ) const {
                return new LegacyReporterAdapter( new T( config ) );
            }

            virtual std::string getDescription() const {
                return T::getDescription();
            }
        };

    public:

        LegacyReporterRegistrar( std::string const& name ) {
            getMutableRegistryHub().registerReporter( name, new ReporterFactory() );
        }
    };

    template<typename T>
    class ReporterRegistrar {

        class ReporterFactory : public SharedImpl<IReporterFactory> {

            // *** Please Note ***:
            // - If you end up here looking at a compiler error because it's trying to register
            // your custom reporter class be aware that the native reporter interface has changed
            // to IStreamingReporter. The "legacy" interface, IReporter, is still supported via
            // an adapter. Just use REGISTER_LEGACY_REPORTER to take advantage of the adapter.
            // However please consider updating to the new interface as the old one is now
            // deprecated and will probably be removed quite soon!
            // Please contact me via github if you have any questions at all about this.
            // In fact, ideally, please contact me anyway to let me know you've hit this - as I have
            // no idea who is actually using custom reporters at all (possibly no-one!).
            // The new interface is designed to minimise exposure to interface changes in the future.

            virtual IStreamingReporter* create( ReporterConfig const& config ) const {
                return new T( config );
            }

            virtual std::string getDescription() const {
                return T::getDescription();
            }
        };

    public:

        ReporterRegistrar( std::string const& name ) {
            getMutableRegistryHub().registerReporter( name, new ReporterFactory() );
        }
    };

    template<typename T>
    class ListenerRegistrar {

        class ListenerFactory : public SharedImpl<IReporterFactory> {

            virtual IStreamingReporter* create( ReporterConfig const& config ) const {
                return new T( config );
            }
            virtual std::string getDescription() const {
                return std::string();
            }
        };

    public:

        ListenerRegistrar() {
            getMutableRegistryHub().registerListener( new ListenerFactory() );
        }
    };
}

#define INTERNAL_CATCH_REGISTER_LEGACY_REPORTER( name, reporterType ) \
    namespace{ Catch::LegacyReporterRegistrar<reporterType> catch_internal_RegistrarFor##reporterType( name ); }

#define INTERNAL_CATCH_REGISTER_REPORTER( name, reporterType ) \
    namespace{ Catch::ReporterRegistrar<reporterType> catch_internal_RegistrarFor##reporterType( name ); }

// Deprecated - use the form without INTERNAL_
#define INTERNAL_CATCH_REGISTER_LISTENER( listenerType ) \
    namespace{ Catch::ListenerRegistrar<listenerType> catch_internal_RegistrarFor##listenerType; }

#define CATCH_REGISTER_LISTENER( listenerType ) \
    namespace{ Catch::ListenerRegistrar<listenerType> catch_internal_RegistrarFor##listenerType; }

// #included from: ../internal/catch_xmlwriter.hpp
#define TWOBLUECUBES_CATCH_XMLWRITER_HPP_INCLUDED

#include <sstream>
#include <string>
#include <vector>
#include <iomanip>

namespace Catch {

    class XmlEncode {
    public:
        enum ForWhat { ForTextNodes, ForAttributes };

        XmlEncode( std::string const& str, ForWhat forWhat = ForTextNodes )
        :   m_str( str ),
            m_forWhat( forWhat )
        {}

        void encodeTo( std::ostream& os ) const {

            // Apostrophe escaping not necessary if we always use " to write attributes
            // (see: http://www.w3.org/TR/xml/#syntax)

            for( std::size_t i = 0; i < m_str.size(); ++ i ) {
                char c = m_str[i];
                switch( c ) {
                    case '<':   os << "&lt;"; break;
                    case '&':   os << "&amp;"; break;

                    case '>':
                        // See: http://www.w3.org/TR/xml/#syntax
                        if( i > 2 && m_str[i-1] == ']' && m_str[i-2] == ']' )
                            os << "&gt;";
                        else
                            os << c;
                        break;

                    case '\"':
                        if( m_forWhat == ForAttributes )
                            os << "&quot;";
                        else
                            os << c;
                        break;

                    default:
                        // Escape control chars - based on contribution by @espenalb in PR #465 and
                        // by @mrpi PR #588
                        if ( ( c >= 0 && c < '\x09' ) || ( c > '\x0D' && c < '\x20') || c=='\x7F' )
                            os << "\\x" << std::uppercase << std::hex << std::setfill('0') << std::setw(2)
                               << static_cast<int>( c );
                        else
                            os << c;
                }
            }
        }

        friend std::ostream& operator << ( std::ostream& os, XmlEncode const& xmlEncode ) {
            xmlEncode.encodeTo( os );
            return os;
        }

    private:
        std::string m_str;
        ForWhat m_forWhat;
    };

    class XmlWriter {
    public:

        class ScopedElement {
        public:
            ScopedElement( XmlWriter* writer )
            :   m_writer( writer )
            {}

            ScopedElement( ScopedElement const& other )
            :   m_writer( other.m_writer ){
                other.m_writer = CATCH_NULL;
            }

            ~ScopedElement() {
                if( m_writer )
                    m_writer->endElement();
            }

            ScopedElement& writeText( std::string const& text, bool indent = true ) {
                m_writer->writeText( text, indent );
                return *this;
            }

            template<typename T>
            ScopedElement& writeAttribute( std::string const& name, T const& attribute ) {
                m_writer->writeAttribute( name, attribute );
                return *this;
            }

        private:
            mutable XmlWriter* m_writer;
        };

        XmlWriter()
        :   m_tagIsOpen( false ),
            m_needsNewline( false ),
            m_os( Catch::cout() )
        {
            writeDeclaration();
        }

        XmlWriter( std::ostream& os )
        :   m_tagIsOpen( false ),
            m_needsNewline( false ),
            m_os( os )
        {
            writeDeclaration();
        }

        ~XmlWriter() {
            while( !m_tags.empty() )
                endElement();
        }

        XmlWriter& startElement( std::string const& name ) {
            ensureTagClosed();
            newlineIfNecessary();
            m_os << m_indent << '<' << name;
            m_tags.push_back( name );
            m_indent += "  ";
            m_tagIsOpen = true;
            return *this;
        }

        ScopedElement scopedElement( std::string const& name ) {
            ScopedElement scoped( this );
            startElement( name );
            return scoped;
        }

        XmlWriter& endElement() {
            newlineIfNecessary();
            m_indent = m_indent.substr( 0, m_indent.size()-2 );
            if( m_tagIsOpen ) {
                m_os << "/>";
                m_tagIsOpen = false;
            }
            else {
                m_os << m_indent << "</" << m_tags.back() << ">";
            }
            m_os << std::endl;
            m_tags.pop_back();
            return *this;
        }

        XmlWriter& writeAttribute( std::string const& name, std::string const& attribute ) {
            if( !name.empty() && !attribute.empty() )
                m_os << ' ' << name << "=\"" << XmlEncode( attribute, XmlEncode::ForAttributes ) << '"';
            return *this;
        }

        XmlWriter& writeAttribute( std::string const& name, bool attribute ) {
            m_os << ' ' << name << "=\"" << ( attribute ? "true" : "false" ) << '"';
            return *this;
        }

        template<typename T>
        XmlWriter& writeAttribute( std::string const& name, T const& attribute ) {
            std::ostringstream oss;
            oss << attribute;
            return writeAttribute( name, oss.str() );
        }

        XmlWriter& writeText( std::string const& text, bool indent = true ) {
            if( !text.empty() ){
                bool tagWasOpen = m_tagIsOpen;
                ensureTagClosed();
                if( tagWasOpen && indent )
                    m_os << m_indent;
                m_os << XmlEncode( text );
                m_needsNewline = true;
            }
            return *this;
        }

        XmlWriter& writeComment( std::string const& text ) {
            ensureTagClosed();
            m_os << m_indent << "<!--" << text << "-->";
            m_needsNewline = true;
            return *this;
        }

        void writeStylesheetRef( std::string const& url ) {
            m_os << "<?xml-stylesheet type=\"text/xsl\" href=\"" << url << "\"?>\n";
        }

        XmlWriter& writeBlankLine() {
            ensureTagClosed();
            m_os << '\n';
            return *this;
        }

        void ensureTagClosed() {
            if( m_tagIsOpen ) {
                m_os << ">" << std::endl;
                m_tagIsOpen = false;
            }
        }

    private:
        XmlWriter( XmlWriter const& );
        void operator=( XmlWriter const& );

        void writeDeclaration() {
            m_os << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n";
        }

        void newlineIfNecessary() {
            if( m_needsNewline ) {
                m_os << std::endl;
                m_needsNewline = false;
            }
        }

        bool m_tagIsOpen;
        bool m_needsNewline;

        std::vector<std::string> m_tags;
        std::string m_indent;
        std::ostream& m_os;
    };

}

namespace Catch {
    class XmlReporter : public StreamingReporterBase {
    public:
        XmlReporter( ReporterConfig const& _config )
        :   StreamingReporterBase( _config ),
            m_xml(_config.stream()),
            m_sectionDepth( 0 )
        {
            m_reporterPrefs.shouldRedirectStdOut = true;
        }

        virtual ~XmlReporter() CATCH_OVERRIDE;

        static std::string getDescription() {
            return "Reports test results as an XML document";
        }

        virtual std::string getStylesheetRef() const {
            return std::string();
        }

        void writeSourceInfo( SourceLineInfo const& sourceInfo ) {
            m_xml
                .writeAttribute( "filename", sourceInfo.file )
                .writeAttribute( "line", sourceInfo.line );
        }

    public: // StreamingReporterBase

        virtual void noMatchingTestCases( std::string const& s ) CATCH_OVERRIDE {
            StreamingReporterBase::noMatchingTestCases( s );
        }

        virtual void testRunStarting( TestRunInfo const& testInfo ) CATCH_OVERRIDE {
            StreamingReporterBase::testRunStarting( testInfo );
            std::string stylesheetRef = getStylesheetRef();
            if( !stylesheetRef.empty() )
                m_xml.writeStylesheetRef( stylesheetRef );
            m_xml.startElement( "Catch" );
            if( !m_config->name().empty() )
                m_xml.writeAttribute( "name", m_config->name() );
        }

        virtual void
testGroupStarting( GroupInfo const& groupInfo ) CATCH_OVERRIDE { StreamingReporterBase::testGroupStarting( groupInfo ); m_xml.startElement( "Group" ) .writeAttribute( "name", groupInfo.name ); } virtual void testCaseStarting( TestCaseInfo const& testInfo ) CATCH_OVERRIDE { StreamingReporterBase::testCaseStarting(testInfo); m_xml.startElement( "TestCase" ) .writeAttribute( "name", trim( testInfo.name ) ) .writeAttribute( "description", testInfo.description ) .writeAttribute( "tags", testInfo.tagsAsString ); writeSourceInfo( testInfo.lineInfo ); if ( m_config->showDurations() == ShowDurations::Always ) m_testCaseTimer.start(); m_xml.ensureTagClosed(); } virtual void sectionStarting( SectionInfo const& sectionInfo ) CATCH_OVERRIDE { StreamingReporterBase::sectionStarting( sectionInfo ); if( m_sectionDepth++ > 0 ) { m_xml.startElement( "Section" ) .writeAttribute( "name", trim( sectionInfo.name ) ) .writeAttribute( "description", sectionInfo.description ); writeSourceInfo( sectionInfo.lineInfo ); m_xml.ensureTagClosed(); } } virtual void assertionStarting( AssertionInfo const& ) CATCH_OVERRIDE { } virtual bool assertionEnded( AssertionStats const& assertionStats ) CATCH_OVERRIDE { AssertionResult const& result = assertionStats.assertionResult; bool includeResults = m_config->includeSuccessfulResults() || !result.isOk(); if( includeResults ) { // Print any info messages in tags. for( std::vector::const_iterator it = assertionStats.infoMessages.begin(), itEnd = assertionStats.infoMessages.end(); it != itEnd; ++it ) { if( it->type == ResultWas::Info ) { m_xml.scopedElement( "Info" ) .writeText( it->message ); } else if ( it->type == ResultWas::Warning ) { m_xml.scopedElement( "Warning" ) .writeText( it->message ); } } } // Drop out if result was successful but we're not printing them. if( !includeResults && result.getResultType() != ResultWas::Warning ) return true; // Print the expression if there is one. if( result.hasExpression() ) { m_xml.startElement( "Expression" ) .writeAttribute( "success", result.succeeded() ) .writeAttribute( "type", result.getTestMacroName() ); writeSourceInfo( result.getSourceInfo() ); m_xml.scopedElement( "Original" ) .writeText( result.getExpression() ); m_xml.scopedElement( "Expanded" ) .writeText( result.getExpandedExpression() ); } // And... Print a result applicable to each result type. 
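// For orientation (not part of Catch; attribute values invented): a failing
// REQUIRE serialised by the code above looks schematically like
//
//   <Expression success="false" type="REQUIRE" filename="test.cpp" line="7">
//     <Original>x == 1</Original>
//     <Expanded>2 == 1</Expanded>
//   </Expression>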
switch( result.getResultType() ) { case ResultWas::ThrewException: m_xml.startElement( "Exception" ); writeSourceInfo( result.getSourceInfo() ); m_xml.writeText( result.getMessage() ); m_xml.endElement(); break; case ResultWas::FatalErrorCondition: m_xml.startElement( "FatalErrorCondition" ); writeSourceInfo( result.getSourceInfo() ); m_xml.writeText( result.getMessage() ); m_xml.endElement(); break; case ResultWas::Info: m_xml.scopedElement( "Info" ) .writeText( result.getMessage() ); break; case ResultWas::Warning: // Warning will already have been written break; case ResultWas::ExplicitFailure: m_xml.startElement( "Failure" ); writeSourceInfo( result.getSourceInfo() ); m_xml.writeText( result.getMessage() ); m_xml.endElement(); break; default: break; } if( result.hasExpression() ) m_xml.endElement(); return true; } virtual void sectionEnded( SectionStats const& sectionStats ) CATCH_OVERRIDE { StreamingReporterBase::sectionEnded( sectionStats ); if( --m_sectionDepth > 0 ) { XmlWriter::ScopedElement e = m_xml.scopedElement( "OverallResults" ); e.writeAttribute( "successes", sectionStats.assertions.passed ); e.writeAttribute( "failures", sectionStats.assertions.failed ); e.writeAttribute( "expectedFailures", sectionStats.assertions.failedButOk ); if ( m_config->showDurations() == ShowDurations::Always ) e.writeAttribute( "durationInSeconds", sectionStats.durationInSeconds ); m_xml.endElement(); } } virtual void testCaseEnded( TestCaseStats const& testCaseStats ) CATCH_OVERRIDE { StreamingReporterBase::testCaseEnded( testCaseStats ); XmlWriter::ScopedElement e = m_xml.scopedElement( "OverallResult" ); e.writeAttribute( "success", testCaseStats.totals.assertions.allOk() ); if ( m_config->showDurations() == ShowDurations::Always ) e.writeAttribute( "durationInSeconds", m_testCaseTimer.getElapsedSeconds() ); if( !testCaseStats.stdOut.empty() ) m_xml.scopedElement( "StdOut" ).writeText( trim( testCaseStats.stdOut ), false ); if( !testCaseStats.stdErr.empty() ) m_xml.scopedElement( "StdErr" ).writeText( trim( testCaseStats.stdErr ), false ); m_xml.endElement(); } virtual void testGroupEnded( TestGroupStats const& testGroupStats ) CATCH_OVERRIDE { StreamingReporterBase::testGroupEnded( testGroupStats ); // TODO: Check testGroupStats.aborting and act accordingly. 
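// Schematically (counts invented), the group summary written below
// serialises as:
//
//   <OverallResults successes="42" failures="1" expectedFailures="0"/>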
m_xml.scopedElement( "OverallResults" ) .writeAttribute( "successes", testGroupStats.totals.assertions.passed ) .writeAttribute( "failures", testGroupStats.totals.assertions.failed ) .writeAttribute( "expectedFailures", testGroupStats.totals.assertions.failedButOk ); m_xml.endElement(); } virtual void testRunEnded( TestRunStats const& testRunStats ) CATCH_OVERRIDE { StreamingReporterBase::testRunEnded( testRunStats ); m_xml.scopedElement( "OverallResults" ) .writeAttribute( "successes", testRunStats.totals.assertions.passed ) .writeAttribute( "failures", testRunStats.totals.assertions.failed ) .writeAttribute( "expectedFailures", testRunStats.totals.assertions.failedButOk ); m_xml.endElement(); } private: Timer m_testCaseTimer; XmlWriter m_xml; int m_sectionDepth; }; INTERNAL_CATCH_REGISTER_REPORTER( "xml", XmlReporter ) } // end namespace Catch // #included from: ../reporters/catch_reporter_junit.hpp #define TWOBLUECUBES_CATCH_REPORTER_JUNIT_HPP_INCLUDED #include namespace Catch { namespace { std::string getCurrentTimestamp() { // Beware, this is not reentrant because of backward compatibility issues // Also, UTC only, again because of backward compatibility (%z is C++11) time_t rawtime; std::time(&rawtime); const size_t timeStampSize = sizeof("2017-01-16T17:06:45Z"); #ifdef _MSC_VER std::tm timeInfo = {}; gmtime_s(&timeInfo, &rawtime); #else std::tm* timeInfo; timeInfo = std::gmtime(&rawtime); #endif char timeStamp[timeStampSize]; const char * const fmt = "%Y-%m-%dT%H:%M:%SZ"; #ifdef _MSC_VER std::strftime(timeStamp, timeStampSize, fmt, &timeInfo); #else std::strftime(timeStamp, timeStampSize, fmt, timeInfo); #endif return std::string(timeStamp); } } class JunitReporter : public CumulativeReporterBase { public: JunitReporter( ReporterConfig const& _config ) : CumulativeReporterBase( _config ), xml( _config.stream() ), m_okToFail( false ) { m_reporterPrefs.shouldRedirectStdOut = true; } virtual ~JunitReporter() CATCH_OVERRIDE; static std::string getDescription() { return "Reports test results in an XML format that looks like Ant's junitreport target"; } virtual void noMatchingTestCases( std::string const& /*spec*/ ) CATCH_OVERRIDE {} virtual void testRunStarting( TestRunInfo const& runInfo ) CATCH_OVERRIDE { CumulativeReporterBase::testRunStarting( runInfo ); xml.startElement( "testsuites" ); } virtual void testGroupStarting( GroupInfo const& groupInfo ) CATCH_OVERRIDE { suiteTimer.start(); stdOutForSuite.str(""); stdErrForSuite.str(""); unexpectedExceptions = 0; CumulativeReporterBase::testGroupStarting( groupInfo ); } virtual void testCaseStarting( TestCaseInfo const& testCaseInfo ) CATCH_OVERRIDE { m_okToFail = testCaseInfo.okToFail(); } virtual bool assertionEnded( AssertionStats const& assertionStats ) CATCH_OVERRIDE { if( assertionStats.assertionResult.getResultType() == ResultWas::ThrewException && !m_okToFail ) unexpectedExceptions++; return CumulativeReporterBase::assertionEnded( assertionStats ); } virtual void testCaseEnded( TestCaseStats const& testCaseStats ) CATCH_OVERRIDE { stdOutForSuite << testCaseStats.stdOut; stdErrForSuite << testCaseStats.stdErr; CumulativeReporterBase::testCaseEnded( testCaseStats ); } virtual void testGroupEnded( TestGroupStats const& testGroupStats ) CATCH_OVERRIDE { double suiteTime = suiteTimer.getElapsedSeconds(); CumulativeReporterBase::testGroupEnded( testGroupStats ); writeGroup( *m_testGroups.back(), suiteTime ); } virtual void testRunEndedCumulative() CATCH_OVERRIDE { xml.endElement(); } void writeGroup( TestGroupNode const& groupNode, 
double suiteTime ) { XmlWriter::ScopedElement e = xml.scopedElement( "testsuite" ); TestGroupStats const& stats = groupNode.value; xml.writeAttribute( "name", stats.groupInfo.name ); xml.writeAttribute( "errors", unexpectedExceptions ); xml.writeAttribute( "failures", stats.totals.assertions.failed-unexpectedExceptions ); xml.writeAttribute( "tests", stats.totals.assertions.total() ); xml.writeAttribute( "hostname", "tbd" ); // !TBD if( m_config->showDurations() == ShowDurations::Never ) xml.writeAttribute( "time", "" ); else xml.writeAttribute( "time", suiteTime ); xml.writeAttribute( "timestamp", getCurrentTimestamp() ); // Write test cases for( TestGroupNode::ChildNodes::const_iterator it = groupNode.children.begin(), itEnd = groupNode.children.end(); it != itEnd; ++it ) writeTestCase( **it ); xml.scopedElement( "system-out" ).writeText( trim( stdOutForSuite.str() ), false ); xml.scopedElement( "system-err" ).writeText( trim( stdErrForSuite.str() ), false ); } void writeTestCase( TestCaseNode const& testCaseNode ) { TestCaseStats const& stats = testCaseNode.value; // All test cases have exactly one section - which represents the // test case itself. That section may have 0-n nested sections assert( testCaseNode.children.size() == 1 ); SectionNode const& rootSection = *testCaseNode.children.front(); std::string className = stats.testInfo.className; if( className.empty() ) { if( rootSection.childSections.empty() ) className = "global"; } writeSection( className, "", rootSection ); } void writeSection( std::string const& className, std::string const& rootName, SectionNode const& sectionNode ) { std::string name = trim( sectionNode.stats.sectionInfo.name ); if( !rootName.empty() ) name = rootName + '/' + name; if( !sectionNode.assertions.empty() || !sectionNode.stdOut.empty() || !sectionNode.stdErr.empty() ) { XmlWriter::ScopedElement e = xml.scopedElement( "testcase" ); if( className.empty() ) { xml.writeAttribute( "classname", name ); xml.writeAttribute( "name", "root" ); } else { xml.writeAttribute( "classname", className ); xml.writeAttribute( "name", name ); } xml.writeAttribute( "time", Catch::toString( sectionNode.stats.durationInSeconds ) ); writeAssertions( sectionNode ); if( !sectionNode.stdOut.empty() ) xml.scopedElement( "system-out" ).writeText( trim( sectionNode.stdOut ), false ); if( !sectionNode.stdErr.empty() ) xml.scopedElement( "system-err" ).writeText( trim( sectionNode.stdErr ), false ); } for( SectionNode::ChildSections::const_iterator it = sectionNode.childSections.begin(), itEnd = sectionNode.childSections.end(); it != itEnd; ++it ) if( className.empty() ) writeSection( name, "", **it ); else writeSection( className, name, **it ); } void writeAssertions( SectionNode const& sectionNode ) { for( SectionNode::Assertions::const_iterator it = sectionNode.assertions.begin(), itEnd = sectionNode.assertions.end(); it != itEnd; ++it ) writeAssertion( *it ); } void writeAssertion( AssertionStats const& stats ) { AssertionResult const& result = stats.assertionResult; if( !result.isOk() ) { std::string elementName; switch( result.getResultType() ) { case ResultWas::ThrewException: case ResultWas::FatalErrorCondition: elementName = "error"; break; case ResultWas::ExplicitFailure: elementName = "failure"; break; case ResultWas::ExpressionFailed: elementName = "failure"; break; case ResultWas::DidntThrowException: elementName = "failure"; break; // We should never see these here: case ResultWas::Info: case ResultWas::Warning: case ResultWas::Ok: case ResultWas::Unknown: case 
ResultWas::FailureBit: case ResultWas::Exception: elementName = "internalError"; break; } XmlWriter::ScopedElement e = xml.scopedElement( elementName ); xml.writeAttribute( "message", result.getExpandedExpression() ); xml.writeAttribute( "type", result.getTestMacroName() ); std::ostringstream oss; if( !result.getMessage().empty() ) oss << result.getMessage() << '\n'; for( std::vector::const_iterator it = stats.infoMessages.begin(), itEnd = stats.infoMessages.end(); it != itEnd; ++it ) if( it->type == ResultWas::Info ) oss << it->message << '\n'; oss << "at " << result.getSourceInfo(); xml.writeText( oss.str(), false ); } } XmlWriter xml; Timer suiteTimer; std::ostringstream stdOutForSuite; std::ostringstream stdErrForSuite; unsigned int unexpectedExceptions; bool m_okToFail; }; INTERNAL_CATCH_REGISTER_REPORTER( "junit", JunitReporter ) } // end namespace Catch // #included from: ../reporters/catch_reporter_console.hpp #define TWOBLUECUBES_CATCH_REPORTER_CONSOLE_HPP_INCLUDED #include #include namespace Catch { struct ConsoleReporter : StreamingReporterBase { ConsoleReporter( ReporterConfig const& _config ) : StreamingReporterBase( _config ), m_headerPrinted( false ) {} virtual ~ConsoleReporter() CATCH_OVERRIDE; static std::string getDescription() { return "Reports test results as plain lines of text"; } virtual void noMatchingTestCases( std::string const& spec ) CATCH_OVERRIDE { stream << "No test cases matched '" << spec << '\'' << std::endl; } virtual void assertionStarting( AssertionInfo const& ) CATCH_OVERRIDE { } virtual bool assertionEnded( AssertionStats const& _assertionStats ) CATCH_OVERRIDE { AssertionResult const& result = _assertionStats.assertionResult; bool includeResults = m_config->includeSuccessfulResults() || !result.isOk(); // Drop out if result was successful but we're not printing them. 
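// Note: the extra Warning check below exists because WARN() produces a
// "successful" result; without it, warnings would be swallowed whenever
// successful results are not being printed.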
if( !includeResults && result.getResultType() != ResultWas::Warning ) return false; lazyPrint(); AssertionPrinter printer( stream, _assertionStats, includeResults ); printer.print(); stream << std::endl; return true; } virtual void sectionStarting( SectionInfo const& _sectionInfo ) CATCH_OVERRIDE { m_headerPrinted = false; StreamingReporterBase::sectionStarting( _sectionInfo ); } virtual void sectionEnded( SectionStats const& _sectionStats ) CATCH_OVERRIDE { if( _sectionStats.missingAssertions ) { lazyPrint(); Colour colour( Colour::ResultError ); if( m_sectionStack.size() > 1 ) stream << "\nNo assertions in section"; else stream << "\nNo assertions in test case"; stream << " '" << _sectionStats.sectionInfo.name << "'\n" << std::endl; } if( m_config->showDurations() == ShowDurations::Always ) { stream << getFormattedDuration(_sectionStats.durationInSeconds) << " s: " << _sectionStats.sectionInfo.name << std::endl; } if( m_headerPrinted ) { m_headerPrinted = false; } StreamingReporterBase::sectionEnded( _sectionStats ); } virtual void testCaseEnded( TestCaseStats const& _testCaseStats ) CATCH_OVERRIDE { StreamingReporterBase::testCaseEnded( _testCaseStats ); m_headerPrinted = false; } virtual void testGroupEnded( TestGroupStats const& _testGroupStats ) CATCH_OVERRIDE { if( currentGroupInfo.used ) { printSummaryDivider(); stream << "Summary for group '" << _testGroupStats.groupInfo.name << "':\n"; printTotals( _testGroupStats.totals ); stream << '\n' << std::endl; } StreamingReporterBase::testGroupEnded( _testGroupStats ); } virtual void testRunEnded( TestRunStats const& _testRunStats ) CATCH_OVERRIDE { printTotalsDivider( _testRunStats.totals ); printTotals( _testRunStats.totals ); stream << std::endl; StreamingReporterBase::testRunEnded( _testRunStats ); } private: class AssertionPrinter { void operator= ( AssertionPrinter const& ); public: AssertionPrinter( std::ostream& _stream, AssertionStats const& _stats, bool _printInfoMessages ) : stream( _stream ), stats( _stats ), result( _stats.assertionResult ), colour( Colour::None ), message( result.getMessage() ), messages( _stats.infoMessages ), printInfoMessages( _printInfoMessages ) { switch( result.getResultType() ) { case ResultWas::Ok: colour = Colour::Success; passOrFail = "PASSED"; //if( result.hasMessage() ) if( _stats.infoMessages.size() == 1 ) messageLabel = "with message"; if( _stats.infoMessages.size() > 1 ) messageLabel = "with messages"; break; case ResultWas::ExpressionFailed: if( result.isOk() ) { colour = Colour::Success; passOrFail = "FAILED - but was ok"; } else { colour = Colour::Error; passOrFail = "FAILED"; } if( _stats.infoMessages.size() == 1 ) messageLabel = "with message"; if( _stats.infoMessages.size() > 1 ) messageLabel = "with messages"; break; case ResultWas::ThrewException: colour = Colour::Error; passOrFail = "FAILED"; messageLabel = "due to unexpected exception with "; if (_stats.infoMessages.size() == 1) messageLabel += "message"; if (_stats.infoMessages.size() > 1) messageLabel += "messages"; break; case ResultWas::FatalErrorCondition: colour = Colour::Error; passOrFail = "FAILED"; messageLabel = "due to a fatal error condition"; break; case ResultWas::DidntThrowException: colour = Colour::Error; passOrFail = "FAILED"; messageLabel = "because no exception was thrown where one was expected"; break; case ResultWas::Info: messageLabel = "info"; break; case ResultWas::Warning: messageLabel = "warning"; break; case ResultWas::ExplicitFailure: passOrFail = "FAILED"; colour = Colour::Error; if( 
_stats.infoMessages.size() == 1 ) messageLabel = "explicitly with message"; if( _stats.infoMessages.size() > 1 ) messageLabel = "explicitly with messages"; break; // These cases are here to prevent compiler warnings case ResultWas::Unknown: case ResultWas::FailureBit: case ResultWas::Exception: passOrFail = "** internal error **"; colour = Colour::Error; break; } } void print() const { printSourceInfo(); if( stats.totals.assertions.total() > 0 ) { if( result.isOk() ) stream << '\n'; printResultType(); printOriginalExpression(); printReconstructedExpression(); } else { stream << '\n'; } printMessage(); } private: void printResultType() const { if( !passOrFail.empty() ) { Colour colourGuard( colour ); stream << passOrFail << ":\n"; } } void printOriginalExpression() const { if( result.hasExpression() ) { Colour colourGuard( Colour::OriginalExpression ); stream << " "; stream << result.getExpressionInMacro(); stream << '\n'; } } void printReconstructedExpression() const { if( result.hasExpandedExpression() ) { stream << "with expansion:\n"; Colour colourGuard( Colour::ReconstructedExpression ); stream << Text( result.getExpandedExpression(), TextAttributes().setIndent(2) ) << '\n'; } } void printMessage() const { if( !messageLabel.empty() ) stream << messageLabel << ':' << '\n'; for( std::vector::const_iterator it = messages.begin(), itEnd = messages.end(); it != itEnd; ++it ) { // If this assertion is a warning ignore any INFO messages if( printInfoMessages || it->type != ResultWas::Info ) stream << Text( it->message, TextAttributes().setIndent(2) ) << '\n'; } } void printSourceInfo() const { Colour colourGuard( Colour::FileName ); stream << result.getSourceInfo() << ": "; } std::ostream& stream; AssertionStats const& stats; AssertionResult const& result; Colour::Code colour; std::string passOrFail; std::string messageLabel; std::string message; std::vector messages; bool printInfoMessages; }; void lazyPrint() { if( !currentTestRunInfo.used ) lazyPrintRunInfo(); if( !currentGroupInfo.used ) lazyPrintGroupInfo(); if( !m_headerPrinted ) { printTestCaseAndSectionHeader(); m_headerPrinted = true; } } void lazyPrintRunInfo() { stream << '\n' << getLineOfChars<'~'>() << '\n'; Colour colour( Colour::SecondaryText ); stream << currentTestRunInfo->name << " is a Catch v" << libraryVersion() << " host application.\n" << "Run with -? 
for options\n\n"; if( m_config->rngSeed() != 0 ) stream << "Randomness seeded to: " << m_config->rngSeed() << "\n\n"; currentTestRunInfo.used = true; } void lazyPrintGroupInfo() { if( !currentGroupInfo->name.empty() && currentGroupInfo->groupsCounts > 1 ) { printClosedHeader( "Group: " + currentGroupInfo->name ); currentGroupInfo.used = true; } } void printTestCaseAndSectionHeader() { assert( !m_sectionStack.empty() ); printOpenHeader( currentTestCaseInfo->name ); if( m_sectionStack.size() > 1 ) { Colour colourGuard( Colour::Headers ); std::vector::const_iterator it = m_sectionStack.begin()+1, // Skip first section (test case) itEnd = m_sectionStack.end(); for( ; it != itEnd; ++it ) printHeaderString( it->name, 2 ); } SourceLineInfo lineInfo = m_sectionStack.back().lineInfo; if( !lineInfo.empty() ){ stream << getLineOfChars<'-'>() << '\n'; Colour colourGuard( Colour::FileName ); stream << lineInfo << '\n'; } stream << getLineOfChars<'.'>() << '\n' << std::endl; } void printClosedHeader( std::string const& _name ) { printOpenHeader( _name ); stream << getLineOfChars<'.'>() << '\n'; } void printOpenHeader( std::string const& _name ) { stream << getLineOfChars<'-'>() << '\n'; { Colour colourGuard( Colour::Headers ); printHeaderString( _name ); } } // if string has a : in first line will set indent to follow it on // subsequent lines void printHeaderString( std::string const& _string, std::size_t indent = 0 ) { std::size_t i = _string.find( ": " ); if( i != std::string::npos ) i+=2; else i = 0; stream << Text( _string, TextAttributes() .setIndent( indent+i) .setInitialIndent( indent ) ) << '\n'; } struct SummaryColumn { SummaryColumn( std::string const& _label, Colour::Code _colour ) : label( _label ), colour( _colour ) {} SummaryColumn addRow( std::size_t count ) { std::ostringstream oss; oss << count; std::string row = oss.str(); for( std::vector::iterator it = rows.begin(); it != rows.end(); ++it ) { while( it->size() < row.size() ) *it = ' ' + *it; while( it->size() > row.size() ) row = ' ' + row; } rows.push_back( row ); return *this; } std::string label; Colour::Code colour; std::vector rows; }; void printTotals( Totals const& totals ) { if( totals.testCases.total() == 0 ) { stream << Colour( Colour::Warning ) << "No tests ran\n"; } else if( totals.assertions.total() > 0 && totals.testCases.allPassed() ) { stream << Colour( Colour::ResultSuccess ) << "All tests passed"; stream << " (" << pluralise( totals.assertions.passed, "assertion" ) << " in " << pluralise( totals.testCases.passed, "test case" ) << ')' << '\n'; } else { std::vector columns; columns.push_back( SummaryColumn( "", Colour::None ) .addRow( totals.testCases.total() ) .addRow( totals.assertions.total() ) ); columns.push_back( SummaryColumn( "passed", Colour::Success ) .addRow( totals.testCases.passed ) .addRow( totals.assertions.passed ) ); columns.push_back( SummaryColumn( "failed", Colour::ResultError ) .addRow( totals.testCases.failed ) .addRow( totals.assertions.failed ) ); columns.push_back( SummaryColumn( "failed as expected", Colour::ResultExpectedFailure ) .addRow( totals.testCases.failedButOk ) .addRow( totals.assertions.failedButOk ) ); printSummaryRow( "test cases", columns, 0 ); printSummaryRow( "assertions", columns, 1 ); } } void printSummaryRow( std::string const& label, std::vector const& cols, std::size_t row ) { for( std::vector::const_iterator it = cols.begin(); it != cols.end(); ++it ) { std::string value = it->rows[row]; if( it->label.empty() ) { stream << label << ": "; if( value != "0" ) stream << 
value; else stream << Colour( Colour::Warning ) << "- none -"; } else if( value != "0" ) { stream << Colour( Colour::LightGrey ) << " | "; stream << Colour( it->colour ) << value << ' ' << it->label; } } stream << '\n'; } static std::size_t makeRatio( std::size_t number, std::size_t total ) { std::size_t ratio = total > 0 ? CATCH_CONFIG_CONSOLE_WIDTH * number/ total : 0; return ( ratio == 0 && number > 0 ) ? 1 : ratio; } static std::size_t& findMax( std::size_t& i, std::size_t& j, std::size_t& k ) { if( i > j && i > k ) return i; else if( j > k ) return j; else return k; } void printTotalsDivider( Totals const& totals ) { if( totals.testCases.total() > 0 ) { std::size_t failedRatio = makeRatio( totals.testCases.failed, totals.testCases.total() ); std::size_t failedButOkRatio = makeRatio( totals.testCases.failedButOk, totals.testCases.total() ); std::size_t passedRatio = makeRatio( totals.testCases.passed, totals.testCases.total() ); while( failedRatio + failedButOkRatio + passedRatio < CATCH_CONFIG_CONSOLE_WIDTH-1 ) findMax( failedRatio, failedButOkRatio, passedRatio )++; while( failedRatio + failedButOkRatio + passedRatio > CATCH_CONFIG_CONSOLE_WIDTH-1 ) findMax( failedRatio, failedButOkRatio, passedRatio )--; stream << Colour( Colour::Error ) << std::string( failedRatio, '=' ); stream << Colour( Colour::ResultExpectedFailure ) << std::string( failedButOkRatio, '=' ); if( totals.testCases.allPassed() ) stream << Colour( Colour::ResultSuccess ) << std::string( passedRatio, '=' ); else stream << Colour( Colour::Success ) << std::string( passedRatio, '=' ); } else { stream << Colour( Colour::Warning ) << std::string( CATCH_CONFIG_CONSOLE_WIDTH-1, '=' ); } stream << '\n'; } void printSummaryDivider() { stream << getLineOfChars<'-'>() << '\n'; } private: bool m_headerPrinted; }; INTERNAL_CATCH_REGISTER_REPORTER( "console", ConsoleReporter ) } // end namespace Catch // #included from: ../reporters/catch_reporter_compact.hpp #define TWOBLUECUBES_CATCH_REPORTER_COMPACT_HPP_INCLUDED namespace Catch { struct CompactReporter : StreamingReporterBase { CompactReporter( ReporterConfig const& _config ) : StreamingReporterBase( _config ) {} virtual ~CompactReporter(); static std::string getDescription() { return "Reports test results on a single line, suitable for IDEs"; } virtual ReporterPreferences getPreferences() const { ReporterPreferences prefs; prefs.shouldRedirectStdOut = false; return prefs; } virtual void noMatchingTestCases( std::string const& spec ) { stream << "No test cases matched '" << spec << '\'' << std::endl; } virtual void assertionStarting( AssertionInfo const& ) {} virtual bool assertionEnded( AssertionStats const& _assertionStats ) { AssertionResult const& result = _assertionStats.assertionResult; bool printInfoMessages = true; // Drop out if result was successful and we're not printing those if( !m_config->includeSuccessfulResults() && result.isOk() ) { if( result.getResultType() != ResultWas::Warning ) return false; printInfoMessages = false; } AssertionPrinter printer( stream, _assertionStats, printInfoMessages ); printer.print(); stream << std::endl; return true; } virtual void sectionEnded(SectionStats const& _sectionStats) CATCH_OVERRIDE { if (m_config->showDurations() == ShowDurations::Always) { stream << getFormattedDuration(_sectionStats.durationInSeconds) << " s: " << _sectionStats.sectionInfo.name << std::endl; } } virtual void testRunEnded( TestRunStats const& _testRunStats ) { printTotals( _testRunStats.totals ); stream << '\n' << std::endl; 
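// For orientation (not part of Catch; values invented): the AssertionPrinter
// below emits one line per assertion, e.g. on non-Mac platforms
//
//   test.cpp:7: failed: x == 1 for: 2 == 1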
StreamingReporterBase::testRunEnded( _testRunStats ); } private: class AssertionPrinter { void operator= ( AssertionPrinter const& ); public: AssertionPrinter( std::ostream& _stream, AssertionStats const& _stats, bool _printInfoMessages ) : stream( _stream ) , stats( _stats ) , result( _stats.assertionResult ) , messages( _stats.infoMessages ) , itMessage( _stats.infoMessages.begin() ) , printInfoMessages( _printInfoMessages ) {} void print() { printSourceInfo(); itMessage = messages.begin(); switch( result.getResultType() ) { case ResultWas::Ok: printResultType( Colour::ResultSuccess, passedString() ); printOriginalExpression(); printReconstructedExpression(); if ( ! result.hasExpression() ) printRemainingMessages( Colour::None ); else printRemainingMessages(); break; case ResultWas::ExpressionFailed: if( result.isOk() ) printResultType( Colour::ResultSuccess, failedString() + std::string( " - but was ok" ) ); else printResultType( Colour::Error, failedString() ); printOriginalExpression(); printReconstructedExpression(); printRemainingMessages(); break; case ResultWas::ThrewException: printResultType( Colour::Error, failedString() ); printIssue( "unexpected exception with message:" ); printMessage(); printExpressionWas(); printRemainingMessages(); break; case ResultWas::FatalErrorCondition: printResultType( Colour::Error, failedString() ); printIssue( "fatal error condition with message:" ); printMessage(); printExpressionWas(); printRemainingMessages(); break; case ResultWas::DidntThrowException: printResultType( Colour::Error, failedString() ); printIssue( "expected exception, got none" ); printExpressionWas(); printRemainingMessages(); break; case ResultWas::Info: printResultType( Colour::None, "info" ); printMessage(); printRemainingMessages(); break; case ResultWas::Warning: printResultType( Colour::None, "warning" ); printMessage(); printRemainingMessages(); break; case ResultWas::ExplicitFailure: printResultType( Colour::Error, failedString() ); printIssue( "explicitly" ); printRemainingMessages( Colour::None ); break; // These cases are here to prevent compiler warnings case ResultWas::Unknown: case ResultWas::FailureBit: case ResultWas::Exception: printResultType( Colour::Error, "** internal error **" ); break; } } private: // Colour::LightGrey static Colour::Code dimColour() { return Colour::FileName; } #ifdef CATCH_PLATFORM_MAC static const char* failedString() { return "FAILED"; } static const char* passedString() { return "PASSED"; } #else static const char* failedString() { return "failed"; } static const char* passedString() { return "passed"; } #endif void printSourceInfo() const { Colour colourGuard( Colour::FileName ); stream << result.getSourceInfo() << ':'; } void printResultType( Colour::Code colour, std::string const& passOrFail ) const { if( !passOrFail.empty() ) { { Colour colourGuard( colour ); stream << ' ' << passOrFail; } stream << ':'; } } void printIssue( std::string const& issue ) const { stream << ' ' << issue; } void printExpressionWas() { if( result.hasExpression() ) { stream << ';'; { Colour colour( dimColour() ); stream << " expression was:"; } printOriginalExpression(); } } void printOriginalExpression() const { if( result.hasExpression() ) { stream << ' ' << result.getExpression(); } } void printReconstructedExpression() const { if( result.hasExpandedExpression() ) { { Colour colour( dimColour() ); stream << " for: "; } stream << result.getExpandedExpression(); } } void printMessage() { if ( itMessage != messages.end() ) { stream << " '" << 
itMessage->message << '\''; ++itMessage; } } void printRemainingMessages( Colour::Code colour = dimColour() ) { if ( itMessage == messages.end() ) return; // using messages.end() directly yields compilation error: std::vector::const_iterator itEnd = messages.end(); const std::size_t N = static_cast( std::distance( itMessage, itEnd ) ); { Colour colourGuard( colour ); stream << " with " << pluralise( N, "message" ) << ':'; } for(; itMessage != itEnd; ) { // If this assertion is a warning ignore any INFO messages if( printInfoMessages || itMessage->type != ResultWas::Info ) { stream << " '" << itMessage->message << '\''; if ( ++itMessage != itEnd ) { Colour colourGuard( dimColour() ); stream << " and"; } } } } private: std::ostream& stream; AssertionStats const& stats; AssertionResult const& result; std::vector messages; std::vector::const_iterator itMessage; bool printInfoMessages; }; // Colour, message variants: // - white: No tests ran. // - red: Failed [both/all] N test cases, failed [both/all] M assertions. // - white: Passed [both/all] N test cases (no assertions). // - red: Failed N tests cases, failed M assertions. // - green: Passed [both/all] N tests cases with M assertions. std::string bothOrAll( std::size_t count ) const { return count == 1 ? std::string() : count == 2 ? "both " : "all " ; } void printTotals( const Totals& totals ) const { if( totals.testCases.total() == 0 ) { stream << "No tests ran."; } else if( totals.testCases.failed == totals.testCases.total() ) { Colour colour( Colour::ResultError ); const std::string qualify_assertions_failed = totals.assertions.failed == totals.assertions.total() ? bothOrAll( totals.assertions.failed ) : std::string(); stream << "Failed " << bothOrAll( totals.testCases.failed ) << pluralise( totals.testCases.failed, "test case" ) << ", " "failed " << qualify_assertions_failed << pluralise( totals.assertions.failed, "assertion" ) << '.'; } else if( totals.assertions.total() == 0 ) { stream << "Passed " << bothOrAll( totals.testCases.total() ) << pluralise( totals.testCases.total(), "test case" ) << " (no assertions)."; } else if( totals.assertions.failed ) { Colour colour( Colour::ResultError ); stream << "Failed " << pluralise( totals.testCases.failed, "test case" ) << ", " "failed " << pluralise( totals.assertions.failed, "assertion" ) << '.'; } else { Colour colour( Colour::ResultSuccess ); stream << "Passed " << bothOrAll( totals.testCases.passed ) << pluralise( totals.testCases.passed, "test case" ) << " with " << pluralise( totals.assertions.passed, "assertion" ) << '.'; } } }; INTERNAL_CATCH_REGISTER_REPORTER( "compact", CompactReporter ) } // end namespace Catch namespace Catch { // These are all here to avoid warnings about not having any out of line // virtual methods NonCopyable::~NonCopyable() {} IShared::~IShared() {} IStream::~IStream() CATCH_NOEXCEPT {} FileStream::~FileStream() CATCH_NOEXCEPT {} CoutStream::~CoutStream() CATCH_NOEXCEPT {} DebugOutStream::~DebugOutStream() CATCH_NOEXCEPT {} StreamBufBase::~StreamBufBase() CATCH_NOEXCEPT {} IContext::~IContext() {} IResultCapture::~IResultCapture() {} ITestCase::~ITestCase() {} ITestCaseRegistry::~ITestCaseRegistry() {} IRegistryHub::~IRegistryHub() {} IMutableRegistryHub::~IMutableRegistryHub() {} IExceptionTranslator::~IExceptionTranslator() {} IExceptionTranslatorRegistry::~IExceptionTranslatorRegistry() {} IReporter::~IReporter() {} IReporterFactory::~IReporterFactory() {} IReporterRegistry::~IReporterRegistry() {} IStreamingReporter::~IStreamingReporter() {} 
AssertionStats::~AssertionStats() {} SectionStats::~SectionStats() {} TestCaseStats::~TestCaseStats() {} TestGroupStats::~TestGroupStats() {} TestRunStats::~TestRunStats() {} CumulativeReporterBase::SectionNode::~SectionNode() {} CumulativeReporterBase::~CumulativeReporterBase() {} StreamingReporterBase::~StreamingReporterBase() {} ConsoleReporter::~ConsoleReporter() {} CompactReporter::~CompactReporter() {} IRunner::~IRunner() {} IMutableContext::~IMutableContext() {} IConfig::~IConfig() {} XmlReporter::~XmlReporter() {} JunitReporter::~JunitReporter() {} TestRegistry::~TestRegistry() {} FreeFunctionTestCase::~FreeFunctionTestCase() {} IGeneratorInfo::~IGeneratorInfo() {} IGeneratorsForTest::~IGeneratorsForTest() {} WildcardPattern::~WildcardPattern() {} TestSpec::Pattern::~Pattern() {} TestSpec::NamePattern::~NamePattern() {} TestSpec::TagPattern::~TagPattern() {} TestSpec::ExcludedPattern::~ExcludedPattern() {} Matchers::Impl::MatcherUntypedBase::~MatcherUntypedBase() {} void Config::dummy() {} namespace TestCaseTracking { ITracker::~ITracker() {} TrackerBase::~TrackerBase() {} SectionTracker::~SectionTracker() {} IndexTracker::~IndexTracker() {} } } #ifdef __clang__ # pragma clang diagnostic pop #endif #endif #ifdef CATCH_CONFIG_MAIN // #included from: internal/catch_default_main.hpp #define TWOBLUECUBES_CATCH_DEFAULT_MAIN_HPP_INCLUDED #ifndef __OBJC__ #if defined(WIN32) && defined(_UNICODE) && !defined(DO_NOT_USE_WMAIN) // Standard C/C++ Win32 Unicode wmain entry point extern "C" int wmain (int argc, wchar_t * argv[], wchar_t * []) { #else // Standard C/C++ main entry point int main (int argc, char * argv[]) { #endif int result = Catch::Session().run( argc, argv ); return ( result < 0xff ? result : 0xff ); } #else // __OBJC__ // Objective-C entry point int main (int argc, char * const argv[]) { #if !CATCH_ARC_ENABLED NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; #endif Catch::registerTestMethods(); int result = Catch::Session().run( argc, (char* const*)argv ); #if !CATCH_ARC_ENABLED [pool drain]; #endif return ( result < 0xff ? 
#ifdef CLARA_CONFIG_MAIN_NOT_DEFINED
#  undef CLARA_CONFIG_MAIN
#endif

//////

// If this config identifier is defined then all CATCH macros are prefixed with CATCH_
#ifdef CATCH_CONFIG_PREFIX_ALL

#if defined(CATCH_CONFIG_FAST_COMPILE)
#define CATCH_REQUIRE( expr ) INTERNAL_CATCH_TEST_NO_TRY( "CATCH_REQUIRE", Catch::ResultDisposition::Normal, expr )
#define CATCH_REQUIRE_FALSE( expr ) INTERNAL_CATCH_TEST_NO_TRY( "CATCH_REQUIRE_FALSE", Catch::ResultDisposition::Normal | Catch::ResultDisposition::FalseTest, expr )
#else
#define CATCH_REQUIRE( expr ) INTERNAL_CATCH_TEST( "CATCH_REQUIRE", Catch::ResultDisposition::Normal, expr )
#define CATCH_REQUIRE_FALSE( expr ) INTERNAL_CATCH_TEST( "CATCH_REQUIRE_FALSE", Catch::ResultDisposition::Normal | Catch::ResultDisposition::FalseTest, expr )
#endif

#define CATCH_REQUIRE_THROWS( expr ) INTERNAL_CATCH_THROWS( "CATCH_REQUIRE_THROWS", Catch::ResultDisposition::Normal, "", expr )
#define CATCH_REQUIRE_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( "CATCH_REQUIRE_THROWS_AS", exceptionType, Catch::ResultDisposition::Normal, expr )
#define CATCH_REQUIRE_THROWS_WITH( expr, matcher ) INTERNAL_CATCH_THROWS( "CATCH_REQUIRE_THROWS_WITH", Catch::ResultDisposition::Normal, matcher, expr )
#define CATCH_REQUIRE_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( "CATCH_REQUIRE_NOTHROW", Catch::ResultDisposition::Normal, expr )

#define CATCH_CHECK( expr ) INTERNAL_CATCH_TEST( "CATCH_CHECK", Catch::ResultDisposition::ContinueOnFailure, expr )
#define CATCH_CHECK_FALSE( expr ) INTERNAL_CATCH_TEST( "CATCH_CHECK_FALSE", Catch::ResultDisposition::ContinueOnFailure | Catch::ResultDisposition::FalseTest, expr )
#define CATCH_CHECKED_IF( expr ) INTERNAL_CATCH_IF( "CATCH_CHECKED_IF", Catch::ResultDisposition::ContinueOnFailure, expr )
#define CATCH_CHECKED_ELSE( expr ) INTERNAL_CATCH_ELSE( "CATCH_CHECKED_ELSE", Catch::ResultDisposition::ContinueOnFailure, expr )
#define CATCH_CHECK_NOFAIL( expr ) INTERNAL_CATCH_TEST( "CATCH_CHECK_NOFAIL", Catch::ResultDisposition::ContinueOnFailure | Catch::ResultDisposition::SuppressFail, expr )

#define CATCH_CHECK_THROWS( expr ) INTERNAL_CATCH_THROWS( "CATCH_CHECK_THROWS", Catch::ResultDisposition::ContinueOnFailure, "", expr )
#define CATCH_CHECK_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( "CATCH_CHECK_THROWS_AS", exceptionType, Catch::ResultDisposition::ContinueOnFailure, expr )
#define CATCH_CHECK_THROWS_WITH( expr, matcher ) INTERNAL_CATCH_THROWS( "CATCH_CHECK_THROWS_WITH", Catch::ResultDisposition::ContinueOnFailure, matcher, expr )
#define CATCH_CHECK_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( "CATCH_CHECK_NOTHROW", Catch::ResultDisposition::ContinueOnFailure, expr )

#define CATCH_CHECK_THAT( arg, matcher ) INTERNAL_CHECK_THAT( "CATCH_CHECK_THAT", matcher, Catch::ResultDisposition::ContinueOnFailure, arg )
#if defined(CATCH_CONFIG_FAST_COMPILE)
#define CATCH_REQUIRE_THAT( arg, matcher ) INTERNAL_CHECK_THAT_NO_TRY( "CATCH_REQUIRE_THAT", matcher, Catch::ResultDisposition::Normal, arg )
#else
#define CATCH_REQUIRE_THAT( arg, matcher ) INTERNAL_CHECK_THAT( "CATCH_REQUIRE_THAT", matcher, Catch::ResultDisposition::Normal, arg )
#endif

#define CATCH_INFO( msg ) INTERNAL_CATCH_INFO( "CATCH_INFO", msg )
#define CATCH_WARN( msg ) INTERNAL_CATCH_MSG( "CATCH_WARN", Catch::ResultWas::Warning, Catch::ResultDisposition::ContinueOnFailure, msg )
#define CATCH_SCOPED_INFO( msg ) INTERNAL_CATCH_INFO( "CATCH_INFO", msg )
#define CATCH_CAPTURE( msg ) INTERNAL_CATCH_INFO( "CATCH_CAPTURE", #msg " := " << Catch::toString(msg) )
#define CATCH_SCOPED_CAPTURE( msg ) INTERNAL_CATCH_INFO( "CATCH_CAPTURE", #msg " := " << Catch::toString(msg) )

#ifdef CATCH_CONFIG_VARIADIC_MACROS
#define CATCH_TEST_CASE( ... ) INTERNAL_CATCH_TESTCASE( __VA_ARGS__ )
#define CATCH_TEST_CASE_METHOD( className, ... ) INTERNAL_CATCH_TEST_CASE_METHOD( className, __VA_ARGS__ )
#define CATCH_METHOD_AS_TEST_CASE( method, ... ) INTERNAL_CATCH_METHOD_AS_TEST_CASE( method, __VA_ARGS__ )
#define CATCH_REGISTER_TEST_CASE( Function, ... ) INTERNAL_CATCH_REGISTER_TESTCASE( Function, __VA_ARGS__ )
#define CATCH_SECTION( ... ) INTERNAL_CATCH_SECTION( __VA_ARGS__ )
#define CATCH_FAIL( ... ) INTERNAL_CATCH_MSG( "CATCH_FAIL", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::Normal, __VA_ARGS__ )
#define CATCH_FAIL_CHECK( ... ) INTERNAL_CATCH_MSG( "CATCH_FAIL_CHECK", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::ContinueOnFailure, __VA_ARGS__ )
#define CATCH_SUCCEED( ... ) INTERNAL_CATCH_MSG( "CATCH_SUCCEED", Catch::ResultWas::Ok, Catch::ResultDisposition::ContinueOnFailure, __VA_ARGS__ )
#else
#define CATCH_TEST_CASE( name, description ) INTERNAL_CATCH_TESTCASE( name, description )
#define CATCH_TEST_CASE_METHOD( className, name, description ) INTERNAL_CATCH_TEST_CASE_METHOD( className, name, description )
#define CATCH_METHOD_AS_TEST_CASE( method, name, description ) INTERNAL_CATCH_METHOD_AS_TEST_CASE( method, name, description )
#define CATCH_REGISTER_TEST_CASE( function, name, description ) INTERNAL_CATCH_REGISTER_TESTCASE( function, name, description )
#define CATCH_SECTION( name, description ) INTERNAL_CATCH_SECTION( name, description )
#define CATCH_FAIL( msg ) INTERNAL_CATCH_MSG( "CATCH_FAIL", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::Normal, msg )
#define CATCH_FAIL_CHECK( msg ) INTERNAL_CATCH_MSG( "CATCH_FAIL_CHECK", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::ContinueOnFailure, msg )
#define CATCH_SUCCEED( msg ) INTERNAL_CATCH_MSG( "CATCH_SUCCEED", Catch::ResultWas::Ok, Catch::ResultDisposition::ContinueOnFailure, msg )
#endif

#define CATCH_ANON_TEST_CASE() INTERNAL_CATCH_TESTCASE( "", "" )

#define CATCH_REGISTER_REPORTER( name, reporterType ) INTERNAL_CATCH_REGISTER_REPORTER( name, reporterType )
#define CATCH_REGISTER_LEGACY_REPORTER( name, reporterType ) INTERNAL_CATCH_REGISTER_LEGACY_REPORTER( name, reporterType )

#define CATCH_GENERATE( expr ) INTERNAL_CATCH_GENERATE( expr )

// "BDD-style" convenience wrappers
#ifdef CATCH_CONFIG_VARIADIC_MACROS
#define CATCH_SCENARIO( ... ) CATCH_TEST_CASE( "Scenario: " __VA_ARGS__ )
#define CATCH_SCENARIO_METHOD( className, ... ) INTERNAL_CATCH_TEST_CASE_METHOD( className, "Scenario: " __VA_ARGS__ )
#else
#define CATCH_SCENARIO( name, tags ) CATCH_TEST_CASE( "Scenario: " name, tags )
#define CATCH_SCENARIO_METHOD( className, name, tags ) INTERNAL_CATCH_TEST_CASE_METHOD( className, "Scenario: " name, tags )
#endif
#define CATCH_GIVEN( desc )    CATCH_SECTION( std::string( "Given: ") + desc, "" )
#define CATCH_WHEN( desc )     CATCH_SECTION( std::string( " When: ") + desc, "" )
#define CATCH_AND_WHEN( desc ) CATCH_SECTION( std::string( "  And: ") + desc, "" )
#define CATCH_THEN( desc )     CATCH_SECTION( std::string( " Then: ") + desc, "" )
#define CATCH_AND_THEN( desc ) CATCH_SECTION( std::string( "  And: ") + desc, "" )
// If CATCH_CONFIG_PREFIX_ALL is not defined then the CATCH_ prefix is not required
#else

#if defined(CATCH_CONFIG_FAST_COMPILE)
#define REQUIRE( expr ) INTERNAL_CATCH_TEST_NO_TRY( "REQUIRE", Catch::ResultDisposition::Normal, expr )
#define REQUIRE_FALSE( expr ) INTERNAL_CATCH_TEST_NO_TRY( "REQUIRE_FALSE", Catch::ResultDisposition::Normal | Catch::ResultDisposition::FalseTest, expr )
#else
#define REQUIRE( expr ) INTERNAL_CATCH_TEST( "REQUIRE", Catch::ResultDisposition::Normal, expr )
#define REQUIRE_FALSE( expr ) INTERNAL_CATCH_TEST( "REQUIRE_FALSE", Catch::ResultDisposition::Normal | Catch::ResultDisposition::FalseTest, expr )
#endif

#define REQUIRE_THROWS( expr ) INTERNAL_CATCH_THROWS( "REQUIRE_THROWS", Catch::ResultDisposition::Normal, "", expr )
#define REQUIRE_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( "REQUIRE_THROWS_AS", exceptionType, Catch::ResultDisposition::Normal, expr )
#define REQUIRE_THROWS_WITH( expr, matcher ) INTERNAL_CATCH_THROWS( "REQUIRE_THROWS_WITH", Catch::ResultDisposition::Normal, matcher, expr )
#define REQUIRE_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( "REQUIRE_NOTHROW", Catch::ResultDisposition::Normal, expr )

#define CHECK( expr ) INTERNAL_CATCH_TEST( "CHECK", Catch::ResultDisposition::ContinueOnFailure, expr )
#define CHECK_FALSE( expr ) INTERNAL_CATCH_TEST( "CHECK_FALSE", Catch::ResultDisposition::ContinueOnFailure | Catch::ResultDisposition::FalseTest, expr )
#define CHECKED_IF( expr ) INTERNAL_CATCH_IF( "CHECKED_IF", Catch::ResultDisposition::ContinueOnFailure, expr )
#define CHECKED_ELSE( expr ) INTERNAL_CATCH_ELSE( "CHECKED_ELSE", Catch::ResultDisposition::ContinueOnFailure, expr )
#define CHECK_NOFAIL( expr ) INTERNAL_CATCH_TEST( "CHECK_NOFAIL", Catch::ResultDisposition::ContinueOnFailure | Catch::ResultDisposition::SuppressFail, expr )

#define CHECK_THROWS( expr ) INTERNAL_CATCH_THROWS( "CHECK_THROWS", Catch::ResultDisposition::ContinueOnFailure, "", expr )
#define CHECK_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( "CHECK_THROWS_AS", exceptionType, Catch::ResultDisposition::ContinueOnFailure, expr )
#define CHECK_THROWS_WITH( expr, matcher ) INTERNAL_CATCH_THROWS( "CHECK_THROWS_WITH", Catch::ResultDisposition::ContinueOnFailure, matcher, expr )
#define CHECK_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( "CHECK_NOTHROW", Catch::ResultDisposition::ContinueOnFailure, expr )

#define CHECK_THAT( arg, matcher ) INTERNAL_CHECK_THAT( "CHECK_THAT", matcher, Catch::ResultDisposition::ContinueOnFailure, arg )
#if defined(CATCH_CONFIG_FAST_COMPILE)
#define REQUIRE_THAT( arg, matcher ) INTERNAL_CHECK_THAT_NO_TRY( "REQUIRE_THAT", matcher, Catch::ResultDisposition::Normal, arg )
#else
#define REQUIRE_THAT( arg, matcher ) INTERNAL_CHECK_THAT( "REQUIRE_THAT", matcher, Catch::ResultDisposition::Normal, arg )
#endif
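// The dispositions above give the two families their semantics: the REQUIRE*
// macros (Catch::ResultDisposition::Normal) abort the enclosing test case on
// failure, while the CHECK* macros (Catch::ResultDisposition::ContinueOnFailure)
// record the failure and keep going. Illustrative sketch; parse() and
// parser_ok() are hypothetical helpers:
//
//     CHECK( parse( "1" ) == 1 );    // failure recorded, execution continues
//     CHECK( parse( "2" ) == 2 );    // still evaluated even if the line above failed
//     REQUIRE( parser_ok() );        // a failure here stops this test case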
#define INFO( msg ) INTERNAL_CATCH_INFO( "INFO", msg )
#define WARN( msg ) INTERNAL_CATCH_MSG( "WARN", Catch::ResultWas::Warning, Catch::ResultDisposition::ContinueOnFailure, msg )
#define SCOPED_INFO( msg ) INTERNAL_CATCH_INFO( "INFO", msg )
#define CAPTURE( msg ) INTERNAL_CATCH_INFO( "CAPTURE", #msg " := " << Catch::toString(msg) )
#define SCOPED_CAPTURE( msg ) INTERNAL_CATCH_INFO( "CAPTURE", #msg " := " << Catch::toString(msg) )

#ifdef CATCH_CONFIG_VARIADIC_MACROS
#define TEST_CASE( ... ) INTERNAL_CATCH_TESTCASE( __VA_ARGS__ )
#define TEST_CASE_METHOD( className, ... ) INTERNAL_CATCH_TEST_CASE_METHOD( className, __VA_ARGS__ )
#define METHOD_AS_TEST_CASE( method, ... ) INTERNAL_CATCH_METHOD_AS_TEST_CASE( method, __VA_ARGS__ )
#define REGISTER_TEST_CASE( Function, ... ) INTERNAL_CATCH_REGISTER_TESTCASE( Function, __VA_ARGS__ )
#define SECTION( ... ) INTERNAL_CATCH_SECTION( __VA_ARGS__ )
#define FAIL( ... ) INTERNAL_CATCH_MSG( "FAIL", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::Normal, __VA_ARGS__ )
#define FAIL_CHECK( ... ) INTERNAL_CATCH_MSG( "FAIL_CHECK", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::ContinueOnFailure, __VA_ARGS__ )
#define SUCCEED( ... ) INTERNAL_CATCH_MSG( "SUCCEED", Catch::ResultWas::Ok, Catch::ResultDisposition::ContinueOnFailure, __VA_ARGS__ )
#else
#define TEST_CASE( name, description ) INTERNAL_CATCH_TESTCASE( name, description )
#define TEST_CASE_METHOD( className, name, description ) INTERNAL_CATCH_TEST_CASE_METHOD( className, name, description )
#define METHOD_AS_TEST_CASE( method, name, description ) INTERNAL_CATCH_METHOD_AS_TEST_CASE( method, name, description )
#define REGISTER_TEST_CASE( method, name, description ) INTERNAL_CATCH_REGISTER_TESTCASE( method, name, description )
#define SECTION( name, description ) INTERNAL_CATCH_SECTION( name, description )
#define FAIL( msg ) INTERNAL_CATCH_MSG( "FAIL", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::Normal, msg )
#define FAIL_CHECK( msg ) INTERNAL_CATCH_MSG( "FAIL_CHECK", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::ContinueOnFailure, msg )
#define SUCCEED( msg ) INTERNAL_CATCH_MSG( "SUCCEED", Catch::ResultWas::Ok, Catch::ResultDisposition::ContinueOnFailure, msg )
#endif

#define ANON_TEST_CASE() INTERNAL_CATCH_TESTCASE( "", "" )

#define REGISTER_REPORTER( name, reporterType ) INTERNAL_CATCH_REGISTER_REPORTER( name, reporterType )
#define REGISTER_LEGACY_REPORTER( name, reporterType ) INTERNAL_CATCH_REGISTER_LEGACY_REPORTER( name, reporterType )

#define GENERATE( expr ) INTERNAL_CATCH_GENERATE( expr )

#endif

#define CATCH_TRANSLATE_EXCEPTION( signature ) INTERNAL_CATCH_TRANSLATE_EXCEPTION( signature )

// "BDD-style" convenience wrappers
#ifdef CATCH_CONFIG_VARIADIC_MACROS
#define SCENARIO( ... ) TEST_CASE( "Scenario: " __VA_ARGS__ )
#define SCENARIO_METHOD( className, ... ) INTERNAL_CATCH_TEST_CASE_METHOD( className, "Scenario: " __VA_ARGS__ )
#else
#define SCENARIO( name, tags ) TEST_CASE( "Scenario: " name, tags )
#define SCENARIO_METHOD( className, name, tags ) INTERNAL_CATCH_TEST_CASE_METHOD( className, "Scenario: " name, tags )
#endif
#define GIVEN( desc )    SECTION( std::string("   Given: ") + desc, "" )
#define WHEN( desc )     SECTION( std::string("    When: ") + desc, "" )
#define AND_WHEN( desc ) SECTION( std::string("And when: ") + desc, "" )
#define THEN( desc )     SECTION( std::string("    Then: ") + desc, "" )
#define AND_THEN( desc ) SECTION( std::string("     And: ") + desc, "" )
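// Usage sketch (illustrative; the scenario and variable are hypothetical) for
// the BDD-style wrappers defined above: SCENARIO expands to a TEST_CASE and
// GIVEN/WHEN/THEN expand to nested SECTIONs with padded name prefixes:
//
//     SCENARIO( "totals accumulate", "[bdd]" ) {
//         GIVEN( "a zeroed total" ) {
//             int total = 0;
//             WHEN( "5 is added" ) {
//                 total += 5;
//                 THEN( "the total is 5" ) {
//                     REQUIRE( total == 5 );
//                 }
//             }
//         }
//     }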
using Catch::Detail::Approx;

// #included from: internal/catch_reenable_warnings.h

#define TWOBLUECUBES_CATCH_REENABLE_WARNINGS_H_INCLUDED

#ifdef __clang__
#  ifdef __ICC // icpc defines the __clang__ macro
#    pragma warning(pop)
#  else
#    pragma clang diagnostic pop
#  endif
#elif defined __GNUC__
#  pragma GCC diagnostic pop
#endif

#endif // TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED
testthat/inst/CITATION0000644000176200001440000000116413202350207014220 0ustar liggesuserscitHeader("To cite the testthat package in publications, use:")

citEntry(
  entry = "Article",
  author = personList(as.person("Hadley Wickham")),
  title = "testthat: Get Started with Testing",
  journal = "The R Journal",
  year = 2011,
  volume = 3,
  pages = "5--10",
  url = "https://journal.r-project.org/archive/2011-1/RJournal_2011-1_Wickham.pdf",
  textVersion = paste(
    "Hadley Wickham. testthat: Get Started with Testing.",
    "The R Journal, vol. 3, no. 1, pp. 5--10, 2011"
  )
)

citFooter("As testthat is continually evolving, you may want to cite its version number. Find it with 'help(package=testthat)'.")